Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/atm/eni.c5
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/atm/idt77252.c52
-rw-r--r--drivers/atm/iphase.c14
-rw-r--r--drivers/atm/solos-pci.c3
-rw-r--r--drivers/bcma/Kconfig33
-rw-r--r--drivers/bcma/Makefile7
-rw-r--r--drivers/bcma/README19
-rw-r--r--drivers/bcma/TODO3
-rw-r--r--drivers/bcma/bcma_private.h28
-rw-r--r--drivers/bcma/core.c51
-rw-r--r--drivers/bcma/driver_chipcommon.c89
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c134
-rw-r--r--drivers/bcma/driver_pci.c163
-rw-r--r--drivers/bcma/host_pci.c196
-rw-r--r--drivers/bcma/main.c247
-rw-r--r--drivers/bcma/scan.c360
-rw-r--r--drivers/bcma/scan.h56
-rw-r--r--drivers/bluetooth/Kconfig4
-rw-r--r--drivers/bluetooth/ath3k.c4
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c124
-rw-r--r--drivers/bluetooth/btmrvl_sdio.h68
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bluetooth/hci_ath.c7
-rw-r--r--drivers/bluetooth/hci_h4.c7
-rw-r--r--drivers/bluetooth/hci_ldisc.c6
-rw-r--r--drivers/ieee802154/fakehard.c10
-rw-r--r--drivers/infiniband/core/addr.c9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c59
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c28
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c24
-rw-r--r--drivers/isdn/capi/Kconfig15
-rw-r--r--drivers/isdn/capi/Makefile1
-rw-r--r--drivers/isdn/capi/capi.c24
-rw-r--r--drivers/isdn/capi/capifs.c239
-rw-r--r--drivers/isdn/capi/capifs.h28
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c5
-rw-r--r--drivers/isdn/gigaset/ev-layer.c26
-rw-r--r--drivers/isdn/hardware/eicon/debug.c3
-rw-r--r--drivers/isdn/hardware/eicon/message.c23
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c6
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c5
-rw-r--r--drivers/isdn/hisax/arcofi.c4
-rw-r--r--drivers/isdn/hisax/elsa_cs.c2
-rw-r--r--drivers/isdn/hisax/elsa_ser.c3
-rw-r--r--drivers/isdn/hisax/hfc_usb.c6
-rw-r--r--drivers/isdn/hisax/ipacx.c4
-rw-r--r--drivers/isdn/hisax/jade.c3
-rw-r--r--drivers/isdn/hisax/l3dss1.c4
-rw-r--r--drivers/isdn/hisax/l3ni1.c4
-rw-r--r--drivers/isdn/hisax/st5481.h1
-rw-r--r--drivers/isdn/hisax/st5481_init.c6
-rw-r--r--drivers/isdn/hisax/teles_cs.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c11
-rw-r--r--drivers/isdn/i4l/isdn_common.c11
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c2
-rw-r--r--drivers/isdn/mISDN/layer2.c20
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/net/3c509.c16
-rw-r--r--drivers/net/3c59x.c4
-rw-r--r--drivers/net/8139cp.c46
-rw-r--r--drivers/net/acenic.c12
-rw-r--r--drivers/net/arm/etherh.c5
-rw-r--r--drivers/net/arm/ks8695net.c7
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c17
-rw-r--r--drivers/net/atl1c/atl1c_main.c25
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c7
-rw-r--r--drivers/net/atl1e/atl1e_main.c14
-rw-r--r--drivers/net/atlx/atl1.c29
-rw-r--r--drivers/net/atlx/atl2.c18
-rw-r--r--drivers/net/b44.c13
-rw-r--r--drivers/net/bcm63xx_enet.c7
-rw-r--r--drivers/net/benet/be.h69
-rw-r--r--drivers/net/benet/be_cmds.c341
-rw-r--r--drivers/net/benet/be_cmds.h457
-rw-r--r--drivers/net/benet/be_ethtool.c227
-rw-r--r--drivers/net/benet/be_hw.h4
-rw-r--r--drivers/net/benet/be_main.c622
-rw-r--r--drivers/net/bna/bfa_ioc.c13
-rw-r--r--drivers/net/bna/bna_ctrl.c21
-rw-r--r--drivers/net/bna/bna_txrx.c7
-rw-r--r--drivers/net/bna/bnad.c22
-rw-r--r--drivers/net/bna/bnad.h2
-rw-r--r--drivers/net/bna/bnad_ethtool.c70
-rw-r--r--drivers/net/bnx2.c184
-rw-r--r--drivers/net/bnx2.h3
-rw-r--r--drivers/net/bnx2x/bnx2x.h31
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c643
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h454
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c50
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h16
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c167
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c14
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c360
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/bonding/Makefile3
-rw-r--r--drivers/net/bonding/bond_3ad.c43
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c57
-rw-r--r--drivers/net/bonding/bond_alb.h1
-rw-r--r--drivers/net/bonding/bond_main.c530
-rw-r--r--drivers/net/bonding/bond_procfs.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c84
-rw-r--r--drivers/net/bonding/bonding.h56
-rw-r--r--drivers/net/cassini.c37
-rw-r--r--drivers/net/chelsio/common.h5
-rw-r--r--drivers/net/chelsio/cxgb2.c59
-rw-r--r--drivers/net/chelsio/sge.c13
-rw-r--r--drivers/net/chelsio/tp.c5
-rw-r--r--drivers/net/chelsio/tp.h1
-rw-r--r--drivers/net/cnic.c62
-rw-r--r--drivers/net/cnic.h1
-rw-r--r--drivers/net/cnic_if.h6
-rw-r--r--drivers/net/cris/eth_v10.c4
-rw-r--r--drivers/net/cxgb3/adapter.h7
-rw-r--r--drivers/net/cxgb3/common.h1
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c117
-rw-r--r--drivers/net/cxgb3/sge.c7
-rw-r--r--drivers/net/cxgb4/cxgb4.h6
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c103
-rw-r--r--drivers/net/cxgb4/sge.c4
-rw-r--r--drivers/net/cxgb4vf/adapter.h6
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c73
-rw-r--r--drivers/net/cxgb4vf/sge.c4
-rw-r--r--drivers/net/depca.c35
-rw-r--r--drivers/net/dl2k.c29
-rw-r--r--drivers/net/dm9000.c57
-rw-r--r--drivers/net/dummy.c4
-rw-r--r--drivers/net/e100.c78
-rw-r--r--drivers/net/e1000/e1000.h5
-rw-r--r--drivers/net/e1000/e1000_ethtool.c60
-rw-r--r--drivers/net/e1000/e1000_main.c42
-rw-r--r--drivers/net/e1000e/82571.c22
-rw-r--r--drivers/net/e1000e/e1000.h12
-rw-r--r--drivers/net/e1000e/es2lan.c5
-rw-r--r--drivers/net/e1000e/ethtool.c154
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c16
-rw-r--r--drivers/net/e1000e/lib.c18
-rw-r--r--drivers/net/e1000e/netdev.c213
-rw-r--r--drivers/net/e1000e/phy.c4
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/ehea/ehea_ethtool.c46
-rw-r--r--drivers/net/ehea/ehea_main.c4
-rw-r--r--drivers/net/enc28j60.c5
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_dev.c62
-rw-r--r--drivers/net/enic/enic_dev.h7
-rw-r--r--drivers/net/enic/enic_main.c274
-rw-r--r--drivers/net/enic/enic_pp.c264
-rw-r--r--drivers/net/enic/enic_pp.h27
-rw-r--r--drivers/net/enic/enic_res.c4
-rw-r--r--drivers/net/enic/vnic_dev.c97
-rw-r--r--drivers/net/enic/vnic_dev.h6
-rw-r--r--drivers/net/enic/vnic_devcmd.h57
-rw-r--r--drivers/net/enic/vnic_vic.c5
-rw-r--r--drivers/net/enic/vnic_vic.h13
-rw-r--r--drivers/net/ewrk3.c58
-rw-r--r--drivers/net/forcedeth.c113
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/gianfar.c16
-rw-r--r--drivers/net/gianfar.h20
-rw-r--r--drivers/net/gianfar_ethtool.c110
-rw-r--r--drivers/net/greth.c46
-rw-r--r--drivers/net/greth.h4
-rw-r--r--drivers/net/hamachi.c78
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hp100.c12
-rw-r--r--drivers/net/ibm_newemac/core.c17
-rw-r--r--drivers/net/ibmlana.c7
-rw-r--r--drivers/net/ibmveth.c98
-rw-r--r--drivers/net/ifb.c4
-rw-r--r--drivers/net/igb/e1000_82575.c11
-rw-r--r--drivers/net/igb/igb.h2
-rw-r--r--drivers/net/igb/igb_ethtool.c48
-rw-r--r--drivers/net/igb/igb_main.c90
-rw-r--r--drivers/net/igbvf/ethtool.c14
-rw-r--r--drivers/net/ioc3-eth.c30
-rw-r--r--drivers/net/irda/smsc-ircc2.c44
-rw-r--r--drivers/net/ixgb/ixgb.h3
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c53
-rw-r--r--drivers/net/ixgbe/ixgbe.h111
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c61
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c178
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c532
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c77
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c109
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c191
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1037
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c20
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c100
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h218
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c317
-rw-r--r--drivers/net/ixgbevf/ethtool.c8
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c30
-rw-r--r--drivers/net/ixgbevf/mbx.h1
-rw-r--r--drivers/net/ixgbevf/vf.c34
-rw-r--r--drivers/net/ixgbevf/vf.h1
-rw-r--r--drivers/net/jme.c80
-rw-r--r--drivers/net/jme.h2
-rw-r--r--drivers/net/ksz884x.c82
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/macb.c11
-rw-r--r--drivers/net/macvlan.c21
-rw-r--r--drivers/net/mdio.c23
-rw-r--r--drivers/net/mii.c31
-rw-r--r--drivers/net/mlx4/en_ethtool.c49
-rw-r--r--drivers/net/mlx4/en_netdev.c26
-rw-r--r--drivers/net/mlx4/en_rx.c2
-rw-r--r--drivers/net/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/mv643xx_eth.c34
-rw-r--r--drivers/net/myri10ge/myri10ge.c68
-rw-r--r--drivers/net/natsemi.c11
-rw-r--r--drivers/net/ne3210.c15
-rw-r--r--drivers/net/netconsole.c82
-rw-r--r--drivers/net/netxen/netxen_nic.h2
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c117
-rw-r--r--drivers/net/netxen/netxen_nic_init.c3
-rw-r--r--drivers/net/netxen/netxen_nic_main.c55
-rw-r--r--drivers/net/niu.c63
-rw-r--r--drivers/net/niu.h1
-rw-r--r--drivers/net/ns83820.c8
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h4
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c65
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c48
-rw-r--r--drivers/net/pch_gbe/pch_gbe_param.c16
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.c4
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c6
-rw-r--r--drivers/net/pcnet32.c90
-rw-r--r--drivers/net/phy/phy.c12
-rw-r--r--drivers/net/pptp.c10
-rw-r--r--drivers/net/ps3_gelic_net.c34
-rw-r--r--drivers/net/ps3_gelic_net.h3
-rw-r--r--drivers/net/ps3_gelic_wireless.c4
-rw-r--r--drivers/net/qla3xxx.c4
-rw-r--r--drivers/net/qlcnic/qlcnic.h416
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c250
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c417
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h40
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c578
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c66
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c634
-rw-r--r--drivers/net/qlge/qlge.h2
-rw-r--r--drivers/net/qlge/qlge_ethtool.c74
-rw-r--r--drivers/net/qlge/qlge_main.c25
-rw-r--r--drivers/net/r8169.c1006
-rw-r--r--drivers/net/s2io.c197
-rw-r--r--drivers/net/s2io.h13
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/sfc/efx.c47
-rw-r--r--drivers/net/sfc/ethtool.c116
-rw-r--r--drivers/net/sfc/falcon.c4
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sfc/mac.h4
-rw-r--r--drivers/net/sfc/mcdi_mac.c2
-rw-r--r--drivers/net/sfc/mcdi_phy.c8
-rw-r--r--drivers/net/sfc/mdio_10g.c4
-rw-r--r--drivers/net/sfc/net_driver.h12
-rw-r--r--drivers/net/sfc/nic.c6
-rw-r--r--drivers/net/sfc/nic.h6
-rw-r--r--drivers/net/sfc/phy.h8
-rw-r--r--drivers/net/sfc/qt202x_phy.c2
-rw-r--r--drivers/net/sfc/rx.c3
-rw-r--r--drivers/net/sfc/selftest.c11
-rw-r--r--drivers/net/sfc/siena.c2
-rw-r--r--drivers/net/sfc/tenxpress.c4
-rw-r--r--drivers/net/sfc/tx.c9
-rw-r--r--drivers/net/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/skge.c93
-rw-r--r--drivers/net/skge.h1
-rw-r--r--drivers/net/sky2.c195
-rw-r--r--drivers/net/sky2.h1
-rw-r--r--drivers/net/smc-mca.c6
-rw-r--r--drivers/net/smc911x.c4
-rw-r--r--drivers/net/smc91x.c4
-rw-r--r--drivers/net/smsc911x.c448
-rw-r--r--drivers/net/smsc911x.h22
-rw-r--r--drivers/net/spider_net.c15
-rw-r--r--drivers/net/spider_net.h7
-rw-r--r--drivers/net/spider_net_ethtool.c23
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c5
-rw-r--r--drivers/net/stmmac/stmmac.h1
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c33
-rw-r--r--drivers/net/stmmac/stmmac_main.c86
-rw-r--r--drivers/net/sungem.c18
-rw-r--r--drivers/net/sunhme.c25
-rw-r--r--drivers/net/tehuti.c28
-rw-r--r--drivers/net/tg3.c2891
-rw-r--r--drivers/net/tg3.h267
-rw-r--r--drivers/net/tile/tilepro.c2
-rw-r--r--drivers/net/tokenring/madgemc.c2
-rw-r--r--drivers/net/tokenring/olympic.c57
-rw-r--r--drivers/net/tulip/21142.c14
-rw-r--r--drivers/net/tulip/Makefile2
-rw-r--r--drivers/net/tulip/de2104x.c183
-rw-r--r--drivers/net/tulip/de4x5.c4
-rw-r--r--drivers/net/tulip/dmfe.c9
-rw-r--r--drivers/net/tulip/eeprom.c4
-rw-r--r--drivers/net/tulip/interrupt.c48
-rw-r--r--drivers/net/tulip/media.c49
-rw-r--r--drivers/net/tulip/pnic.c22
-rw-r--r--drivers/net/tulip/pnic2.c16
-rw-r--r--drivers/net/tulip/timer.c47
-rw-r--r--drivers/net/tulip/tulip.h8
-rw-r--r--drivers/net/tulip/tulip_core.c50
-rw-r--r--drivers/net/tulip/uli526x.c71
-rw-r--r--drivers/net/tulip/winbond-840.c81
-rw-r--r--drivers/net/tulip/xircom_cb.c268
-rw-r--r--drivers/net/tun.c69
-rw-r--r--drivers/net/typhoon.c58
-rw-r--r--drivers/net/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/asix.c28
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c13
-rw-r--r--drivers/net/usb/dm9601.c6
-rw-r--r--drivers/net/usb/plusb.c32
-rw-r--r--drivers/net/usb/rndis_host.c39
-rw-r--r--drivers/net/usb/rtl8150.c11
-rw-r--r--drivers/net/usb/smsc75xx.c131
-rw-r--r--drivers/net/usb/smsc95xx.c90
-rw-r--r--drivers/net/usb/usbnet.c6
-rw-r--r--drivers/net/veth.c48
-rw-r--r--drivers/net/via-rhine.c242
-rw-r--r--drivers/net/via-velocity.c28
-rw-r--r--drivers/net/virtio_net.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c43
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c73
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h11
-rw-r--r--drivers/net/vxge/vxge-config.c48
-rw-r--r--drivers/net/vxge/vxge-config.h70
-rw-r--r--drivers/net/vxge/vxge-ethtool.c104
-rw-r--r--drivers/net/vxge/vxge-main.c143
-rw-r--r--drivers/net/vxge/vxge-main.h14
-rw-r--r--drivers/net/vxge/vxge-traffic.h2
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/dlci.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c9
-rw-r--r--drivers/net/wan/lapbether.c4
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig20
-rw-r--r--drivers/net/wireless/ath/ar9170/Makefile3
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h258
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c127
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h92
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h179
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h430
-rw-r--r--drivers/net/wireless/ath/ar9170/led.c181
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c519
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c2190
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c1719
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c1008
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h82
-rw-r--r--drivers/net/wireless/ath/ath.h10
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c28
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h31
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c73
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h7
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c65
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h17
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c158
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c158
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c35
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig21
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c45
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h178
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c387
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c125
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c189
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c99
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h47
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h1525
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h935
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h31
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c238
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c100
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c139
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c334
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h296
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c333
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c960
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c216
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c165
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c415
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c778
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c384
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h103
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c135
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c328
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h18
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c228
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h135
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c126
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h82
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c174
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c94
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c164
-rw-r--r--drivers/net/wireless/ath/key.c38
-rw-r--r--drivers/net/wireless/ath/regd.c8
-rw-r--r--drivers/net/wireless/ath/regd_common.h2
-rw-r--r--drivers/net/wireless/b43/main.c69
-rw-r--r--drivers/net/wireless/b43/main.h1
-rw-r--r--drivers/net/wireless/b43/phy_n.c20
-rw-r--r--drivers/net/wireless/b43legacy/main.c52
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c7
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c3
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c20
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h6
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c52
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c22
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c47
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig10
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c170
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c103
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c139
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c1073
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c59
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c73
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c169
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c274
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c247
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c346
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1223
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h113
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h812
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c270
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h121
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c1074
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h349
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c106
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h195
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c294
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h498
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h358
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c375
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h92
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sv-open.c469
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h151
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c116
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c3
-rw-r--r--drivers/net/wireless/libertas/cfg.c61
-rw-r--r--drivers/net/wireless/libertas/cmd.c285
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c46
-rw-r--r--drivers/net/wireless/libertas/debugfs.c41
-rw-r--r--drivers/net/wireless/libertas/decl.h8
-rw-r--r--drivers/net/wireless/libertas/defs.h120
-rw-r--r--drivers/net/wireless/libertas/dev.h20
-rw-r--r--drivers/net/wireless/libertas/ethtool.c3
-rw-r--r--drivers/net/wireless/libertas/host.h33
-rw-r--r--drivers/net/wireless/libertas/if_cs.c92
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c37
-rw-r--r--drivers/net/wireless/libertas/if_spi.c282
-rw-r--r--drivers/net/wireless/libertas/if_spi.h68
-rw-r--r--drivers/net/wireless/libertas/if_usb.c157
-rw-r--r--drivers/net/wireless/libertas/if_usb.h14
-rw-r--r--drivers/net/wireless/libertas/main.c171
-rw-r--r--drivers/net/wireless/libertas/mesh.c218
-rw-r--r--drivers/net/wireless/libertas/mesh.h6
-rw-r--r--drivers/net/wireless/libertas/rx.c41
-rw-r--r--drivers/net/wireless/libertas/tx.c34
-rw-r--r--drivers/net/wireless/libertas/types.h18
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c11
-rw-r--r--drivers/net/wireless/mwifiex/11n.c744
-rw-r--r--drivers/net/wireless/mwifiex/11n.h161
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c298
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.h32
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c616
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h65
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig21
-rw-r--r--drivers/net/wireless/mwifiex/Makefile41
-rw-r--r--drivers/net/wireless/mwifiex/README204
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c1417
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h31
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c360
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c1414
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c770
-rw-r--r--drivers/net/wireless/mwifiex/decl.h129
-rw-r--r--drivers/net/wireless/mwifiex/fw.h1187
-rw-r--r--drivers/net/wireless/mwifiex/init.c645
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h331
-rw-r--r--drivers/net/wireless/mwifiex/join.c1423
-rw-r--r--drivers/net/wireless/mwifiex/main.c1055
-rw-r--r--drivers/net/wireless/mwifiex/main.h1004
-rw-r--r--drivers/net/wireless/mwifiex/scan.c3025
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c1754
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h305
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c1219
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c972
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c406
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c1593
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c200
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c198
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c200
-rw-r--r--drivers/net/wireless/mwifiex/util.c202
-rw-r--r--drivers/net/wireless/mwifiex/util.h32
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c1231
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h110
-rw-r--r--drivers/net/wireless/mwl8k.c875
-rw-r--r--drivers/net/wireless/p54/eeprom.c2
-rw-r--r--drivers/net/wireless/p54/fwio.c31
-rw-r--r--drivers/net/wireless/p54/lmac.h1
-rw-r--r--drivers/net/wireless/p54/main.c31
-rw-r--r--drivers/net/wireless/p54/p54.h2
-rw-r--r--drivers/net/wireless/p54/p54pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig37
-rw-r--r--drivers/net/wireless/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c35
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c34
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c91
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h108
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c369
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c267
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c632
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h122
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c108
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c43
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c139
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c133
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h57
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c61
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c95
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c177
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h34
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c129
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h33
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c70
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c192
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c733
-rw-r--r--drivers/net/wireless/rtlwifi/base.h60
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c106
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h5
-rw-r--r--drivers/net/wireless/rtlwifi/core.c416
-rw-r--r--drivers/net/wireless/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c234
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h5
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c609
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h19
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c261
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h9
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c212
-rw-r--r--drivers/net/wireless/rtlwifi/rc.h9
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c97
-rw-r--r--drivers/net/wireless/rtlwifi/regd.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c525
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c115
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c128
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h76
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h27
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c509
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c223
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h55
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c123
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c180
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.h14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c50
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/Makefile15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h598
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c733
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.h164
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.c654
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.h375
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c2512
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.h79
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/led.c149
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/led.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c1740
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.h101
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h1188
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c546
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c423
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-led.h)19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/table.c634
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/table.h49
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c976
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.h45
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c19
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h173
-rw-r--r--drivers/net/wireless/wl1251/cmd.h4
-rw-r--r--drivers/net/wireless/wl1251/event.c6
-rw-r--r--drivers/net/wireless/wl1251/main.c22
-rw-r--r--drivers/net/wireless/wl1251/ps.c23
-rw-r--r--drivers/net/wireless/wl1251/ps.h2
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h9
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c201
-rw-r--r--drivers/net/wireless/wl12xx/acx.h102
-rw-r--r--drivers/net/wireless/wl12xx/boot.c283
-rw-r--r--drivers/net/wireless/wl12xx/boot.h52
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c113
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h34
-rw-r--r--drivers/net/wireless/wl12xx/conf.h116
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c242
-rw-r--r--drivers/net/wireless/wl12xx/event.c34
-rw-r--r--drivers/net/wireless/wl12xx/event.h5
-rw-r--r--drivers/net/wireless/wl12xx/ini.h98
-rw-r--r--drivers/net/wireless/wl12xx/init.c149
-rw-r--r--drivers/net/wireless/wl12xx/init.h3
-rw-r--r--drivers/net/wireless/wl12xx/io.c11
-rw-r--r--drivers/net/wireless/wl12xx/io.h3
-rw-r--r--drivers/net/wireless/wl12xx/main.c823
-rw-r--r--drivers/net/wireless/wl12xx/ps.c33
-rw-r--r--drivers/net/wireless/wl12xx/ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/reg.h15
-rw-r--r--drivers/net/wireless/wl12xx/rx.c80
-rw-r--r--drivers/net/wireless/wl12xx/scan.c260
-rw-r--r--drivers/net/wireless/wl12xx/scan.h114
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c91
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c20
-rw-r--r--drivers/net/wireless/wl12xx/spi.c17
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c6
-rw-r--r--drivers/net/wireless/wl12xx/tx.c245
-rw-r--r--drivers/net/wireless/wl12xx/tx.h62
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h78
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c262
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h533
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al2230.c198
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al7230b.c240
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_rf2959.c78
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_uw2453.c86
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h2
-rw-r--r--drivers/net/xen-netback/common.h7
-rw-r--r--drivers/net/xen-netback/interface.c84
-rw-r--r--drivers/net/xen-netback/xenbus.c2
-rw-r--r--drivers/net/xen-netfront.c106
-rw-r--r--drivers/s390/net/claw.c22
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.c13
-rw-r--r--drivers/s390/net/lcs.c10
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core.h26
-rw-r--r--drivers/s390/net/qeth_core_main.c150
-rw-r--r--drivers/s390/net/qeth_core_mpc.h17
-rw-r--r--drivers/s390/net/qeth_core_sys.c61
-rw-r--r--drivers/s390/net/qeth_l2_main.c21
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c264
-rw-r--r--drivers/s390/net/qeth_l3_sys.c103
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c11
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c10
-rw-r--r--drivers/scsi/fcoe/fcoe.c11
-rw-r--r--drivers/ssb/driver_chipcommon.c68
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c2
-rw-r--r--drivers/ssb/driver_pcicore.c255
-rw-r--r--drivers/ssb/main.c54
-rw-r--r--drivers/ssb/scan.c7
-rw-r--r--drivers/staging/hv/netvsc_drv.c3
768 files changed, 71217 insertions, 32832 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 177c7d156933..aca706751469 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,6 +68,8 @@ source "drivers/watchdog/Kconfig"
 
 source "drivers/ssb/Kconfig"
 
+source "drivers/bcma/Kconfig"
+
 source "drivers/mfd/Kconfig"
 
 source "drivers/regulator/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 3f135b6fb014..a29527f4ded6 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -110,6 +110,7 @@ obj-$(CONFIG_HID) += hid/
 obj-$(CONFIG_PPC_PS3) += ps3/
 obj-$(CONFIG_OF) += of/
 obj-$(CONFIG_SSB) += ssb/
+obj-$(CONFIG_BCMA) += bcma/
 obj-$(CONFIG_VHOST_NET) += vhost/
 obj-$(CONFIG_VLYNQ) += vlynq/
 obj-$(CONFIG_STAGING) += staging/
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c495fae74200..3230ea0df83c 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1469,10 +1469,7 @@ if (eni_boards) printk(KERN_INFO "loss: %ld\n",ENI_DEV(eni_boards)->lost);
 
 static void bug_int(struct atm_dev *dev,unsigned long reason)
 {
-    struct eni_dev *eni_dev;
-
     DPRINTK(">bug_int\n");
-    eni_dev = ENI_DEV(dev);
     if (reason & MID_DMA_ERR_ACK)
         printk(KERN_CRIT DEV_LABEL "(itf %d): driver error - DMA "
             "error\n",dev->number);
@@ -1900,7 +1897,6 @@ static void eni_close(struct atm_vcc *vcc)
 
 static int eni_open(struct atm_vcc *vcc)
 {
-    struct eni_dev *eni_dev;
     struct eni_vcc *eni_vcc;
     int error;
     short vpi = vcc->vpi;
@@ -1910,7 +1906,6 @@ static int eni_open(struct atm_vcc *vcc)
     EVENT("eni_open\n",0,0);
     if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
         vcc->dev_data = NULL;
-    eni_dev = ENI_DEV(vcc->dev);
     if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
         set_bit(ATM_VF_ADDR,&vcc->flags);
     if (vcc->qos.aal != ATM_AAL0 && vcc->qos.aal != ATM_AAL5)
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 6cf59bf281dc..9a51df4f5b74 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1801,7 +1801,7 @@ return_host_buffers:
 next_rbrq_entry:
         he_dev->rbrq_head = (struct he_rbrq *)
                 ((unsigned long) he_dev->rbrq_base |
-                        RBRQ_MASK(++he_dev->rbrq_head));
+                        RBRQ_MASK(he_dev->rbrq_head + 1));
 
     }
     read_unlock(&vcc_sklist_lock);
@@ -1884,7 +1884,7 @@ next_tbrq_entry:
         pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
         he_dev->tbrq_head = (struct he_tbrq *)
                 ((unsigned long) he_dev->tbrq_base |
-                        TBRQ_MASK(++he_dev->tbrq_head));
+                        TBRQ_MASK(he_dev->tbrq_head + 1));
     }
 
     if (updated) {
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 048f99fe6f83..1f8d724a18bf 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1261,14 +1261,13 @@ idt77252_rx_raw(struct idt77252_dev *card)
                    PCI_DMA_FROMDEVICE);
 
     while (head != tail) {
-        unsigned int vpi, vci, pti;
+        unsigned int vpi, vci;
         u32 header;
 
         header = le32_to_cpu(*(u32 *) &queue->data[0]);
 
         vpi = (header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
         vci = (header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
-        pti = (header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
 
 #ifdef CONFIG_ATM_IDT77252_DEBUG
         if (debug & DBG_RAW_CELL) {
@@ -2709,53 +2708,10 @@ idt77252_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
 static void
 idt77252_collect_stat(struct idt77252_dev *card)
 {
-    u32 cdc, vpec, icc;
+    (void) readl(SAR_REG_CDC);
+    (void) readl(SAR_REG_VPEC);
+    (void) readl(SAR_REG_ICC);
 
-    cdc = readl(SAR_REG_CDC);
-    vpec = readl(SAR_REG_VPEC);
-    icc = readl(SAR_REG_ICC);
-
-#ifdef NOTDEF
-    printk("%s:", card->name);
-
-    if (cdc & 0x7f0000) {
-        char *s = "";
-
-        printk(" [");
-        if (cdc & (1 << 22)) {
-            printk("%sRM ID", s);
-            s = " | ";
-        }
-        if (cdc & (1 << 21)) {
-            printk("%sCON TAB", s);
-            s = " | ";
-        }
-        if (cdc & (1 << 20)) {
-            printk("%sNO FB", s);
-            s = " | ";
-        }
-        if (cdc & (1 << 19)) {
-            printk("%sOAM CRC", s);
-            s = " | ";
-        }
-        if (cdc & (1 << 18)) {
-            printk("%sRM CRC", s);
-            s = " | ";
-        }
-        if (cdc & (1 << 17)) {
-            printk("%sRM FIFO", s);
-            s = " | ";
-        }
-        if (cdc & (1 << 16)) {
-            printk("%sRX FIFO", s);
-            s = " | ";
-        }
-        printk("]");
-    }
-
-    printk(" CDC %04x, VPEC %04x, ICC: %04x\n",
-        cdc & 0xffff, vpec & 0xffff, icc & 0xffff);
-#endif
 }
 
 static irqreturn_t
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 1c674a91f146..dee4f01a64d8 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -613,7 +613,6 @@ static int ia_que_tx (IADEV *iadev) {
     struct sk_buff *skb;
     int num_desc;
     struct atm_vcc *vcc;
-    struct ia_vcc *iavcc;
     num_desc = ia_avail_descs(iadev);
 
     while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
@@ -627,7 +626,6 @@ static int ia_que_tx (IADEV *iadev) {
             printk("Free the SKB on closed vci %d \n", vcc->vci);
             break;
         }
-        iavcc = INPH_IA_VCC(vcc);
         if (ia_pkt_tx (vcc, skb)) {
             skb_queue_head(&iadev->tx_backlog, skb);
         }
@@ -823,8 +821,6 @@ static void IaFrontEndIntr(IADEV *iadev) {
     volatile IA_SUNI *suni;
     volatile ia_mb25_t *mb25;
     volatile suni_pm7345_t *suni_pm7345;
-    u32 intr_status;
-    u_int frmr_intr;
 
     if(iadev->phy_type & FE_25MBIT_PHY) {
         mb25 = (ia_mb25_t*)iadev->phy;
@@ -832,18 +828,18 @@ static void IaFrontEndIntr(IADEV *iadev) {
     } else if (iadev->phy_type & FE_DS3_PHY) {
         suni_pm7345 = (suni_pm7345_t *)iadev->phy;
         /* clear FRMR interrupts */
-        frmr_intr = suni_pm7345->suni_ds3_frm_intr_stat;
+        (void) suni_pm7345->suni_ds3_frm_intr_stat;
         iadev->carrier_detect =
             Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
     } else if (iadev->phy_type & FE_E3_PHY ) {
         suni_pm7345 = (suni_pm7345_t *)iadev->phy;
-        frmr_intr = suni_pm7345->suni_e3_frm_maint_intr_ind;
+        (void) suni_pm7345->suni_e3_frm_maint_intr_ind;
         iadev->carrier_detect =
             Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
     }
     else {
         suni = (IA_SUNI *)iadev->phy;
-        intr_status = suni->suni_rsop_status & 0xff;
+        (void) suni->suni_rsop_status;
         iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
     }
     if (iadev->carrier_detect)
@@ -2660,7 +2656,6 @@ static void ia_close(struct atm_vcc *vcc)
 
 static int ia_open(struct atm_vcc *vcc)
 {
-    IADEV *iadev;
     struct ia_vcc *ia_vcc;
     int error;
     if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
@@ -2668,7 +2663,6 @@ static int ia_open(struct atm_vcc *vcc)
         IF_EVENT(printk("ia: not partially allocated resources\n");)
         vcc->dev_data = NULL;
     }
-    iadev = INPH_IA_DEV(vcc->dev);
     if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
     {
         IF_EVENT(printk("iphase open: unspec part\n");)
@@ -3052,11 +3046,9 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
 {
     IADEV *iadev;
-    struct ia_vcc *iavcc;
     unsigned long flags;
 
     iadev = INPH_IA_DEV(vcc->dev);
-    iavcc = INPH_IA_VCC(vcc);
     if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
     {
         if (!skb)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index cd0ff66469b2..5d1d07645132 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -527,7 +527,6 @@ static int flash_upgrade(struct solos_card *card, int chip)
 {
     const struct firmware *fw;
     const char *fw_name;
-    uint32_t data32 = 0;
     int blocksize = 0;
     int numblocks = 0;
     int offset;
@@ -576,7 +575,7 @@ static int flash_upgrade(struct solos_card *card, int chip)
 
     dev_info(&card->dev->dev, "Changing FPGA to Update mode\n");
     iowrite32(1, card->config_regs + FPGA_MODE);
-    data32 = ioread32(card->config_regs + FPGA_MODE);
+    (void) ioread32(card->config_regs + FPGA_MODE);
 
     /* Set mode to Chip Erase */
     if(chip == 0 || chip == 2)
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
new file mode 100644
index 000000000000..353781b5b78b
--- /dev/null
+++ b/drivers/bcma/Kconfig
@@ -0,0 +1,33 @@
+config BCMA_POSSIBLE
+    bool
+    depends on HAS_IOMEM && HAS_DMA
+    default y
+
+menu "Broadcom specific AMBA"
+    depends on BCMA_POSSIBLE
+
+config BCMA
+    tristate "BCMA support"
+    depends on BCMA_POSSIBLE
+    help
+      Bus driver for Broadcom specific Advanced Microcontroller Bus
+      Architecture.
+
+config BCMA_HOST_PCI_POSSIBLE
+    bool
+    depends on BCMA && PCI = y
+    default y
+
+config BCMA_HOST_PCI
+    bool "Support for BCMA on PCI-host bus"
+    depends on BCMA_HOST_PCI_POSSIBLE
+
+config BCMA_DEBUG
+    bool "BCMA debugging"
+    depends on BCMA
+    help
+      This turns on additional debugging messages.
+
+      If unsure, say N
+
+endmenu
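
As a quick orientation for the options added above, a kernel built for a Broadcom card on a PCI host would typically end up with a fragment like the following in its .config (illustrative only; BCMA is tristate and may equally be built as a module, and the *_POSSIBLE symbols are derived automatically from the dependencies):

CONFIG_BCMA_POSSIBLE=y
CONFIG_BCMA=y
CONFIG_BCMA_HOST_PCI_POSSIBLE=y
CONFIG_BCMA_HOST_PCI=y
# CONFIG_BCMA_DEBUG is not set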
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
new file mode 100644
index 000000000000..0d56245bcb79
--- /dev/null
+++ b/drivers/bcma/Makefile
@@ -0,0 +1,7 @@
+bcma-y += main.o scan.o core.o
+bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
+bcma-y += driver_pci.o
+bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
+obj-$(CONFIG_BCMA) += bcma.o
+
+ccflags-$(CONFIG_BCMA_DEBUG) := -DDEBUG
diff --git a/drivers/bcma/README b/drivers/bcma/README
new file mode 100644
index 000000000000..f7e7ce46c603
--- /dev/null
+++ b/drivers/bcma/README
@@ -0,0 +1,19 @@
+Broadcom introduced new bus as replacement for older SSB. It is based on AMBA,
+however from programming point of view there is nothing AMBA specific we use.
+
+Standard AMBA drivers are platform specific, have hardcoded addresses and use
+AMBA standard fields like CID and PID.
+
+In case of Broadcom's cards every device consists of:
+1) Broadcom specific AMBA device. It is put on AMBA bus, but can not be treated
+   as standard AMBA device. Reading it's CID or PID can cause machine lockup.
+2) AMBA standard devices called ports or wrappers. They have CIDs (AMBA_CID)
+   and PIDs (0x103BB369), but we do not use that info for anything. One of that
+   devices is used for managing Broadcom specific core.
+
+Addresses of AMBA devices are not hardcoded in driver and have to be read from
+EPROM.
+
+In this situation we decided to introduce separated bus. It can contain up to
+16 devices identified by Broadcom specific fields: manufacturer, id, revision
+and class.
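
To make the identification scheme described in the README concrete, here is a minimal, self-contained C sketch of matching a core by those four Broadcom specific fields. It is an illustration only and deliberately does not use the in-kernel bcma structures; the struct name, the EXAMPLE_ANY wildcard and the numeric values are invented for the example.

/* Illustrative only: models matching by manufacturer/id/revision/class.
 * Not the in-kernel bcma API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_ANY 0xFF /* hypothetical "match anything" wildcard */

struct example_core_id {
    uint16_t manuf; /* manufacturer code */
    uint16_t id;    /* core id */
    uint8_t rev;    /* core revision */
    uint8_t class;  /* core class */
};

static bool example_core_match(const struct example_core_id *want,
                               const struct example_core_id *found)
{
    /* manufacturer and id must match; rev and class may be wildcarded */
    return want->manuf == found->manuf &&
           want->id == found->id &&
           (want->rev == EXAMPLE_ANY || want->rev == found->rev) &&
           (want->class == EXAMPLE_ANY || want->class == found->class);
}

int main(void)
{
    struct example_core_id want = { 0x4243, 0x800, EXAMPLE_ANY, EXAMPLE_ANY };
    struct example_core_id found = { 0x4243, 0x800, 0x1f, 0 };

    printf("match: %d\n", example_core_match(&want, &found));
    return 0;
}

The in-kernel bus is expected to perform an equivalent comparison when binding drivers to the up-to-16 cores it scans, which is why only these four fields are needed to describe a device.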
diff --git a/drivers/bcma/TODO b/drivers/bcma/TODO
new file mode 100644
index 000000000000..da7aa99fe81c
--- /dev/null
+++ b/drivers/bcma/TODO
@@ -0,0 +1,3 @@
+- Interrupts
+- Defines for PCI core driver
+- Create kernel Documentation (use info from README)
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
new file mode 100644
index 000000000000..2f72e9c585fd
--- /dev/null
+++ b/drivers/bcma/bcma_private.h
@@ -0,0 +1,28 @@
+#ifndef LINUX_BCMA_PRIVATE_H_
+#define LINUX_BCMA_PRIVATE_H_
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/bcma/bcma.h>
+#include <linux/delay.h>
+
+#define BCMA_CORE_SIZE 0x1000
+
+struct bcma_bus;
+
+/* main.c */
+extern int bcma_bus_register(struct bcma_bus *bus);
+extern void bcma_bus_unregister(struct bcma_bus *bus);
+
+/* scan.c */
+int bcma_bus_scan(struct bcma_bus *bus);
+
+#ifdef CONFIG_BCMA_HOST_PCI
+/* host_pci.c */
+extern int __init bcma_host_pci_init(void);
+extern void __exit bcma_host_pci_exit(void);
+#endif /* CONFIG_BCMA_HOST_PCI */
+
+#endif
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
new file mode 100644
index 000000000000..ced379f7b371
--- /dev/null
+++ b/drivers/bcma/core.c
@@ -0,0 +1,51 @@
+/*
+ * Broadcom specific AMBA
+ * Core ops
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+
+bool bcma_core_is_enabled(struct bcma_device *core)
+{
+    if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
+        != BCMA_IOCTL_CLK)
+        return false;
+    if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
+        return false;
+    return true;
+}
+EXPORT_SYMBOL_GPL(bcma_core_is_enabled);
+
+static void bcma_core_disable(struct bcma_device *core, u32 flags)
+{
+    if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
+        return;
+
+    bcma_awrite32(core, BCMA_IOCTL, flags);
+    bcma_aread32(core, BCMA_IOCTL);
+    udelay(10);
+
+    bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+    udelay(1);
+}
+
+int bcma_core_enable(struct bcma_device *core, u32 flags)
+{
+    bcma_core_disable(core, flags);
+
+    bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
+    bcma_aread32(core, BCMA_IOCTL);
+
+    bcma_awrite32(core, BCMA_RESET_CTL, 0);
+    udelay(1);
+
+    bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
+    bcma_aread32(core, BCMA_IOCTL);
+    udelay(1);
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(bcma_core_enable);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
new file mode 100644
index 000000000000..606102256b44
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon.c
@@ -0,0 +1,89 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon core driver
+ *
+ * Copyright 2005, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+
+static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
+                                         u32 mask, u32 value)
+{
+    value &= mask;
+    value |= bcma_cc_read32(cc, offset) & ~mask;
+    bcma_cc_write32(cc, offset, value);
+
+    return value;
+}
+
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
+{
+    if (cc->core->id.rev >= 11)
+        cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+    cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
+    if (cc->core->id.rev >= 35)
+        cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
+
+    if (cc->core->id.rev >= 20) {
+        bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0);
+        bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0);
+    }
+
+    if (cc->capabilities & BCMA_CC_CAP_PMU)
+        bcma_pmu_init(cc);
+    if (cc->capabilities & BCMA_CC_CAP_PCTL)
+        pr_err("Power control not implemented!\n");
+}
+
+/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
+void bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
+{
+    /* instant NMI */
+    bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks);
+}
+
+void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+    bcma_cc_write32_masked(cc, BCMA_CC_IRQMASK, mask, value);
+}
+
+u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask)
+{
+    return bcma_cc_read32(cc, BCMA_CC_IRQSTAT) & mask;
+}
+
+u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
+{
+    return bcma_cc_read32(cc, BCMA_CC_GPIOIN) & mask;
+}
+
+u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+    return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+}
+
+u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+    return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+}
+
+u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+    return bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);
+
+u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+    return bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+}
+
+u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+    return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
new file mode 100644
index 000000000000..f44177a644c7
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -0,0 +1,134 @@
1/*
2 * Broadcom specific AMBA
3 * ChipCommon Power Management Unit driver
4 *
5 * Copyright 2009, Michael Buesch <mb@bu3sch.de>
6 * Copyright 2007, Broadcom Corporation
7 *
8 * Licensed under the GNU/GPL. See COPYING for details.
9 */
10
11#include "bcma_private.h"
12#include <linux/bcma/bcma.h>
13
14static void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
15 u32 offset, u32 mask, u32 set)
16{
17 u32 value;
18
19 bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
20 bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
21 bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
22 value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
23 value &= mask;
24 value |= set;
25 bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value);
26 bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
27}
28
29static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
30{
31 struct bcma_bus *bus = cc->core->bus;
32
33 switch (bus->chipinfo.id) {
34 case 0x4313:
35 case 0x4331:
36 case 43224:
37 case 43225:
38 break;
39 default:
40 pr_err("PLL init unknown for device 0x%04X\n",
41 bus->chipinfo.id);
42 }
43}
44
45static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
46{
47 struct bcma_bus *bus = cc->core->bus;
48 u32 min_msk = 0, max_msk = 0;
49
50 switch (bus->chipinfo.id) {
51 case 0x4313:
52 min_msk = 0x200D;
53 max_msk = 0xFFFF;
54 break;
55 case 43224:
56 break;
57 default:
58 pr_err("PMU resource config unknown for device 0x%04X\n",
59 bus->chipinfo.id);
60 }
61
62 /* Set the resource masks. */
63 if (min_msk)
64 bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
65 if (max_msk)
66 bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
67}
68
69void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
70{
71 struct bcma_bus *bus = cc->core->bus;
72
73 switch (bus->chipinfo.id) {
74 case 0x4313:
75 case 0x4331:
76 case 43224:
77 break;
78 default:
79 pr_err("PMU switch/regulators init unknown for device "
80 "0x%04X\n", bus->chipinfo.id);
81 }
82}
83
84void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
85{
86 struct bcma_bus *bus = cc->core->bus;
87
88 switch (bus->chipinfo.id) {
89 case 0x4313:
90 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
91 break;
92 case 0x4331:
93 pr_err("Enabling Ext PA lines not implemented\n");
94 break;
95 case 43224:
96 if (bus->chipinfo.rev == 0) {
97 pr_err("Workarounds for 43224 rev 0 not fully "
98 "implemented\n");
99 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
100 } else {
101 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
102 }
103 break;
104 default:
105 pr_err("Workarounds unknown for device 0x%04X\n",
106 bus->chipinfo.id);
107 }
108}
109
110void bcma_pmu_init(struct bcma_drv_cc *cc)
111{
112 u32 pmucap;
113
114 pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
115 cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
116
117 pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
118 pmucap);
119
120 if (cc->pmu.rev == 1)
121 bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
122 ~BCMA_CC_PMU_CTL_NOILPONW);
123 else
124 bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
125 BCMA_CC_PMU_CTL_NOILPONW);
126
127 if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
128 pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");
129
130 bcma_pmu_pll_init(cc);
131 bcma_pmu_resources_init(cc);
132 bcma_pmu_swreg_init(cc);
133 bcma_pmu_workarounds(cc);
134}
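
The chipctl words touched by these workarounds are not mapped directly: bcma_chipco_chipctl_maskset() above selects the word through BCMA_CC_CHIPCTL_ADDR and then updates it through BCMA_CC_CHIPCTL_DATA, with extra reads to flush the posted address write. A call such as the maskset(cc, 0, ~0, 0x7) in bcma_pmu_workarounds() therefore behaves like this sketch (illustrative only, same register names as above):

/* Illustrative sketch of the indirect access done by the maskset helper:
 * select chipctl word 0, read it, OR in bits 0-2, write it back. */
static void example_chipctl_set_bits(struct bcma_drv_cc *cc)
{
	u32 value;

	bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, 0);
	bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);	/* flush the address write */
	value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
	value |= 0x7;
	bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value);
}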
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
new file mode 100644
index 000000000000..e757e4e3c7e2
--- /dev/null
+++ b/drivers/bcma/driver_pci.c
@@ -0,0 +1,163 @@
1/*
2 * Broadcom specific AMBA
3 * PCI Core
4 *
5 * Copyright 2005, Broadcom Corporation
6 * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
7 *
8 * Licensed under the GNU/GPL. See COPYING for details.
9 */
10
11#include "bcma_private.h"
12#include <linux/bcma/bcma.h>
13
14/**************************************************
15 * R/W ops.
16 **************************************************/
17
18static u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
19{
20 pcicore_write32(pc, 0x130, address);
21 pcicore_read32(pc, 0x130);
22 return pcicore_read32(pc, 0x134);
23}
24
25#if 0
26static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
27{
28 pcicore_write32(pc, 0x130, address);
29 pcicore_read32(pc, 0x130);
30 pcicore_write32(pc, 0x134, data);
31}
32#endif
33
34static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
35{
36 const u16 mdio_control = 0x128;
37 const u16 mdio_data = 0x12C;
38 u32 v;
39 int i;
40
41 v = (1 << 30); /* Start of Transaction */
42 v |= (1 << 28); /* Write Transaction */
43 v |= (1 << 17); /* Turnaround */
44 v |= (0x1F << 18);
45 v |= (phy << 4);
46 pcicore_write32(pc, mdio_data, v);
47
48 udelay(10);
49 for (i = 0; i < 200; i++) {
50 v = pcicore_read32(pc, mdio_control);
51 if (v & 0x100 /* Trans complete */)
52 break;
53 msleep(1);
54 }
55}
56
57static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
58{
59 const u16 mdio_control = 0x128;
60 const u16 mdio_data = 0x12C;
61 int max_retries = 10;
62 u16 ret = 0;
63 u32 v;
64 int i;
65
66 v = 0x80; /* Enable Preamble Sequence */
67 v |= 0x2; /* MDIO Clock Divisor */
68 pcicore_write32(pc, mdio_control, v);
69
70 if (pc->core->id.rev >= 10) {
71 max_retries = 200;
72 bcma_pcie_mdio_set_phy(pc, device);
73 }
74
75 v = (1 << 30); /* Start of Transaction */
76 v |= (1 << 29); /* Read Transaction */
77 v |= (1 << 17); /* Turnaround */
78 if (pc->core->id.rev < 10)
79 v |= (u32)device << 22;
80 v |= (u32)address << 18;
81 pcicore_write32(pc, mdio_data, v);
82 /* Wait for the device to complete the transaction */
83 udelay(10);
84 for (i = 0; i < max_retries; i++) {
85 v = pcicore_read32(pc, mdio_control);
86 if (v & 0x100 /* Trans complete */) {
87 udelay(10);
88 ret = pcicore_read32(pc, mdio_data);
89 break;
90 }
91 msleep(1);
92 }
93 pcicore_write32(pc, mdio_control, 0);
94 return ret;
95}
96
97static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
98 u8 address, u16 data)
99{
100 const u16 mdio_control = 0x128;
101 const u16 mdio_data = 0x12C;
102 int max_retries = 10;
103 u32 v;
104 int i;
105
106 v = 0x80; /* Enable Preamble Sequence */
107 v |= 0x2; /* MDIO Clock Divisor */
108 pcicore_write32(pc, mdio_control, v);
109
110 if (pc->core->id.rev >= 10) {
111 max_retries = 200;
112 bcma_pcie_mdio_set_phy(pc, device);
113 }
114
115 v = (1 << 30); /* Start of Transaction */
116 v |= (1 << 28); /* Write Transaction */
117 v |= (1 << 17); /* Turnaround */
118 if (pc->core->id.rev < 10)
119 v |= (u32)device << 22;
120 v |= (u32)address << 18;
121 v |= data;
122 pcicore_write32(pc, mdio_data, v);
123 /* Wait for the device to complete the transaction */
124 udelay(10);
125 for (i = 0; i < max_retries; i++) {
126 v = pcicore_read32(pc, mdio_control);
127 if (v & 0x100 /* Trans complete */)
128 break;
129 msleep(1);
130 }
131 pcicore_write32(pc, mdio_control, 0);
132}
133
134/**************************************************
135 * Workarounds.
136 **************************************************/
137
138static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
139{
140 return (bcma_pcie_read(pc, 0x204) & 0x10) ? 0xC0 : 0x80;
141}
142
143static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
144{
145 const u8 serdes_pll_device = 0x1D;
146 const u8 serdes_rx_device = 0x1F;
147 u16 tmp;
148
149 bcma_pcie_mdio_write(pc, serdes_rx_device, 1 /* Control */,
150 bcma_pcicore_polarity_workaround(pc));
151 tmp = bcma_pcie_mdio_read(pc, serdes_pll_device, 1 /* Control */);
152 if (tmp & 0x4000)
153 bcma_pcie_mdio_write(pc, serdes_pll_device, 1, tmp & ~0x4000);
154}
155
156/**************************************************
157 * Init.
158 **************************************************/
159
160void bcma_core_pci_init(struct bcma_drv_pci *pc)
161{
162 bcma_pcicore_serdes_workaround(pc);
163}
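
The MDIO words above are plain bit fields: bit 30 starts a transaction, bits 29/28 select read or write, bit 17 is the turnaround, the register address sits at bit 18, and on cores older than rev 10 the device address is packed in at bit 22 (newer cores select the PHY separately via bcma_pcie_mdio_set_phy()). A worked example of the word composed by bcma_pcie_mdio_write(), illustrative only:

/* Illustrative sketch: MDIO write to device 0x1F, register 1, data 0x80
 * on a core with rev < 10. */
u32 v = (1 << 30)		/* Start of Transaction */
      | (1 << 28)		/* Write Transaction */
      | (1 << 17)		/* Turnaround */
      | ((u32)0x1F << 22)	/* device address (rev < 10 only) */
      | ((u32)1 << 18)		/* register address */
      | 0x80;			/* data */
/* v == 0x57C60080, written to the 0x12C mdio_data register. */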
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
new file mode 100644
index 000000000000..99dd36e8500b
--- /dev/null
+++ b/drivers/bcma/host_pci.c
@@ -0,0 +1,196 @@
1/*
2 * Broadcom specific AMBA
3 * PCI Host
4 *
5 * Licensed under the GNU/GPL. See COPYING for details.
6 */
7
8#include "bcma_private.h"
9#include <linux/bcma/bcma.h>
10#include <linux/pci.h>
11
12static void bcma_host_pci_switch_core(struct bcma_device *core)
13{
14 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
15 core->addr);
16 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
17 core->wrap);
18 core->bus->mapped_core = core;
19 pr_debug("Switched to core: 0x%X\n", core->id.id);
20}
21
22static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
23{
24 if (core->bus->mapped_core != core)
25 bcma_host_pci_switch_core(core);
26 return ioread8(core->bus->mmio + offset);
27}
28
29static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
30{
31 if (core->bus->mapped_core != core)
32 bcma_host_pci_switch_core(core);
33 return ioread16(core->bus->mmio + offset);
34}
35
36static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
37{
38 if (core->bus->mapped_core != core)
39 bcma_host_pci_switch_core(core);
40 return ioread32(core->bus->mmio + offset);
41}
42
43static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
44 u8 value)
45{
46 if (core->bus->mapped_core != core)
47 bcma_host_pci_switch_core(core);
48 iowrite8(value, core->bus->mmio + offset);
49}
50
51static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
52 u16 value)
53{
54 if (core->bus->mapped_core != core)
55 bcma_host_pci_switch_core(core);
56 iowrite16(value, core->bus->mmio + offset);
57}
58
59static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
60 u32 value)
61{
62 if (core->bus->mapped_core != core)
63 bcma_host_pci_switch_core(core);
64 iowrite32(value, core->bus->mmio + offset);
65}
66
67static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
68{
69 if (core->bus->mapped_core != core)
70 bcma_host_pci_switch_core(core);
71 return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
72}
73
74static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
75 u32 value)
76{
77 if (core->bus->mapped_core != core)
78 bcma_host_pci_switch_core(core);
79 iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
80}
81
82const struct bcma_host_ops bcma_host_pci_ops = {
83 .read8 = bcma_host_pci_read8,
84 .read16 = bcma_host_pci_read16,
85 .read32 = bcma_host_pci_read32,
86 .write8 = bcma_host_pci_write8,
87 .write16 = bcma_host_pci_write16,
88 .write32 = bcma_host_pci_write32,
89 .aread32 = bcma_host_pci_aread32,
90 .awrite32 = bcma_host_pci_awrite32,
91};
92
93static int bcma_host_pci_probe(struct pci_dev *dev,
94 const struct pci_device_id *id)
95{
96 struct bcma_bus *bus;
97 int err = -ENOMEM;
98 const char *name;
99 u32 val;
100
101 /* Alloc */
102 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
103 if (!bus)
104 goto out;
105
106 /* Basic PCI configuration */
107 err = pci_enable_device(dev);
108 if (err)
109 goto err_kfree_bus;
110
111 name = dev_name(&dev->dev);
112 if (dev->driver && dev->driver->name)
113 name = dev->driver->name;
114 err = pci_request_regions(dev, name);
115 if (err)
116 goto err_pci_disable;
117 pci_set_master(dev);
118
119 /* Disable the RETRY_TIMEOUT register (0x41) to keep
120 * PCI Tx retries from interfering with C3 CPU state */
121 pci_read_config_dword(dev, 0x40, &val);
122 if ((val & 0x0000ff00) != 0)
123 pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
124
125 /* SSB needed additional powering up, do we have any AMBA PCI cards? */
126 if (!pci_is_pcie(dev))
127 pr_err("PCI card detected, report problems.\n");
128
129 /* Map MMIO */
130 err = -ENOMEM;
131 bus->mmio = pci_iomap(dev, 0, ~0UL);
132 if (!bus->mmio)
133 goto err_pci_release_regions;
134
135 /* Host specific */
136 bus->host_pci = dev;
137 bus->hosttype = BCMA_HOSTTYPE_PCI;
138 bus->ops = &bcma_host_pci_ops;
139
140 /* Register */
141 err = bcma_bus_register(bus);
142 if (err)
143 goto err_pci_unmap_mmio;
144
145 pci_set_drvdata(dev, bus);
146
147out:
148 return err;
149
150err_pci_unmap_mmio:
151 pci_iounmap(dev, bus->mmio);
152err_pci_release_regions:
153 pci_release_regions(dev);
154err_pci_disable:
155 pci_disable_device(dev);
156err_kfree_bus:
157 kfree(bus);
158 return err;
159}
160
161static void bcma_host_pci_remove(struct pci_dev *dev)
162{
163 struct bcma_bus *bus = pci_get_drvdata(dev);
164
165 bcma_bus_unregister(bus);
166 pci_iounmap(dev, bus->mmio);
167 pci_release_regions(dev);
168 pci_disable_device(dev);
169 kfree(bus);
170 pci_set_drvdata(dev, NULL);
171}
172
173static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
174 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
175 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
176 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
177 { 0, },
178};
179MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
180
181static struct pci_driver bcma_pci_bridge_driver = {
182 .name = "bcma-pci-bridge",
183 .id_table = bcma_pci_bridge_tbl,
184 .probe = bcma_host_pci_probe,
185 .remove = bcma_host_pci_remove,
186};
187
188int __init bcma_host_pci_init(void)
189{
190 return pci_register_driver(&bcma_pci_bridge_driver);
191}
192
193void __exit bcma_host_pci_exit(void)
194{
195 pci_unregister_driver(&bcma_pci_bridge_driver);
196}
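
Only one core's 4 KB register window (plus its wrapper space one BCMA_CORE_SIZE higher) is visible through BAR0 at a time, which is why every accessor re-targets the window with bcma_host_pci_switch_core() before touching bus->mmio. From a client's point of view a register access simply goes through the bus ops; a sketch, illustrative only:

/* Illustrative sketch: read a 32-bit register of an arbitrary core through
 * the host ops; the ops move the BAR0 window to that core if needed. */
static u32 example_core_read32(struct bcma_device *core, u16 offset)
{
	return core->bus->ops->read32(core, offset);
}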
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
new file mode 100644
index 000000000000..be52344ed19d
--- /dev/null
+++ b/drivers/bcma/main.c
@@ -0,0 +1,247 @@
1/*
2 * Broadcom specific AMBA
3 * Bus subsystem
4 *
5 * Licensed under the GNU/GPL. See COPYING for details.
6 */
7
8#include "bcma_private.h"
9#include <linux/bcma/bcma.h>
10
11MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
12MODULE_LICENSE("GPL");
13
14static int bcma_bus_match(struct device *dev, struct device_driver *drv);
15static int bcma_device_probe(struct device *dev);
16static int bcma_device_remove(struct device *dev);
17
18static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
19{
20 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
21 return sprintf(buf, "0x%03X\n", core->id.manuf);
22}
23static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
24{
25 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
26 return sprintf(buf, "0x%03X\n", core->id.id);
27}
28static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
29{
30 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
31 return sprintf(buf, "0x%02X\n", core->id.rev);
32}
33static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
34{
35 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
36 return sprintf(buf, "0x%X\n", core->id.class);
37}
38static struct device_attribute bcma_device_attrs[] = {
39 __ATTR_RO(manuf),
40 __ATTR_RO(id),
41 __ATTR_RO(rev),
42 __ATTR_RO(class),
43 __ATTR_NULL,
44};
45
46static struct bus_type bcma_bus_type = {
47 .name = "bcma",
48 .match = bcma_bus_match,
49 .probe = bcma_device_probe,
50 .remove = bcma_device_remove,
51 .dev_attrs = bcma_device_attrs,
52};
53
54static struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
55{
56 struct bcma_device *core;
57
58 list_for_each_entry(core, &bus->cores, list) {
59 if (core->id.id == coreid)
60 return core;
61 }
62 return NULL;
63}
64
65static void bcma_release_core_dev(struct device *dev)
66{
67 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
68 kfree(core);
69}
70
71static int bcma_register_cores(struct bcma_bus *bus)
72{
73 struct bcma_device *core;
74 int err, dev_id = 0;
75
76 list_for_each_entry(core, &bus->cores, list) {
77 /* We handle these cores ourselves */
78 switch (core->id.id) {
79 case BCMA_CORE_CHIPCOMMON:
80 case BCMA_CORE_PCI:
81 case BCMA_CORE_PCIE:
82 continue;
83 }
84
85 core->dev.release = bcma_release_core_dev;
86 core->dev.bus = &bcma_bus_type;
87 dev_set_name(&core->dev, "bcma%d:%d", 0/*bus->num*/, dev_id);
88
89 switch (bus->hosttype) {
90 case BCMA_HOSTTYPE_PCI:
91 core->dev.parent = &bus->host_pci->dev;
92 break;
93 case BCMA_HOSTTYPE_NONE:
94 case BCMA_HOSTTYPE_SDIO:
95 break;
96 }
97
98 err = device_register(&core->dev);
99 if (err) {
100 pr_err("Could not register dev for core 0x%03X\n",
101 core->id.id);
102 continue;
103 }
104 core->dev_registered = true;
105 dev_id++;
106 }
107
108 return 0;
109}
110
111static void bcma_unregister_cores(struct bcma_bus *bus)
112{
113 struct bcma_device *core;
114
115 list_for_each_entry(core, &bus->cores, list) {
116 if (core->dev_registered)
117 device_unregister(&core->dev);
118 }
119}
120
121int bcma_bus_register(struct bcma_bus *bus)
122{
123 int err;
124 struct bcma_device *core;
125
126 /* Scan for devices (cores) */
127 err = bcma_bus_scan(bus);
128 if (err) {
129 pr_err("Failed to scan: %d\n", err);
130 return err;
131 }
132
133 /* Init CC core */
134 core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
135 if (core) {
136 bus->drv_cc.core = core;
137 bcma_core_chipcommon_init(&bus->drv_cc);
138 }
139
140 /* Init PCIE core */
141 core = bcma_find_core(bus, BCMA_CORE_PCIE);
142 if (core) {
143 bus->drv_pci.core = core;
144 bcma_core_pci_init(&bus->drv_pci);
145 }
146
147 /* Register found cores */
148 bcma_register_cores(bus);
149
150 pr_info("Bus registered\n");
151
152 return 0;
153}
154EXPORT_SYMBOL_GPL(bcma_bus_register);
155
156void bcma_bus_unregister(struct bcma_bus *bus)
157{
158 bcma_unregister_cores(bus);
159}
160EXPORT_SYMBOL_GPL(bcma_bus_unregister);
161
162int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
163{
164 drv->drv.name = drv->name;
165 drv->drv.bus = &bcma_bus_type;
166 drv->drv.owner = owner;
167
168 return driver_register(&drv->drv);
169}
170EXPORT_SYMBOL_GPL(__bcma_driver_register);
171
172void bcma_driver_unregister(struct bcma_driver *drv)
173{
174 driver_unregister(&drv->drv);
175}
176EXPORT_SYMBOL_GPL(bcma_driver_unregister);
177
178static int bcma_bus_match(struct device *dev, struct device_driver *drv)
179{
180 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
181 struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
182 const struct bcma_device_id *cid = &core->id;
183 const struct bcma_device_id *did;
184
185 for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
186 if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
187 (did->id == cid->id || did->id == BCMA_ANY_ID) &&
188 (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
189 (did->class == cid->class || did->class == BCMA_ANY_CLASS))
190 return 1;
191 }
192 return 0;
193}
194
195static int bcma_device_probe(struct device *dev)
196{
197 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
198 struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
199 drv);
200 int err = 0;
201
202 if (adrv->probe)
203 err = adrv->probe(core);
204
205 return err;
206}
207
208static int bcma_device_remove(struct device *dev)
209{
210 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
211 struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
212 drv);
213
214 if (adrv->remove)
215 adrv->remove(core);
216
217 return 0;
218}
219
220static int __init bcma_modinit(void)
221{
222 int err;
223
224 err = bus_register(&bcma_bus_type);
225 if (err)
226 return err;
227
228#ifdef CONFIG_BCMA_HOST_PCI
229 err = bcma_host_pci_init();
230 if (err) {
231 pr_err("PCI host initialization failed\n");
232 err = 0;
233 }
234#endif
235
236 return err;
237}
238fs_initcall(bcma_modinit);
239
240static void __exit bcma_modexit(void)
241{
242#ifdef CONFIG_BCMA_HOST_PCI
243 bcma_host_pci_exit();
244#endif
245 bus_unregister(&bcma_bus_type);
246}
247module_exit(bcma_modexit)
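
bcma_bus_match() accepts BCMA_ANY_* wildcards for each field, so a client driver only needs an id table plus probe/remove callbacks. A minimal skeleton, illustrative only -- the BCMA_CORE()/BCMA_CORETABLE_END table macros and the bcma_driver_register() wrapper around __bcma_driver_register() are assumed to come from the accompanying <linux/bcma/bcma.h>:

/* Illustrative sketch: a minimal client driver for the bcma bus. */
static const struct bcma_device_id example_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};

static int example_probe(struct bcma_device *core)
{
	/* map resources, request the IRQ, ... */
	return 0;
}

static void example_remove(struct bcma_device *core)
{
}

static struct bcma_driver example_driver = {
	.name		= "example-bcma",
	.id_table	= example_bcma_tbl,
	.probe		= example_probe,
	.remove		= example_remove,
};
/* registered from module init with bcma_driver_register(&example_driver) */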
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
new file mode 100644
index 000000000000..40d7dcce8933
--- /dev/null
+++ b/drivers/bcma/scan.c
@@ -0,0 +1,360 @@
1/*
2 * Broadcom specific AMBA
3 * Bus scanning
4 *
5 * Licensed under the GNU/GPL. See COPYING for details.
6 */
7
8#include "scan.h"
9#include "bcma_private.h"
10
11#include <linux/bcma/bcma.h>
12#include <linux/bcma/bcma_regs.h>
13#include <linux/pci.h>
14#include <linux/io.h>
15#include <linux/dma-mapping.h>
16#include <linux/slab.h>
17
18struct bcma_device_id_name {
19 u16 id;
20 const char *name;
21};
22struct bcma_device_id_name bcma_device_names[] = {
23 { BCMA_CORE_OOB_ROUTER, "OOB Router" },
24 { BCMA_CORE_INVALID, "Invalid" },
25 { BCMA_CORE_CHIPCOMMON, "ChipCommon" },
26 { BCMA_CORE_ILINE20, "ILine 20" },
27 { BCMA_CORE_SRAM, "SRAM" },
28 { BCMA_CORE_SDRAM, "SDRAM" },
29 { BCMA_CORE_PCI, "PCI" },
30 { BCMA_CORE_MIPS, "MIPS" },
31 { BCMA_CORE_ETHERNET, "Fast Ethernet" },
32 { BCMA_CORE_V90, "V90" },
33 { BCMA_CORE_USB11_HOSTDEV, "USB 1.1 Hostdev" },
34 { BCMA_CORE_ADSL, "ADSL" },
35 { BCMA_CORE_ILINE100, "ILine 100" },
36 { BCMA_CORE_IPSEC, "IPSEC" },
37 { BCMA_CORE_UTOPIA, "UTOPIA" },
38 { BCMA_CORE_PCMCIA, "PCMCIA" },
39 { BCMA_CORE_INTERNAL_MEM, "Internal Memory" },
40 { BCMA_CORE_MEMC_SDRAM, "MEMC SDRAM" },
41 { BCMA_CORE_OFDM, "OFDM" },
42 { BCMA_CORE_EXTIF, "EXTIF" },
43 { BCMA_CORE_80211, "IEEE 802.11" },
44 { BCMA_CORE_PHY_A, "PHY A" },
45 { BCMA_CORE_PHY_B, "PHY B" },
46 { BCMA_CORE_PHY_G, "PHY G" },
47 { BCMA_CORE_MIPS_3302, "MIPS 3302" },
48 { BCMA_CORE_USB11_HOST, "USB 1.1 Host" },
49 { BCMA_CORE_USB11_DEV, "USB 1.1 Device" },
50 { BCMA_CORE_USB20_HOST, "USB 2.0 Host" },
51 { BCMA_CORE_USB20_DEV, "USB 2.0 Device" },
52 { BCMA_CORE_SDIO_HOST, "SDIO Host" },
53 { BCMA_CORE_ROBOSWITCH, "Roboswitch" },
54 { BCMA_CORE_PARA_ATA, "PATA" },
55 { BCMA_CORE_SATA_XORDMA, "SATA XOR-DMA" },
56 { BCMA_CORE_ETHERNET_GBIT, "GBit Ethernet" },
57 { BCMA_CORE_PCIE, "PCIe" },
58 { BCMA_CORE_PHY_N, "PHY N" },
59 { BCMA_CORE_SRAM_CTL, "SRAM Controller" },
60 { BCMA_CORE_MINI_MACPHY, "Mini MACPHY" },
61 { BCMA_CORE_ARM_1176, "ARM 1176" },
62 { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
63 { BCMA_CORE_PHY_LP, "PHY LP" },
64 { BCMA_CORE_PMU, "PMU" },
65 { BCMA_CORE_PHY_SSN, "PHY SSN" },
66 { BCMA_CORE_SDIO_DEV, "SDIO Device" },
67 { BCMA_CORE_ARM_CM3, "ARM CM3" },
68 { BCMA_CORE_PHY_HT, "PHY HT" },
69 { BCMA_CORE_MIPS_74K, "MIPS 74K" },
70 { BCMA_CORE_MAC_GBIT, "GBit MAC" },
71 { BCMA_CORE_DDR12_MEM_CTL, "DDR1/DDR2 Memory Controller" },
72 { BCMA_CORE_PCIE_RC, "PCIe Root Complex" },
73 { BCMA_CORE_OCP_OCP_BRIDGE, "OCP to OCP Bridge" },
74 { BCMA_CORE_SHARED_COMMON, "Common Shared" },
75 { BCMA_CORE_OCP_AHB_BRIDGE, "OCP to AHB Bridge" },
76 { BCMA_CORE_SPI_HOST, "SPI Host" },
77 { BCMA_CORE_I2S, "I2S" },
78 { BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" },
79 { BCMA_CORE_SHIM, "SHIM" },
80 { BCMA_CORE_DEFAULT, "Default" },
81};
82const char *bcma_device_name(struct bcma_device_id *id)
83{
84 int i;
85
86 if (id->manuf == BCMA_MANUF_BCM) {
87 for (i = 0; i < ARRAY_SIZE(bcma_device_names); i++) {
88 if (bcma_device_names[i].id == id->id)
89 return bcma_device_names[i].name;
90 }
91 }
92 return "UNKNOWN";
93}
94
95static u32 bcma_scan_read32(struct bcma_bus *bus, u8 current_coreidx,
96 u16 offset)
97{
98 return readl(bus->mmio + offset);
99}
100
101static void bcma_scan_switch_core(struct bcma_bus *bus, u32 addr)
102{
103 if (bus->hosttype == BCMA_HOSTTYPE_PCI)
104 pci_write_config_dword(bus->host_pci, BCMA_PCI_BAR0_WIN,
105 addr);
106}
107
108static u32 bcma_erom_get_ent(struct bcma_bus *bus, u32 **eromptr)
109{
110 u32 ent = readl(*eromptr);
111 (*eromptr)++;
112 return ent;
113}
114
115static void bcma_erom_push_ent(u32 **eromptr)
116{
117 (*eromptr)--;
118}
119
120static s32 bcma_erom_get_ci(struct bcma_bus *bus, u32 **eromptr)
121{
122 u32 ent = bcma_erom_get_ent(bus, eromptr);
123 if (!(ent & SCAN_ER_VALID))
124 return -ENOENT;
125 if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_CI)
126 return -ENOENT;
127 return ent;
128}
129
130static bool bcma_erom_is_end(struct bcma_bus *bus, u32 **eromptr)
131{
132 u32 ent = bcma_erom_get_ent(bus, eromptr);
133 bcma_erom_push_ent(eromptr);
134 return (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID));
135}
136
137static bool bcma_erom_is_bridge(struct bcma_bus *bus, u32 **eromptr)
138{
139 u32 ent = bcma_erom_get_ent(bus, eromptr);
140 bcma_erom_push_ent(eromptr);
141 return (((ent & SCAN_ER_VALID)) &&
142 ((ent & SCAN_ER_TAGX) == SCAN_ER_TAG_ADDR) &&
143 ((ent & SCAN_ADDR_TYPE) == SCAN_ADDR_TYPE_BRIDGE));
144}
145
146static void bcma_erom_skip_component(struct bcma_bus *bus, u32 **eromptr)
147{
148 u32 ent;
149 while (1) {
150 ent = bcma_erom_get_ent(bus, eromptr);
151 if ((ent & SCAN_ER_VALID) &&
152 ((ent & SCAN_ER_TAG) == SCAN_ER_TAG_CI))
153 break;
154 if (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID))
155 break;
156 }
157 bcma_erom_push_ent(eromptr);
158}
159
160static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 **eromptr)
161{
162 u32 ent = bcma_erom_get_ent(bus, eromptr);
163 if (!(ent & SCAN_ER_VALID))
164 return -ENOENT;
165 if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_MP)
166 return -ENOENT;
167 return ent;
168}
169
170static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 **eromptr,
171 u32 type, u8 port)
172{
173 u32 addrl, addrh, sizel, sizeh = 0;
174 u32 size;
175
176 u32 ent = bcma_erom_get_ent(bus, eromptr);
177 if ((!(ent & SCAN_ER_VALID)) ||
178 ((ent & SCAN_ER_TAGX) != SCAN_ER_TAG_ADDR) ||
179 ((ent & SCAN_ADDR_TYPE) != type) ||
180 (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
181 bcma_erom_push_ent(eromptr);
182 return -EINVAL;
183 }
184
185 addrl = ent & SCAN_ADDR_ADDR;
186 if (ent & SCAN_ADDR_AG32)
187 addrh = bcma_erom_get_ent(bus, eromptr);
188 else
189 addrh = 0;
190
191 if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
192 size = bcma_erom_get_ent(bus, eromptr);
193 sizel = size & SCAN_SIZE_SZ;
194 if (size & SCAN_SIZE_SG32)
195 sizeh = bcma_erom_get_ent(bus, eromptr);
196 } else
197 sizel = SCAN_ADDR_SZ_BASE <<
198 ((ent & SCAN_ADDR_SZ) >> SCAN_ADDR_SZ_SHIFT);
199
200 return addrl;
201}
202
203int bcma_bus_scan(struct bcma_bus *bus)
204{
205 u32 erombase;
206 u32 __iomem *eromptr, *eromend;
207
208 s32 cia, cib;
209 u8 ports[2], wrappers[2];
210
211 s32 tmp;
212 u8 i, j;
213
214 int err;
215
216 INIT_LIST_HEAD(&bus->cores);
217 bus->nr_cores = 0;
218
219 bcma_scan_switch_core(bus, BCMA_ADDR_BASE);
220
221 tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID);
222 bus->chipinfo.id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
223 bus->chipinfo.rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
224 bus->chipinfo.pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
225
226 erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
227 eromptr = bus->mmio;
228 eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
229
230 bcma_scan_switch_core(bus, erombase);
231
232 while (eromptr < eromend) {
233 struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
234 if (!core)
235 return -ENOMEM;
236 INIT_LIST_HEAD(&core->list);
237 core->bus = bus;
238
239 /* get CIs */
240 cia = bcma_erom_get_ci(bus, &eromptr);
241 if (cia < 0) {
242 bcma_erom_push_ent(&eromptr);
243 if (bcma_erom_is_end(bus, &eromptr))
244 break;
245 err = -EILSEQ;
246 goto out;
247 }
248 cib = bcma_erom_get_ci(bus, &eromptr);
249 if (cib < 0) {
250 err = -EILSEQ;
251 goto out;
252 }
253
254 /* parse CIs */
255 core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT;
256 core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT;
257 core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT;
258 ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT;
259 ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT;
260 wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT;
261 wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;
262 core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT;
263
264 if (((core->id.manuf == BCMA_MANUF_ARM) &&
265 (core->id.id == 0xFFF)) ||
266 (ports[1] == 0)) {
267 bcma_erom_skip_component(bus, &eromptr);
268 continue;
269 }
270
271 /* check if component is a core at all */
272 if (wrappers[0] + wrappers[1] == 0) {
273 /* we could save addrl of the router
274 if (cid == BCMA_CORE_OOB_ROUTER)
275 */
276 bcma_erom_skip_component(bus, &eromptr);
277 continue;
278 }
279
280 if (bcma_erom_is_bridge(bus, &eromptr)) {
281 bcma_erom_skip_component(bus, &eromptr);
282 continue;
283 }
284
285 /* get & parse master ports */
286 for (i = 0; i < ports[0]; i++) {
287 s32 mst_port_d = bcma_erom_get_mst_port(bus, &eromptr);
288 if (mst_port_d < 0) {
289 err = -EILSEQ;
290 goto out;
291 }
292 }
293
294 /* get & parse slave ports */
295 for (i = 0; i < ports[1]; i++) {
296 for (j = 0; ; j++) {
297 tmp = bcma_erom_get_addr_desc(bus, &eromptr,
298 SCAN_ADDR_TYPE_SLAVE, i);
299 if (tmp < 0) {
300 /* no more entries for port _i_ */
301 /* pr_debug("erom: slave port %d "
302 * "has %d descriptors\n", i, j); */
303 break;
304 } else {
305 if (i == 0 && j == 0)
306 core->addr = tmp;
307 }
308 }
309 }
310
311 /* get & parse master wrappers */
312 for (i = 0; i < wrappers[0]; i++) {
313 for (j = 0; ; j++) {
314 tmp = bcma_erom_get_addr_desc(bus, &eromptr,
315 SCAN_ADDR_TYPE_MWRAP, i);
316 if (tmp < 0) {
317 /* no more entries for port _i_ */
318 /* pr_debug("erom: master wrapper %d "
319 * "has %d descriptors\n", i, j); */
320 break;
321 } else {
322 if (i == 0 && j == 0)
323 core->wrap = tmp;
324 }
325 }
326 }
327
328 /* get & parse slave wrappers */
329 for (i = 0; i < wrappers[1]; i++) {
330 u8 hack = (ports[1] == 1) ? 0 : 1;
331 for (j = 0; ; j++) {
332 tmp = bcma_erom_get_addr_desc(bus, &eromptr,
333 SCAN_ADDR_TYPE_SWRAP, i + hack);
334 if (tmp < 0) {
335 /* no more entries for port _i_ */
336 /* pr_debug("erom: slave wrapper %d "
337 * "has %d descriptors\n", i, j); */
338 break;
339 } else {
340 if (wrappers[0] == 0 && !i && !j)
341 core->wrap = tmp;
342 }
343 }
344 }
345
346 pr_info("Core %d found: %s "
347 "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
348 bus->nr_cores, bcma_device_name(&core->id),
349 core->id.manuf, core->id.id, core->id.rev,
350 core->id.class);
351
352 core->core_index = bus->nr_cores++;
353 list_add(&core->list, &bus->cores);
354 continue;
355out:
356 return err;
357 }
358
359 return 0;
360}
diff --git a/drivers/bcma/scan.h b/drivers/bcma/scan.h
new file mode 100644
index 000000000000..113e6a66884c
--- /dev/null
+++ b/drivers/bcma/scan.h
@@ -0,0 +1,56 @@
1#ifndef BCMA_SCAN_H_
2#define BCMA_SCAN_H_
3
4#define BCMA_ADDR_BASE 0x18000000
5#define BCMA_WRAP_BASE 0x18100000
6
7#define SCAN_ER_VALID 0x00000001
8#define SCAN_ER_TAGX 0x00000006 /* we have to ignore 0x8 bit when checking tag for SCAN_ER_TAG_ADDR */
9#define SCAN_ER_TAG 0x0000000E
10#define SCAN_ER_TAG_CI 0x00000000
11#define SCAN_ER_TAG_MP 0x00000002
12#define SCAN_ER_TAG_ADDR 0x00000004
13#define SCAN_ER_TAG_END 0x0000000E
14#define SCAN_ER_BAD 0xFFFFFFFF
15
16#define SCAN_CIA_CLASS 0x000000F0
17#define SCAN_CIA_CLASS_SHIFT 4
18#define SCAN_CIA_ID 0x000FFF00
19#define SCAN_CIA_ID_SHIFT 8
20#define SCAN_CIA_MANUF 0xFFF00000
21#define SCAN_CIA_MANUF_SHIFT 20
22
23#define SCAN_CIB_NMP 0x000001F0
24#define SCAN_CIB_NMP_SHIFT 4
25#define SCAN_CIB_NSP 0x00003E00
26#define SCAN_CIB_NSP_SHIFT 9
27#define SCAN_CIB_NMW 0x0007C000
28#define SCAN_CIB_NMW_SHIFT 14
29#define SCAN_CIB_NSW 0x00F80000
30#define SCAN_CIB_NSW_SHIFT 19
31#define SCAN_CIB_REV 0xFF000000
32#define SCAN_CIB_REV_SHIFT 24
33
34#define SCAN_ADDR_AG32 0x00000008
35#define SCAN_ADDR_SZ 0x00000030
36#define SCAN_ADDR_SZ_SHIFT 4
37#define SCAN_ADDR_SZ_4K 0x00000000
38#define SCAN_ADDR_SZ_8K 0x00000010
39#define SCAN_ADDR_SZ_16K 0x00000020
40#define SCAN_ADDR_SZ_SZD 0x00000030
41#define SCAN_ADDR_TYPE 0x000000C0
42#define SCAN_ADDR_TYPE_SLAVE 0x00000000
43#define SCAN_ADDR_TYPE_BRIDGE 0x00000040
44#define SCAN_ADDR_TYPE_SWRAP 0x00000080
45#define SCAN_ADDR_TYPE_MWRAP 0x000000C0
46#define SCAN_ADDR_PORT 0x00000F00
47#define SCAN_ADDR_PORT_SHIFT 8
48#define SCAN_ADDR_ADDR 0xFFFFF000
49
50#define SCAN_ADDR_SZ_BASE 0x00001000 /* 4KB */
51
52#define SCAN_SIZE_SZ_ALIGN 0x00000FFF
53#define SCAN_SIZE_SZ 0xFFFFF000
54#define SCAN_SIZE_SG32 0x00000008
55
56#endif /* BCMA_SCAN_H_ */
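
A worked example of how scan.c decodes a descriptor pair with these masks, illustrative only (using the Broadcom manufacturer code 0x4BF and the 802.11 core id 0x812):

/* Illustrative sketch:
 * CIA = 0x4BF81201 -> manuf = 0x4BF (Broadcom), id = 0x812 (802.11 core),
 *                     class = 0, low bits 0x1 = SCAN_ER_TAG_CI | SCAN_ER_VALID.
 * CIB = 0x17084211 -> rev = 0x17, 1 master port, 1 slave port,
 *                     1 master wrapper, 1 slave wrapper.
 * e.g. id  = (0x4BF81201 & SCAN_CIA_ID)  >> SCAN_CIA_ID_SHIFT  = 0x812 and
 *      nsw = (0x17084211 & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT = 1.
 */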
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 8e0de9a05867..11b41fd40c27 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
188 The core driver to support Marvell Bluetooth devices. 188 The core driver to support Marvell Bluetooth devices.
189 189
190 This driver is required if you want to support 190 This driver is required if you want to support
191 Marvell Bluetooth devices, such as 8688. 191 Marvell Bluetooth devices, such as 8688/8787.
192 192
193 Say Y here to compile Marvell Bluetooth driver 193 Say Y here to compile Marvell Bluetooth driver
194 into the kernel or say M to compile it as module. 194 into the kernel or say M to compile it as module.
@@ -201,7 +201,7 @@ config BT_MRVL_SDIO
201 The driver for Marvell Bluetooth chipsets with SDIO interface. 201 The driver for Marvell Bluetooth chipsets with SDIO interface.
202 202
203 This driver is required if you want to use Marvell Bluetooth 203 This driver is required if you want to use Marvell Bluetooth
204 devices with SDIO interface. Currently only SD8688 chipset is 204 devices with SDIO interface. Currently SD8688/SD8787 chipsets are
205 supported. 205 supported.
206 206
207 Say Y here to compile support for Marvell BT-over-SDIO driver 207 Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 5577ed656e2f..6bacef368fab 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -62,6 +62,7 @@ static struct usb_device_id ath3k_table[] = {
62 62
63 /* Atheros AR3011 with sflash firmware*/ 63 /* Atheros AR3011 with sflash firmware*/
64 { USB_DEVICE(0x0CF3, 0x3002) }, 64 { USB_DEVICE(0x0CF3, 0x3002) },
65 { USB_DEVICE(0x13d3, 0x3304) },
65 66
66 /* Atheros AR9285 Malbec with sflash firmware */ 67 /* Atheros AR9285 Malbec with sflash firmware */
67 { USB_DEVICE(0x03F0, 0x311D) }, 68 { USB_DEVICE(0x03F0, 0x311D) },
@@ -138,9 +139,6 @@ static int ath3k_load_firmware(struct usb_device *udev,
138 count -= size; 139 count -= size;
139 } 140 }
140 141
141 kfree(send_buf);
142 return 0;
143
144error: 142error:
145 kfree(send_buf); 143 kfree(send_buf);
146 return err; 144 return err;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index dcc2a6ec23f0..7f521d4ac657 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -49,15 +49,59 @@
49static u8 user_rmmod; 49static u8 user_rmmod;
50static u8 sdio_ireg; 50static u8 sdio_ireg;
51 51
52static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
53 .cfg = 0x03,
54 .host_int_mask = 0x04,
55 .host_intstatus = 0x05,
56 .card_status = 0x20,
57 .sq_read_base_addr_a0 = 0x10,
58 .sq_read_base_addr_a1 = 0x11,
59 .card_fw_status0 = 0x40,
60 .card_fw_status1 = 0x41,
61 .card_rx_len = 0x42,
62 .card_rx_unit = 0x43,
63 .io_port_0 = 0x00,
64 .io_port_1 = 0x01,
65 .io_port_2 = 0x02,
66};
67static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
68 .cfg = 0x00,
69 .host_int_mask = 0x02,
70 .host_intstatus = 0x03,
71 .card_status = 0x30,
72 .sq_read_base_addr_a0 = 0x40,
73 .sq_read_base_addr_a1 = 0x41,
74 .card_revision = 0x5c,
75 .card_fw_status0 = 0x60,
76 .card_fw_status1 = 0x61,
77 .card_rx_len = 0x62,
78 .card_rx_unit = 0x63,
79 .io_port_0 = 0x78,
80 .io_port_1 = 0x79,
81 .io_port_2 = 0x7a,
82};
83
52static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = { 84static const struct btmrvl_sdio_device btmrvl_sdio_sd6888 = {
53 .helper = "sd8688_helper.bin", 85 .helper = "sd8688_helper.bin",
54 .firmware = "sd8688.bin", 86 .firmware = "sd8688.bin",
87 .reg = &btmrvl_reg_8688,
88 .sd_blksz_fw_dl = 64,
89};
90
91static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
92 .helper = NULL,
93 .firmware = "mrvl/sd8787_uapsta.bin",
94 .reg = &btmrvl_reg_8787,
95 .sd_blksz_fw_dl = 256,
55}; 96};
56 97
57static const struct sdio_device_id btmrvl_sdio_ids[] = { 98static const struct sdio_device_id btmrvl_sdio_ids[] = {
58 /* Marvell SD8688 Bluetooth device */ 99 /* Marvell SD8688 Bluetooth device */
59 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), 100 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
60 .driver_data = (unsigned long) &btmrvl_sdio_sd6888 }, 101 .driver_data = (unsigned long) &btmrvl_sdio_sd6888 },
102 /* Marvell SD8787 Bluetooth device */
103 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
104 .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
61 105
62 { } /* Terminating entry */ 106 { } /* Terminating entry */
63}; 107};
@@ -69,7 +113,7 @@ static int btmrvl_sdio_get_rx_unit(struct btmrvl_sdio_card *card)
69 u8 reg; 113 u8 reg;
70 int ret; 114 int ret;
71 115
72 reg = sdio_readb(card->func, CARD_RX_UNIT_REG, &ret); 116 reg = sdio_readb(card->func, card->reg->card_rx_unit, &ret);
73 if (!ret) 117 if (!ret)
74 card->rx_unit = reg; 118 card->rx_unit = reg;
75 119
@@ -83,11 +127,11 @@ static int btmrvl_sdio_read_fw_status(struct btmrvl_sdio_card *card, u16 *dat)
83 127
84 *dat = 0; 128 *dat = 0;
85 129
86 fws0 = sdio_readb(card->func, CARD_FW_STATUS0_REG, &ret); 130 fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret);
87 if (ret) 131 if (ret)
88 return -EIO; 132 return -EIO;
89 133
90 fws1 = sdio_readb(card->func, CARD_FW_STATUS1_REG, &ret); 134 fws1 = sdio_readb(card->func, card->reg->card_fw_status1, &ret);
91 if (ret) 135 if (ret)
92 return -EIO; 136 return -EIO;
93 137
@@ -101,7 +145,7 @@ static int btmrvl_sdio_read_rx_len(struct btmrvl_sdio_card *card, u16 *dat)
101 u8 reg; 145 u8 reg;
102 int ret; 146 int ret;
103 147
104 reg = sdio_readb(card->func, CARD_RX_LEN_REG, &ret); 148 reg = sdio_readb(card->func, card->reg->card_rx_len, &ret);
105 if (!ret) 149 if (!ret)
106 *dat = (u16) reg << card->rx_unit; 150 *dat = (u16) reg << card->rx_unit;
107 151
@@ -113,7 +157,7 @@ static int btmrvl_sdio_enable_host_int_mask(struct btmrvl_sdio_card *card,
113{ 157{
114 int ret; 158 int ret;
115 159
116 sdio_writeb(card->func, mask, HOST_INT_MASK_REG, &ret); 160 sdio_writeb(card->func, mask, card->reg->host_int_mask, &ret);
117 if (ret) { 161 if (ret) {
118 BT_ERR("Unable to enable the host interrupt!"); 162 BT_ERR("Unable to enable the host interrupt!");
119 ret = -EIO; 163 ret = -EIO;
@@ -128,13 +172,13 @@ static int btmrvl_sdio_disable_host_int_mask(struct btmrvl_sdio_card *card,
128 u8 host_int_mask; 172 u8 host_int_mask;
129 int ret; 173 int ret;
130 174
131 host_int_mask = sdio_readb(card->func, HOST_INT_MASK_REG, &ret); 175 host_int_mask = sdio_readb(card->func, card->reg->host_int_mask, &ret);
132 if (ret) 176 if (ret)
133 return -EIO; 177 return -EIO;
134 178
135 host_int_mask &= ~mask; 179 host_int_mask &= ~mask;
136 180
137 sdio_writeb(card->func, host_int_mask, HOST_INT_MASK_REG, &ret); 181 sdio_writeb(card->func, host_int_mask, card->reg->host_int_mask, &ret);
138 if (ret < 0) { 182 if (ret < 0) {
139 BT_ERR("Unable to disable the host interrupt!"); 183 BT_ERR("Unable to disable the host interrupt!");
140 return -EIO; 184 return -EIO;
@@ -150,7 +194,7 @@ static int btmrvl_sdio_poll_card_status(struct btmrvl_sdio_card *card, u8 bits)
150 int ret; 194 int ret;
151 195
152 for (tries = 0; tries < MAX_POLL_TRIES * 1000; tries++) { 196 for (tries = 0; tries < MAX_POLL_TRIES * 1000; tries++) {
153 status = sdio_readb(card->func, CARD_STATUS_REG, &ret); 197 status = sdio_readb(card->func, card->reg->card_status, &ret);
154 if (ret) 198 if (ret)
155 goto failed; 199 goto failed;
156 if ((status & bits) == bits) 200 if ((status & bits) == bits)
@@ -299,7 +343,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
299 u8 base0, base1; 343 u8 base0, base1;
300 void *tmpfwbuf = NULL; 344 void *tmpfwbuf = NULL;
301 u8 *fwbuf; 345 u8 *fwbuf;
302 u16 len; 346 u16 len, blksz_dl = card->sd_blksz_fw_dl;
303 int txlen = 0, tx_blocks = 0, count = 0; 347 int txlen = 0, tx_blocks = 0, count = 0;
304 348
305 ret = request_firmware(&fw_firmware, card->firmware, 349 ret = request_firmware(&fw_firmware, card->firmware,
@@ -345,7 +389,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
345 389
346 for (tries = 0; tries < MAX_POLL_TRIES; tries++) { 390 for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
347 base0 = sdio_readb(card->func, 391 base0 = sdio_readb(card->func,
348 SQ_READ_BASE_ADDRESS_A0_REG, &ret); 392 card->reg->sq_read_base_addr_a0, &ret);
349 if (ret) { 393 if (ret) {
350 BT_ERR("BASE0 register read failed:" 394 BT_ERR("BASE0 register read failed:"
351 " base0 = 0x%04X(%d)." 395 " base0 = 0x%04X(%d)."
@@ -355,7 +399,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
355 goto done; 399 goto done;
356 } 400 }
357 base1 = sdio_readb(card->func, 401 base1 = sdio_readb(card->func,
358 SQ_READ_BASE_ADDRESS_A1_REG, &ret); 402 card->reg->sq_read_base_addr_a1, &ret);
359 if (ret) { 403 if (ret) {
360 BT_ERR("BASE1 register read failed:" 404 BT_ERR("BASE1 register read failed:"
361 " base1 = 0x%04X(%d)." 405 " base1 = 0x%04X(%d)."
@@ -403,20 +447,19 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
403 if (firmwarelen - offset < txlen) 447 if (firmwarelen - offset < txlen)
404 txlen = firmwarelen - offset; 448 txlen = firmwarelen - offset;
405 449
406 tx_blocks = 450 tx_blocks = (txlen + blksz_dl - 1) / blksz_dl;
407 (txlen + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE;
408 451
409 memcpy(fwbuf, &firmware[offset], txlen); 452 memcpy(fwbuf, &firmware[offset], txlen);
410 } 453 }
411 454
412 ret = sdio_writesb(card->func, card->ioport, fwbuf, 455 ret = sdio_writesb(card->func, card->ioport, fwbuf,
413 tx_blocks * SDIO_BLOCK_SIZE); 456 tx_blocks * blksz_dl);
414 457
415 if (ret < 0) { 458 if (ret < 0) {
416 BT_ERR("FW download, writesb(%d) failed @%d", 459 BT_ERR("FW download, writesb(%d) failed @%d",
417 count, offset); 460 count, offset);
418 sdio_writeb(card->func, HOST_CMD53_FIN, CONFIG_REG, 461 sdio_writeb(card->func, HOST_CMD53_FIN,
419 &ret); 462 card->reg->cfg, &ret);
420 if (ret) 463 if (ret)
421 BT_ERR("writeb failed (CFG)"); 464 BT_ERR("writeb failed (CFG)");
422 } 465 }
@@ -597,7 +640,7 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func)
597 640
598 priv = card->priv; 641 priv = card->priv;
599 642
600 ireg = sdio_readb(card->func, HOST_INTSTATUS_REG, &ret); 643 ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
601 if (ret) { 644 if (ret) {
602 BT_ERR("sdio_readb: read int status register failed"); 645 BT_ERR("sdio_readb: read int status register failed");
603 return; 646 return;
@@ -613,7 +656,7 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func)
613 656
614 sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS | 657 sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS |
615 UP_LD_HOST_INT_STATUS), 658 UP_LD_HOST_INT_STATUS),
616 HOST_INTSTATUS_REG, &ret); 659 card->reg->host_intstatus, &ret);
617 if (ret) { 660 if (ret) {
618 BT_ERR("sdio_writeb: clear int status register failed"); 661 BT_ERR("sdio_writeb: clear int status register failed");
619 return; 662 return;
@@ -664,7 +707,7 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
664 goto release_irq; 707 goto release_irq;
665 } 708 }
666 709
667 reg = sdio_readb(func, IO_PORT_0_REG, &ret); 710 reg = sdio_readb(func, card->reg->io_port_0, &ret);
668 if (ret < 0) { 711 if (ret < 0) {
669 ret = -EIO; 712 ret = -EIO;
670 goto release_irq; 713 goto release_irq;
@@ -672,7 +715,7 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
672 715
673 card->ioport = reg; 716 card->ioport = reg;
674 717
675 reg = sdio_readb(func, IO_PORT_1_REG, &ret); 718 reg = sdio_readb(func, card->reg->io_port_1, &ret);
676 if (ret < 0) { 719 if (ret < 0) {
677 ret = -EIO; 720 ret = -EIO;
678 goto release_irq; 721 goto release_irq;
@@ -680,7 +723,7 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
680 723
681 card->ioport |= (reg << 8); 724 card->ioport |= (reg << 8);
682 725
683 reg = sdio_readb(func, IO_PORT_2_REG, &ret); 726 reg = sdio_readb(func, card->reg->io_port_2, &ret);
684 if (ret < 0) { 727 if (ret < 0) {
685 ret = -EIO; 728 ret = -EIO;
686 goto release_irq; 729 goto release_irq;
@@ -815,6 +858,8 @@ exit:
815static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) 858static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
816{ 859{
817 int ret = 0; 860 int ret = 0;
861 u8 fws0;
862 int pollnum = MAX_POLL_TRIES;
818 863
819 if (!card || !card->func) { 864 if (!card || !card->func) {
820 BT_ERR("card or function is NULL!"); 865 BT_ERR("card or function is NULL!");
@@ -827,20 +872,36 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card)
827 goto done; 872 goto done;
828 } 873 }
829 874
830 ret = btmrvl_sdio_download_helper(card); 875 /* Check if other function driver is downloading the firmware */
876 fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret);
831 if (ret) { 877 if (ret) {
832 BT_ERR("Failed to download helper!"); 878 BT_ERR("Failed to read FW downloading status!");
833 ret = -EIO; 879 ret = -EIO;
834 goto done; 880 goto done;
835 } 881 }
882 if (fws0) {
883 BT_DBG("BT not the winner (%#x). Skip FW downloading", fws0);
884
885 /* Give other function more time to download the firmware */
886 pollnum *= 10;
887 } else {
888 if (card->helper) {
889 ret = btmrvl_sdio_download_helper(card);
890 if (ret) {
891 BT_ERR("Failed to download helper!");
892 ret = -EIO;
893 goto done;
894 }
895 }
836 896
837 if (btmrvl_sdio_download_fw_w_helper(card)) { 897 if (btmrvl_sdio_download_fw_w_helper(card)) {
838 BT_ERR("Failed to download firmware!"); 898 BT_ERR("Failed to download firmware!");
839 ret = -EIO; 899 ret = -EIO;
840 goto done; 900 goto done;
901 }
841 } 902 }
842 903
843 if (btmrvl_sdio_verify_fw_download(card, MAX_POLL_TRIES)) { 904 if (btmrvl_sdio_verify_fw_download(card, pollnum)) {
844 BT_ERR("FW failed to be active in time!"); 905 BT_ERR("FW failed to be active in time!");
845 ret = -ETIMEDOUT; 906 ret = -ETIMEDOUT;
846 goto done; 907 goto done;
@@ -864,7 +925,7 @@ static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv)
864 925
865 sdio_claim_host(card->func); 926 sdio_claim_host(card->func);
866 927
867 sdio_writeb(card->func, HOST_POWER_UP, CONFIG_REG, &ret); 928 sdio_writeb(card->func, HOST_POWER_UP, card->reg->cfg, &ret);
868 929
869 sdio_release_host(card->func); 930 sdio_release_host(card->func);
870 931
@@ -893,8 +954,10 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
893 954
894 if (id->driver_data) { 955 if (id->driver_data) {
895 struct btmrvl_sdio_device *data = (void *) id->driver_data; 956 struct btmrvl_sdio_device *data = (void *) id->driver_data;
896 card->helper = data->helper; 957 card->helper = data->helper;
897 card->firmware = data->firmware; 958 card->firmware = data->firmware;
959 card->reg = data->reg;
960 card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
898 } 961 }
899 962
900 if (btmrvl_sdio_register_dev(card) < 0) { 963 if (btmrvl_sdio_register_dev(card) < 0) {
@@ -1011,3 +1074,4 @@ MODULE_VERSION(VERSION);
1011MODULE_LICENSE("GPL v2"); 1074MODULE_LICENSE("GPL v2");
1012MODULE_FIRMWARE("sd8688_helper.bin"); 1075MODULE_FIRMWARE("sd8688_helper.bin");
1013MODULE_FIRMWARE("sd8688.bin"); 1076MODULE_FIRMWARE("sd8688.bin");
1077MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 27329f107e5a..43d35a609ca9 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -47,44 +47,46 @@
47/* Max retry number of CMD53 write */ 47/* Max retry number of CMD53 write */
48#define MAX_WRITE_IOMEM_RETRY 2 48#define MAX_WRITE_IOMEM_RETRY 2
49 49
50/* Host Control Registers */ 50/* register bitmasks */
51#define IO_PORT_0_REG 0x00 51#define HOST_POWER_UP BIT(1)
52#define IO_PORT_1_REG 0x01 52#define HOST_CMD53_FIN BIT(2)
53#define IO_PORT_2_REG 0x02 53
54 54#define HIM_DISABLE 0xff
55#define CONFIG_REG 0x03 55#define HIM_ENABLE (BIT(0) | BIT(1))
56#define HOST_POWER_UP BIT(1) 56
57#define HOST_CMD53_FIN BIT(2) 57#define UP_LD_HOST_INT_STATUS BIT(0)
58 58#define DN_LD_HOST_INT_STATUS BIT(1)
59#define HOST_INT_MASK_REG 0x04 59
60#define HIM_DISABLE 0xff 60#define DN_LD_CARD_RDY BIT(0)
61#define HIM_ENABLE (BIT(0) | BIT(1)) 61#define CARD_IO_READY BIT(3)
62 62
63#define HOST_INTSTATUS_REG 0x05 63#define FIRMWARE_READY 0xfedc
64#define UP_LD_HOST_INT_STATUS BIT(0) 64
65#define DN_LD_HOST_INT_STATUS BIT(1) 65
66 66struct btmrvl_sdio_card_reg {
67/* Card Control Registers */ 67 u8 cfg;
68#define SQ_READ_BASE_ADDRESS_A0_REG 0x10 68 u8 host_int_mask;
69#define SQ_READ_BASE_ADDRESS_A1_REG 0x11 69 u8 host_intstatus;
70 70 u8 card_status;
71#define CARD_STATUS_REG 0x20 71 u8 sq_read_base_addr_a0;
72#define DN_LD_CARD_RDY BIT(0) 72 u8 sq_read_base_addr_a1;
73#define CARD_IO_READY BIT(3) 73 u8 card_revision;
74 74 u8 card_fw_status0;
75#define CARD_FW_STATUS0_REG 0x40 75 u8 card_fw_status1;
76#define CARD_FW_STATUS1_REG 0x41 76 u8 card_rx_len;
77#define FIRMWARE_READY 0xfedc 77 u8 card_rx_unit;
78 78 u8 io_port_0;
79#define CARD_RX_LEN_REG 0x42 79 u8 io_port_1;
80#define CARD_RX_UNIT_REG 0x43 80 u8 io_port_2;
81 81};
82 82
83struct btmrvl_sdio_card { 83struct btmrvl_sdio_card {
84 struct sdio_func *func; 84 struct sdio_func *func;
85 u32 ioport; 85 u32 ioport;
86 const char *helper; 86 const char *helper;
87 const char *firmware; 87 const char *firmware;
88 const struct btmrvl_sdio_card_reg *reg;
89 u16 sd_blksz_fw_dl;
88 u8 rx_unit; 90 u8 rx_unit;
89 struct btmrvl_private *priv; 91 struct btmrvl_private *priv;
90}; 92};
@@ -92,6 +94,8 @@ struct btmrvl_sdio_card {
92struct btmrvl_sdio_device { 94struct btmrvl_sdio_device {
93 const char *helper; 95 const char *helper;
94 const char *firmware; 96 const char *firmware;
97 const struct btmrvl_sdio_card_reg *reg;
98 u16 sd_blksz_fw_dl;
95}; 99};
96 100
97 101
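
With the offsets collected in struct btmrvl_sdio_card_reg and hung off the sdio_device_id driver_data, supporting a further chipset is mostly a matter of adding another register map and device entry. A sketch of the pattern, illustrative only -- the names, offsets and firmware file below are placeholders, not a real device:

/* Illustrative sketch: the shape of an additional chip entry. */
static const struct btmrvl_sdio_card_reg btmrvl_reg_example = {
	.cfg		= 0x00,
	.host_int_mask	= 0x02,
	/* ... remaining offsets taken from the chip's register map ... */
};

static const struct btmrvl_sdio_device btmrvl_sdio_sd_example = {
	.helper		= NULL,		/* no helper image needed */
	.firmware	= "mrvl/sd_example.bin",
	.reg		= &btmrvl_reg_example,
	.sd_blksz_fw_dl	= 256,
};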
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 762a5109c68a..c2de8951e3fb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -104,6 +104,7 @@ static struct usb_device_id blacklist_table[] = {
104 104
105 /* Atheros 3011 with sflash firmware */ 105 /* Atheros 3011 with sflash firmware */
106 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 106 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
107 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
107 108
108 /* Atheros AR9285 Malbec with sflash firmware */ 109 /* Atheros AR9285 Malbec with sflash firmware */
109 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, 110 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index bd34406faaae..4093935ddf42 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -201,8 +201,13 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
201/* Recv data */ 201/* Recv data */
202static int ath_recv(struct hci_uart *hu, void *data, int count) 202static int ath_recv(struct hci_uart *hu, void *data, int count)
203{ 203{
204 if (hci_recv_stream_fragment(hu->hdev, data, count) < 0) 204 int ret;
205
206 ret = hci_recv_stream_fragment(hu->hdev, data, count);
207 if (ret < 0) {
205 BT_ERR("Frame Reassembly Failed"); 208 BT_ERR("Frame Reassembly Failed");
209 return ret;
210 }
206 211
207 return count; 212 return count;
208} 213}
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 7b8ad93e2c36..2fcd8b387d69 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -151,8 +151,13 @@ static inline int h4_check_data_len(struct h4_struct *h4, int len)
151/* Recv data */ 151/* Recv data */
152static int h4_recv(struct hci_uart *hu, void *data, int count) 152static int h4_recv(struct hci_uart *hu, void *data, int count)
153{ 153{
154 if (hci_recv_stream_fragment(hu->hdev, data, count) < 0) 154 int ret;
155
156 ret = hci_recv_stream_fragment(hu->hdev, data, count);
157 if (ret < 0) {
155 BT_ERR("Frame Reassembly Failed"); 158 BT_ERR("Frame Reassembly Failed");
159 return ret;
160 }
156 161
157 return count; 162 return count;
158} 163}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 48ad2a7ab080..320f71803a2b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -359,6 +359,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
359 */ 359 */
360static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count) 360static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
361{ 361{
362 int ret;
362 struct hci_uart *hu = (void *)tty->disc_data; 363 struct hci_uart *hu = (void *)tty->disc_data;
363 364
364 if (!hu || tty != hu->tty) 365 if (!hu || tty != hu->tty)
@@ -368,8 +369,9 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
368 return; 369 return;
369 370
370 spin_lock(&hu->rx_lock); 371 spin_lock(&hu->rx_lock);
371 hu->proto->recv(hu, (void *) data, count); 372 ret = hu->proto->recv(hu, (void *) data, count);
372 hu->hdev->stat.byte_rx += count; 373 if (ret > 0)
374 hu->hdev->stat.byte_rx += count;
373 spin_unlock(&hu->rx_lock); 375 spin_unlock(&hu->rx_lock);
374 376
375 tty_unthrottle(tty); 377 tty_unthrottle(tty);
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index d9d0e13efe47..a5a49a1baae7 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -393,16 +393,6 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
393 priv = netdev_priv(dev); 393 priv = netdev_priv(dev);
394 priv->phy = phy; 394 priv->phy = phy;
395 395
396 /*
397 * If the name is a format string the caller wants us to do a
398 * name allocation.
399 */
400 if (strchr(dev->name, '%')) {
401 err = dev_alloc_name(dev, dev->name);
402 if (err < 0)
403 goto out;
404 }
405
406 wpan_phy_set_dev(phy, &pdev->dev); 396 wpan_phy_set_dev(phy, &pdev->dev);
407 SET_NETDEV_DEV(dev, &phy->dev); 397 SET_NETDEV_DEV(dev, &phy->dev);
408 398
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 4ffc224faa7f..8e21d457b899 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -185,15 +185,20 @@ static int addr4_resolve(struct sockaddr_in *src_in,
185 __be32 dst_ip = dst_in->sin_addr.s_addr; 185 __be32 dst_ip = dst_in->sin_addr.s_addr;
186 struct rtable *rt; 186 struct rtable *rt;
187 struct neighbour *neigh; 187 struct neighbour *neigh;
188 struct flowi4 fl4;
188 int ret; 189 int ret;
189 190
190 rt = ip_route_output(&init_net, dst_ip, src_ip, 0, addr->bound_dev_if); 191 memset(&fl4, 0, sizeof(fl4));
192 fl4.daddr = dst_ip;
193 fl4.saddr = src_ip;
194 fl4.flowi4_oif = addr->bound_dev_if;
195 rt = ip_route_output_key(&init_net, &fl4);
191 if (IS_ERR(rt)) { 196 if (IS_ERR(rt)) {
192 ret = PTR_ERR(rt); 197 ret = PTR_ERR(rt);
193 goto out; 198 goto out;
194 } 199 }
195 src_in->sin_family = AF_INET; 200 src_in->sin_family = AF_INET;
196 src_in->sin_addr.s_addr = rt->rt_src; 201 src_in->sin_addr.s_addr = fl4.saddr;
197 202
198 if (rt->dst.dev->flags & IFF_LOOPBACK) { 203 if (rt->dst.dev->flags & IFF_LOOPBACK) {
199 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr); 204 ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3216bcad7e82..239184138994 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -338,8 +338,9 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
338 __be16 peer_port, u8 tos) 338 __be16 peer_port, u8 tos)
339{ 339{
340 struct rtable *rt; 340 struct rtable *rt;
341 struct flowi4 fl4;
341 342
342 rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip, 343 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
343 peer_port, local_port, IPPROTO_TCP, 344 peer_port, local_port, IPPROTO_TCP,
344 tos, 0); 345 tos, 0);
345 if (IS_ERR(rt)) 346 if (IS_ERR(rt))
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9d8dcfab2b38..6aa53cd69478 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -315,8 +315,9 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
315 __be16 peer_port, u8 tos) 315 __be16 peer_port, u8 tos)
316{ 316{
317 struct rtable *rt; 317 struct rtable *rt;
318 struct flowi4 fl4;
318 319
319 rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip, 320 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
320 peer_port, local_port, IPPROTO_TCP, 321 peer_port, local_port, IPPROTO_TCP,
321 tos, 0); 322 tos, 0);
322 if (IS_ERR(rt)) 323 if (IS_ERR(rt))
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 10d0a5ec9add..96fa9a4cafdf 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2885,9 +2885,8 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2885 if ((cqe_errv & 2885 if ((cqe_errv &
2886 (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR | 2886 (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR |
2887 NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) { 2887 NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
2888 if (nesvnic->rx_checksum_disabled == 0) { 2888 if (nesvnic->netdev->features & NETIF_F_RXCSUM)
2889 rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 2889 rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
2890 }
2891 } else 2890 } else
2892 nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet." 2891 nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet."
2893 " errv = 0x%X, pkt_type = 0x%X.\n", 2892 " errv = 0x%X, pkt_type = 0x%X.\n",
@@ -2897,7 +2896,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2897 if ((cqe_errv & 2896 if ((cqe_errv &
2898 (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR | 2897 (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR |
2899 NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) { 2898 NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
2900 if (nesvnic->rx_checksum_disabled == 0) { 2899 if (nesvnic->netdev->features & NETIF_F_RXCSUM) {
2901 rx_skb->ip_summed = CHECKSUM_UNNECESSARY; 2900 rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
2902 /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n", 2901 /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n",
2903 nesvnic->netdev->name); */ 2902 nesvnic->netdev->name); */
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index d2abe07133a5..91594116f947 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1245,7 +1245,6 @@ struct nes_vnic {
1245 u8 next_qp_nic_index; 1245 u8 next_qp_nic_index;
1246 u8 of_device_registered; 1246 u8 of_device_registered;
1247 u8 rdma_enabled; 1247 u8 rdma_enabled;
1248 u8 rx_checksum_disabled;
1249 u32 lro_max_aggr; 1248 u32 lro_max_aggr;
1250 struct net_lro_mgr lro_mgr; 1249 struct net_lro_mgr lro_mgr;
1251 struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS]; 1250 struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index e96b8fb5d44c..d3a1c41cfd27 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1093,34 +1093,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
1093}; 1093};
1094#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset) 1094#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
1095 1095
1096/**
1097 * nes_netdev_get_rx_csum
1098 */
1099static u32 nes_netdev_get_rx_csum (struct net_device *netdev)
1100{
1101 struct nes_vnic *nesvnic = netdev_priv(netdev);
1102
1103 if (nesvnic->rx_checksum_disabled)
1104 return 0;
1105 else
1106 return 1;
1107}
1108
1109
1110/**
1111 * nes_netdev_set_rc_csum
1112 */
1113static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
1114{
1115 struct nes_vnic *nesvnic = netdev_priv(netdev);
1116
1117 if (enable)
1118 nesvnic->rx_checksum_disabled = 0;
1119 else
1120 nesvnic->rx_checksum_disabled = 1;
1121 return 0;
1122}
1123
1124 1096
1125/** 1097/**
1126 * nes_netdev_get_sset_count 1098 * nes_netdev_get_sset_count
@@ -1521,7 +1493,7 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1521 et_cmd->maxrxpkt = 511; 1493 et_cmd->maxrxpkt = 511;
1522 1494
1523 if (nesadapter->OneG_Mode) { 1495 if (nesadapter->OneG_Mode) {
1524 et_cmd->speed = SPEED_1000; 1496 ethtool_cmd_speed_set(et_cmd, SPEED_1000);
1525 if (phy_type == NES_PHY_TYPE_PUMA_1G) { 1497 if (phy_type == NES_PHY_TYPE_PUMA_1G) {
1526 et_cmd->supported = SUPPORTED_1000baseT_Full; 1498 et_cmd->supported = SUPPORTED_1000baseT_Full;
1527 et_cmd->advertising = ADVERTISED_1000baseT_Full; 1499 et_cmd->advertising = ADVERTISED_1000baseT_Full;
@@ -1560,7 +1532,7 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1560 et_cmd->advertising = ADVERTISED_10000baseT_Full; 1532 et_cmd->advertising = ADVERTISED_10000baseT_Full;
1561 et_cmd->phy_address = mac_index; 1533 et_cmd->phy_address = mac_index;
1562 } 1534 }
1563 et_cmd->speed = SPEED_10000; 1535 ethtool_cmd_speed_set(et_cmd, SPEED_10000);
1564 et_cmd->autoneg = AUTONEG_DISABLE; 1536 et_cmd->autoneg = AUTONEG_DISABLE;
1565 return 0; 1537 return 0;
1566} 1538}
@@ -1598,19 +1570,10 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
1598} 1570}
1599 1571
1600 1572
1601static int nes_netdev_set_flags(struct net_device *netdev, u32 flags)
1602{
1603 return ethtool_op_set_flags(netdev, flags, ETH_FLAG_LRO);
1604}
1605
1606
1607static const struct ethtool_ops nes_ethtool_ops = { 1573static const struct ethtool_ops nes_ethtool_ops = {
1608 .get_link = ethtool_op_get_link, 1574 .get_link = ethtool_op_get_link,
1609 .get_settings = nes_netdev_get_settings, 1575 .get_settings = nes_netdev_get_settings,
1610 .set_settings = nes_netdev_set_settings, 1576 .set_settings = nes_netdev_set_settings,
1611 .get_tx_csum = ethtool_op_get_tx_csum,
1612 .get_rx_csum = nes_netdev_get_rx_csum,
1613 .get_sg = ethtool_op_get_sg,
1614 .get_strings = nes_netdev_get_strings, 1577 .get_strings = nes_netdev_get_strings,
1615 .get_sset_count = nes_netdev_get_sset_count, 1578 .get_sset_count = nes_netdev_get_sset_count,
1616 .get_ethtool_stats = nes_netdev_get_ethtool_stats, 1579 .get_ethtool_stats = nes_netdev_get_ethtool_stats,
@@ -1619,13 +1582,6 @@ static const struct ethtool_ops nes_ethtool_ops = {
1619 .set_coalesce = nes_netdev_set_coalesce, 1582 .set_coalesce = nes_netdev_set_coalesce,
1620 .get_pauseparam = nes_netdev_get_pauseparam, 1583 .get_pauseparam = nes_netdev_get_pauseparam,
1621 .set_pauseparam = nes_netdev_set_pauseparam, 1584 .set_pauseparam = nes_netdev_set_pauseparam,
1622 .set_tx_csum = ethtool_op_set_tx_csum,
1623 .set_rx_csum = nes_netdev_set_rx_csum,
1624 .set_sg = ethtool_op_set_sg,
1625 .get_tso = ethtool_op_get_tso,
1626 .set_tso = ethtool_op_set_tso,
1627 .get_flags = ethtool_op_get_flags,
1628 .set_flags = nes_netdev_set_flags,
1629}; 1585};
1630 1586
1631 1587
@@ -1727,12 +1683,11 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1727 netdev->dev_addr[5] = (u8)u64temp; 1683 netdev->dev_addr[5] = (u8)u64temp;
1728 memcpy(netdev->perm_addr, netdev->dev_addr, 6); 1684 memcpy(netdev->perm_addr, netdev->dev_addr, 6);
1729 1685
1730 if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) { 1686 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM;
1731 netdev->features |= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM; 1687 if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
1732 netdev->features |= NETIF_F_GSO | NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM; 1688 netdev->hw_features |= NETIF_F_TSO;
1733 } else { 1689 netdev->features |= netdev->hw_features;
1734 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 1690 netdev->hw_features |= NETIF_F_LRO;
1735 }
1736 1691
1737 nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," 1692 nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
1738 " nic_index = %d, logical_port = %d, mac_index = %d.\n", 1693 " nic_index = %d, logical_port = %d, mac_index = %d.\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ab97f92fc257..7b6985a2e652 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -91,7 +91,6 @@ enum {
91 IPOIB_STOP_REAPER = 7, 91 IPOIB_STOP_REAPER = 7,
92 IPOIB_FLAG_ADMIN_CM = 9, 92 IPOIB_FLAG_ADMIN_CM = 9,
93 IPOIB_FLAG_UMCAST = 10, 93 IPOIB_FLAG_UMCAST = 10,
94 IPOIB_FLAG_CSUM = 11,
95 94
96 IPOIB_MAX_BACKOFF_SECONDS = 16, 95 IPOIB_MAX_BACKOFF_SECONDS = 16,
97 96
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 93d55806b967..39913a065f99 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1463,8 +1463,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1463 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 1463 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
1464 ipoib_warn(priv, "enabling connected mode " 1464 ipoib_warn(priv, "enabling connected mode "
1465 "will cause multicast packet drops\n"); 1465 "will cause multicast packet drops\n");
1466 1466 netdev_update_features(dev);
1467 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
1468 rtnl_unlock(); 1467 rtnl_unlock();
1469 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 1468 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
1470 1469
@@ -1474,13 +1473,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1474 1473
1475 if (!strcmp(buf, "datagram\n")) { 1474 if (!strcmp(buf, "datagram\n")) {
1476 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 1475 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
1477 1476 netdev_update_features(dev);
1478 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
1479 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1480 priv->dev->features |= NETIF_F_GRO;
1481 if (priv->hca_caps & IB_DEVICE_UD_TSO)
1482 dev->features |= NETIF_F_TSO;
1483 }
1484 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 1477 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
1485 rtnl_unlock(); 1478 rtnl_unlock();
1486 ipoib_flush_paths(dev); 1479 ipoib_flush_paths(dev);
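
Rather than clearing or restoring feature bits by hand when the connected-mode flag flips, set_mode() now simply asks the core to recompute them. The general pattern looks like the following sketch (my_set_mode() and the flag bit are illustrative, not IPoIB code):

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void my_set_mode(struct net_device *dev, bool connected_mode,
			unsigned long *flags, int mode_bit)
{
	ASSERT_RTNL();		/* caller holds rtnl, as set_mode() does above */

	if (connected_mode)
		set_bit(mode_bit, flags);
	else
		clear_bit(mode_bit, flags);

	netdev_update_features(dev);	/* re-runs ndo_fix_features() and
					 * applies the resulting mask */
}
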
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 19f7f5206f78..29bc7b5724ac 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -42,32 +42,6 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
42 strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1); 42 strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
43} 43}
44 44
45static u32 ipoib_get_rx_csum(struct net_device *dev)
46{
47 struct ipoib_dev_priv *priv = netdev_priv(dev);
48 return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
49 !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
50}
51
52static int ipoib_set_tso(struct net_device *dev, u32 data)
53{
54 struct ipoib_dev_priv *priv = netdev_priv(dev);
55
56 if (data) {
57 if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
58 (dev->features & NETIF_F_SG) &&
59 (priv->hca_caps & IB_DEVICE_UD_TSO)) {
60 dev->features |= NETIF_F_TSO;
61 } else {
62 ipoib_warn(priv, "can't set TSO on\n");
63 return -EOPNOTSUPP;
64 }
65 } else
66 dev->features &= ~NETIF_F_TSO;
67
68 return 0;
69}
70
71static int ipoib_get_coalesce(struct net_device *dev, 45static int ipoib_get_coalesce(struct net_device *dev,
72 struct ethtool_coalesce *coal) 46 struct ethtool_coalesce *coal)
73{ 47{
@@ -108,8 +82,6 @@ static int ipoib_set_coalesce(struct net_device *dev,
108 82
109static const struct ethtool_ops ipoib_ethtool_ops = { 83static const struct ethtool_ops ipoib_ethtool_ops = {
110 .get_drvinfo = ipoib_get_drvinfo, 84 .get_drvinfo = ipoib_get_drvinfo,
111 .get_rx_csum = ipoib_get_rx_csum,
112 .set_tso = ipoib_set_tso,
113 .get_coalesce = ipoib_get_coalesce, 85 .get_coalesce = ipoib_get_coalesce,
114 .set_coalesce = ipoib_set_coalesce, 86 .set_coalesce = ipoib_set_coalesce,
115}; 87};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 806d0292dc39..81ae61d68a22 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -292,7 +292,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
292 dev->stats.rx_bytes += skb->len; 292 dev->stats.rx_bytes += skb->len;
293 293
294 skb->dev = dev; 294 skb->dev = dev;
295 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok)) 295 if ((dev->features & NETIF_F_RXCSUM) && likely(wc->csum_ok))
296 skb->ip_summed = CHECKSUM_UNNECESSARY; 296 skb->ip_summed = CHECKSUM_UNNECESSARY;
297 297
298 napi_gro_receive(&priv->napi, skb); 298 napi_gro_receive(&priv->napi, skb);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index aca3b44f7aed..86addca9ddf6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -171,6 +171,16 @@ static int ipoib_stop(struct net_device *dev)
171 return 0; 171 return 0;
172} 172}
173 173
174static u32 ipoib_fix_features(struct net_device *dev, u32 features)
175{
176 struct ipoib_dev_priv *priv = netdev_priv(dev);
177
178 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
179 features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
180
181 return features;
182}
183
174static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 184static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
175{ 185{
176 struct ipoib_dev_priv *priv = netdev_priv(dev); 186 struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -970,6 +980,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
970 .ndo_open = ipoib_open, 980 .ndo_open = ipoib_open,
971 .ndo_stop = ipoib_stop, 981 .ndo_stop = ipoib_stop,
972 .ndo_change_mtu = ipoib_change_mtu, 982 .ndo_change_mtu = ipoib_change_mtu,
983 .ndo_fix_features = ipoib_fix_features,
973 .ndo_start_xmit = ipoib_start_xmit, 984 .ndo_start_xmit = ipoib_start_xmit,
974 .ndo_tx_timeout = ipoib_timeout, 985 .ndo_tx_timeout = ipoib_timeout,
975 .ndo_set_multicast_list = ipoib_set_mcast_list, 986 .ndo_set_multicast_list = ipoib_set_mcast_list,
@@ -1154,19 +1165,18 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
1154 kfree(device_attr); 1165 kfree(device_attr);
1155 1166
1156 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { 1167 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1157 set_bit(IPOIB_FLAG_CSUM, &priv->flags); 1168 priv->dev->hw_features = NETIF_F_SG |
1158 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 1169 NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1159 }
1160 1170
1161 priv->dev->features |= NETIF_F_GRO; 1171 if (priv->hca_caps & IB_DEVICE_UD_TSO)
1172 priv->dev->hw_features |= NETIF_F_TSO;
1162 1173
1163 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) 1174 priv->dev->features |= priv->dev->hw_features;
1164 priv->dev->features |= NETIF_F_TSO; 1175 }
1165 1176
1166 return 0; 1177 return 0;
1167} 1178}
1168 1179
1169
1170static struct net_device *ipoib_add_port(const char *format, 1180static struct net_device *ipoib_add_port(const char *format,
1171 struct ib_device *hca, u8 port) 1181 struct ib_device *hca, u8 port)
1172{ 1182{
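
The ndo_fix_features callback added above is the other half of that mechanism: it only masks out bits the current mode cannot support, and the core applies the result. A self-contained sketch with hypothetical names (my_priv, MY_FLAG_CM, my_netdev_ops):

#include <linux/bitops.h>
#include <linux/netdevice.h>

#define MY_FLAG_CM	0	/* "connected mode" flag bit, illustrative */

struct my_priv {
	unsigned long flags;
};

static u32 my_fix_features(struct net_device *dev, u32 features)
{
	struct my_priv *priv = netdev_priv(dev);

	/* SG, TX checksum and TSO cannot be used in connected mode */
	if (test_bit(MY_FLAG_CM, &priv->flags))
		features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_fix_features	= my_fix_features,
	/* remaining ops omitted */
};
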
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index a168e8a891be..15c3ffd9d860 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -33,21 +33,6 @@ config ISDN_CAPI_CAPI20
33 standardized libcapi20 to access this functionality. You should say 33 standardized libcapi20 to access this functionality. You should say
34 Y/M here. 34 Y/M here.
35 35
36config ISDN_CAPI_CAPIFS_BOOL
37 bool "CAPI2.0 filesystem support (DEPRECATED)"
38 depends on ISDN_CAPI_MIDDLEWARE && ISDN_CAPI_CAPI20
39 help
40 This option provides a special file system, similar to /dev/pts with
41 device nodes for the special ttys established by using the
42 middleware extension above.
43 You no longer need this, udev fully replaces it. This feature is
44 scheduled for removal.
45
46config ISDN_CAPI_CAPIFS
47 tristate
48 depends on ISDN_CAPI_CAPIFS_BOOL
49 default ISDN_CAPI_CAPI20
50
51config ISDN_CAPI_CAPIDRV 36config ISDN_CAPI_CAPIDRV
52 tristate "CAPI2.0 capidrv interface support" 37 tristate "CAPI2.0 capidrv interface support"
53 depends on ISDN_I4L 38 depends on ISDN_I4L
diff --git a/drivers/isdn/capi/Makefile b/drivers/isdn/capi/Makefile
index 57123e3e4978..4d5b4b71db1e 100644
--- a/drivers/isdn/capi/Makefile
+++ b/drivers/isdn/capi/Makefile
@@ -7,7 +7,6 @@
7obj-$(CONFIG_ISDN_CAPI) += kernelcapi.o 7obj-$(CONFIG_ISDN_CAPI) += kernelcapi.o
8obj-$(CONFIG_ISDN_CAPI_CAPI20) += capi.o 8obj-$(CONFIG_ISDN_CAPI_CAPI20) += capi.o
9obj-$(CONFIG_ISDN_CAPI_CAPIDRV) += capidrv.o 9obj-$(CONFIG_ISDN_CAPI_CAPIDRV) += capidrv.o
10obj-$(CONFIG_ISDN_CAPI_CAPIFS) += capifs.o
11 10
12# Multipart objects. 11# Multipart objects.
13 12
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 0d7088367038..bea100983336 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -38,8 +38,6 @@
38#include <linux/isdn/capiutil.h> 38#include <linux/isdn/capiutil.h>
39#include <linux/isdn/capicmd.h> 39#include <linux/isdn/capicmd.h>
40 40
41#include "capifs.h"
42
43MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface"); 41MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface");
44MODULE_AUTHOR("Carsten Paeth"); 42MODULE_AUTHOR("Carsten Paeth");
45MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
@@ -85,7 +83,6 @@ struct capiminor {
85 struct kref kref; 83 struct kref kref;
86 84
87 unsigned int minor; 85 unsigned int minor;
88 struct dentry *capifs_dentry;
89 86
90 struct capi20_appl *ap; 87 struct capi20_appl *ap;
91 u32 ncci; 88 u32 ncci;
@@ -300,17 +297,8 @@ static void capiminor_free(struct capiminor *mp)
300 297
301static void capincci_alloc_minor(struct capidev *cdev, struct capincci *np) 298static void capincci_alloc_minor(struct capidev *cdev, struct capincci *np)
302{ 299{
303 struct capiminor *mp; 300 if (cdev->userflags & CAPIFLAG_HIGHJACKING)
304 dev_t device; 301 np->minorp = capiminor_alloc(&cdev->ap, np->ncci);
305
306 if (!(cdev->userflags & CAPIFLAG_HIGHJACKING))
307 return;
308
309 mp = np->minorp = capiminor_alloc(&cdev->ap, np->ncci);
310 if (mp) {
311 device = MKDEV(capinc_tty_driver->major, mp->minor);
312 mp->capifs_dentry = capifs_new_ncci(mp->minor, device);
313 }
314} 302}
315 303
316static void capincci_free_minor(struct capincci *np) 304static void capincci_free_minor(struct capincci *np)
@@ -319,8 +307,6 @@ static void capincci_free_minor(struct capincci *np)
319 struct tty_struct *tty; 307 struct tty_struct *tty;
320 308
321 if (mp) { 309 if (mp) {
322 capifs_free_ncci(mp->capifs_dentry);
323
324 tty = tty_port_tty_get(&mp->port); 310 tty = tty_port_tty_get(&mp->port);
325 if (tty) { 311 if (tty) {
326 tty_vhangup(tty); 312 tty_vhangup(tty);
@@ -1514,10 +1500,8 @@ static int __init capi_init(void)
1514 1500
1515 proc_init(); 1501 proc_init();
1516 1502
1517#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE) 1503#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
1518 compileinfo = " (middleware+capifs)"; 1504 compileinfo = " (middleware)";
1519#elif defined(CONFIG_ISDN_CAPI_MIDDLEWARE)
1520 compileinfo = " (no capifs)";
1521#else 1505#else
1522 compileinfo = " (no middleware)"; 1506 compileinfo = " (no middleware)";
1523#endif 1507#endif
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
deleted file mode 100644
index b4faed7fe0d3..000000000000
--- a/drivers/isdn/capi/capifs.c
+++ /dev/null
@@ -1,239 +0,0 @@
1/* $Id: capifs.c,v 1.1.2.3 2004/01/16 21:09:26 keil Exp $
2 *
3 * Copyright 2000 by Carsten Paeth <calle@calle.de>
4 *
5 * Heavily based on devpts filesystem from H. Peter Anvin
6 *
7 * This software may be used and distributed according to the terms
8 * of the GNU General Public License, incorporated herein by reference.
9 *
10 */
11
12#include <linux/fs.h>
13#include <linux/mount.h>
14#include <linux/slab.h>
15#include <linux/namei.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/ctype.h>
19#include <linux/sched.h> /* current */
20
21#include "capifs.h"
22
23MODULE_DESCRIPTION("CAPI4Linux: /dev/capi/ filesystem");
24MODULE_AUTHOR("Carsten Paeth");
25MODULE_LICENSE("GPL");
26
27/* ------------------------------------------------------------------ */
28
29#define CAPIFS_SUPER_MAGIC (('C'<<8)|'N')
30
31static struct vfsmount *capifs_mnt;
32static int capifs_mnt_count;
33
34static struct {
35 int setuid;
36 int setgid;
37 uid_t uid;
38 gid_t gid;
39 umode_t mode;
40} config = {.mode = 0600};
41
42/* ------------------------------------------------------------------ */
43
44static int capifs_remount(struct super_block *s, int *flags, char *data)
45{
46 int setuid = 0;
47 int setgid = 0;
48 uid_t uid = 0;
49 gid_t gid = 0;
50 umode_t mode = 0600;
51 char *this_char;
52 char *new_opt = kstrdup(data, GFP_KERNEL);
53
54 this_char = NULL;
55 while ((this_char = strsep(&data, ",")) != NULL) {
56 int n;
57 char dummy;
58 if (!*this_char)
59 continue;
60 if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) {
61 setuid = 1;
62 uid = n;
63 } else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) {
64 setgid = 1;
65 gid = n;
66 } else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1)
67 mode = n & ~S_IFMT;
68 else {
69 kfree(new_opt);
70 printk("capifs: called with bogus options\n");
71 return -EINVAL;
72 }
73 }
74
75 mutex_lock(&s->s_root->d_inode->i_mutex);
76
77 replace_mount_options(s, new_opt);
78 config.setuid = setuid;
79 config.setgid = setgid;
80 config.uid = uid;
81 config.gid = gid;
82 config.mode = mode;
83
84 mutex_unlock(&s->s_root->d_inode->i_mutex);
85
86 return 0;
87}
88
89static const struct super_operations capifs_sops =
90{
91 .statfs = simple_statfs,
92 .remount_fs = capifs_remount,
93 .show_options = generic_show_options,
94};
95
96
97static int
98capifs_fill_super(struct super_block *s, void *data, int silent)
99{
100 struct inode * inode;
101
102 s->s_blocksize = 1024;
103 s->s_blocksize_bits = 10;
104 s->s_magic = CAPIFS_SUPER_MAGIC;
105 s->s_op = &capifs_sops;
106 s->s_time_gran = 1;
107
108 inode = new_inode(s);
109 if (!inode)
110 goto fail;
111 inode->i_ino = 1;
112 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
113 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
114 inode->i_op = &simple_dir_inode_operations;
115 inode->i_fop = &simple_dir_operations;
116 inode->i_nlink = 2;
117
118 s->s_root = d_alloc_root(inode);
119 if (s->s_root)
120 return 0;
121
122 printk("capifs: get root dentry failed\n");
123 iput(inode);
124fail:
125 return -ENOMEM;
126}
127
128static struct dentry *capifs_mount(struct file_system_type *fs_type,
129 int flags, const char *dev_name, void *data)
130{
131 return mount_single(fs_type, flags, data, capifs_fill_super);
132}
133
134static struct file_system_type capifs_fs_type = {
135 .owner = THIS_MODULE,
136 .name = "capifs",
137 .mount = capifs_mount,
138 .kill_sb = kill_anon_super,
139};
140
141static struct dentry *new_ncci(unsigned int number, dev_t device)
142{
143 struct super_block *s = capifs_mnt->mnt_sb;
144 struct dentry *root = s->s_root;
145 struct dentry *dentry;
146 struct inode *inode;
147 char name[10];
148 int namelen;
149
150 mutex_lock(&root->d_inode->i_mutex);
151
152 namelen = sprintf(name, "%d", number);
153 dentry = lookup_one_len(name, root, namelen);
154 if (IS_ERR(dentry)) {
155 dentry = NULL;
156 goto unlock_out;
157 }
158
159 if (dentry->d_inode) {
160 dput(dentry);
161 dentry = NULL;
162 goto unlock_out;
163 }
164
165 inode = new_inode(s);
166 if (!inode) {
167 dput(dentry);
168 dentry = NULL;
169 goto unlock_out;
170 }
171
172 /* config contents is protected by root's i_mutex */
173 inode->i_uid = config.setuid ? config.uid : current_fsuid();
174 inode->i_gid = config.setgid ? config.gid : current_fsgid();
175 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
176 inode->i_ino = number + 2;
177 init_special_inode(inode, S_IFCHR|config.mode, device);
178
179 d_instantiate(dentry, inode);
180 dget(dentry);
181
182unlock_out:
183 mutex_unlock(&root->d_inode->i_mutex);
184
185 return dentry;
186}
187
188struct dentry *capifs_new_ncci(unsigned int number, dev_t device)
189{
190 struct dentry *dentry;
191
192 if (simple_pin_fs(&capifs_fs_type, &capifs_mnt, &capifs_mnt_count) < 0)
193 return NULL;
194
195 dentry = new_ncci(number, device);
196 if (!dentry)
197 simple_release_fs(&capifs_mnt, &capifs_mnt_count);
198
199 return dentry;
200}
201
202void capifs_free_ncci(struct dentry *dentry)
203{
204 struct dentry *root = capifs_mnt->mnt_sb->s_root;
205 struct inode *inode;
206
207 if (!dentry)
208 return;
209
210 mutex_lock(&root->d_inode->i_mutex);
211
212 inode = dentry->d_inode;
213 if (inode) {
214 drop_nlink(inode);
215 d_delete(dentry);
216 dput(dentry);
217 }
218 dput(dentry);
219
220 mutex_unlock(&root->d_inode->i_mutex);
221
222 simple_release_fs(&capifs_mnt, &capifs_mnt_count);
223}
224
225static int __init capifs_init(void)
226{
227 return register_filesystem(&capifs_fs_type);
228}
229
230static void __exit capifs_exit(void)
231{
232 unregister_filesystem(&capifs_fs_type);
233}
234
235EXPORT_SYMBOL(capifs_new_ncci);
236EXPORT_SYMBOL(capifs_free_ncci);
237
238module_init(capifs_init);
239module_exit(capifs_exit);
diff --git a/drivers/isdn/capi/capifs.h b/drivers/isdn/capi/capifs.h
deleted file mode 100644
index e193d1189531..000000000000
--- a/drivers/isdn/capi/capifs.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* $Id: capifs.h,v 1.1.2.2 2004/01/16 21:09:26 keil Exp $
2 *
3 * Copyright 2000 by Carsten Paeth <calle@calle.de>
4 *
5 * This software may be used and distributed according to the terms
6 * of the GNU General Public License, incorporated herein by reference.
7 *
8 */
9
10#include <linux/dcache.h>
11
12#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
13
14struct dentry *capifs_new_ncci(unsigned int num, dev_t device);
15void capifs_free_ncci(struct dentry *dentry);
16
17#else
18
19static inline struct dentry *capifs_new_ncci(unsigned int num, dev_t device)
20{
21 return NULL;
22}
23
24static inline void capifs_free_ncci(struct dentry *dentry)
25{
26}
27
28#endif
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 8a3c5cfc4fea..3913f47ef86d 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -1157,7 +1157,6 @@ static void write_iso_tasklet(unsigned long data)
1157 struct urb *urb; 1157 struct urb *urb;
1158 int status; 1158 int status;
1159 struct usb_iso_packet_descriptor *ifd; 1159 struct usb_iso_packet_descriptor *ifd;
1160 int offset;
1161 unsigned long flags; 1160 unsigned long flags;
1162 int i; 1161 int i;
1163 struct sk_buff *skb; 1162 struct sk_buff *skb;
@@ -1225,7 +1224,6 @@ static void write_iso_tasklet(unsigned long data)
1225 * successfully sent 1224 * successfully sent
1226 * - all following frames are not sent at all 1225 * - all following frames are not sent at all
1227 */ 1226 */
1228 offset = done->limit; /* default (no error) */
1229 for (i = 0; i < BAS_NUMFRAMES; i++) { 1227 for (i = 0; i < BAS_NUMFRAMES; i++) {
1230 ifd = &urb->iso_frame_desc[i]; 1228 ifd = &urb->iso_frame_desc[i];
1231 if (ifd->status || 1229 if (ifd->status ||
@@ -1235,9 +1233,6 @@ static void write_iso_tasklet(unsigned long data)
1235 i, ifd->actual_length, 1233 i, ifd->actual_length,
1236 ifd->length, 1234 ifd->length,
1237 get_usb_statmsg(ifd->status)); 1235 get_usb_statmsg(ifd->status));
1238 offset = (ifd->offset +
1239 ifd->actual_length)
1240 % BAS_OUTBUFSIZE;
1241 break; 1236 break;
1242 } 1237 }
1243 } 1238 }
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index a14187605f5e..ba74646cf0e4 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -390,12 +390,12 @@ static const struct zsau_resp_t {
390 */ 390 */
391static int cid_of_response(char *s) 391static int cid_of_response(char *s)
392{ 392{
393 unsigned long cid; 393 int cid;
394 int rc; 394 int rc;
395 395
396 if (s[-1] != ';') 396 if (s[-1] != ';')
397 return 0; /* no CID separator */ 397 return 0; /* no CID separator */
398 rc = strict_strtoul(s, 10, &cid); 398 rc = kstrtoint(s, 10, &cid);
399 if (rc) 399 if (rc)
400 return 0; /* CID not numeric */ 400 return 0; /* CID not numeric */
401 if (cid < 1 || cid > 65535) 401 if (cid < 1 || cid > 65535)
@@ -566,27 +566,19 @@ void gigaset_handle_modem_response(struct cardstate *cs)
566 case RT_ZCAU: 566 case RT_ZCAU:
567 event->parameter = -1; 567 event->parameter = -1;
568 if (curarg + 1 < params) { 568 if (curarg + 1 < params) {
569 unsigned long type, value; 569 u8 type, value;
570
571 i = strict_strtoul(argv[curarg++], 16, &type);
572 j = strict_strtoul(argv[curarg++], 16, &value);
573 570
574 if (i == 0 && type < 256 && 571 i = kstrtou8(argv[curarg++], 16, &type);
575 j == 0 && value < 256) 572 j = kstrtou8(argv[curarg++], 16, &value);
573 if (i == 0 && j == 0)
576 event->parameter = (type << 8) | value; 574 event->parameter = (type << 8) | value;
577 } else 575 } else
578 curarg = params - 1; 576 curarg = params - 1;
579 break; 577 break;
580 case RT_NUMBER: 578 case RT_NUMBER:
581 event->parameter = -1; 579 if (curarg >= params ||
582 if (curarg < params) { 580 kstrtoint(argv[curarg++], 10, &event->parameter))
583 unsigned long res; 581 event->parameter = -1;
584 int rc;
585
586 rc = strict_strtoul(argv[curarg++], 10, &res);
587 if (rc == 0)
588 event->parameter = res;
589 }
590 gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter); 582 gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter);
591 break; 583 break;
592 } 584 }
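
The kstrto*() helpers used above parse directly into a variable of the right width and fail on overflow, which is why the explicit "< 256" and "> 65535" range checks can be dropped. A small sketch of the ZCAU-style parse; parse_zcau() is a hypothetical example, not gigaset code:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

static int parse_zcau(const char *type_str, const char *value_str,
		      int *parameter)
{
	u8 type, value;

	/* base 16: ZCAU reports the cause as two hex bytes */
	if (kstrtou8(type_str, 16, &type) || kstrtou8(value_str, 16, &value))
		return -EINVAL;

	*parameter = (type << 8) | value;
	return 0;
}
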
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index 362640120886..7a9894cb4557 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -861,7 +861,7 @@ static int diva_get_idi_adapter_info (IDI_CALL request, dword* serial, dword* lo
861void diva_mnt_add_xdi_adapter (const DESCRIPTOR* d) { 861void diva_mnt_add_xdi_adapter (const DESCRIPTOR* d) {
862 diva_os_spin_lock_magic_t old_irql, old_irql1; 862 diva_os_spin_lock_magic_t old_irql, old_irql1;
863 dword sec, usec, logical, serial, org_mask; 863 dword sec, usec, logical, serial, org_mask;
864 int id, best_id = 0, free_id = -1; 864 int id, free_id = -1;
865 char tmp[128]; 865 char tmp[128];
866 diva_dbg_entry_head_t* pmsg = NULL; 866 diva_dbg_entry_head_t* pmsg = NULL;
867 int len; 867 int len;
@@ -906,7 +906,6 @@ void diva_mnt_add_xdi_adapter (const DESCRIPTOR* d) {
906 and slot is still free - reuse it 906 and slot is still free - reuse it
907 */ 907 */
908 free_id = id; 908 free_id = id;
909 best_id = 1;
910 break; 909 break;
911 } 910 }
912 } 911 }
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 8c5c563c4f12..a3395986df3d 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1198,7 +1198,6 @@ static byte connect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
1198 word ch; 1198 word ch;
1199 word i; 1199 word i;
1200 word Info; 1200 word Info;
1201 word CIP;
1202 byte LinkLayer; 1201 byte LinkLayer;
1203 API_PARSE * ai; 1202 API_PARSE * ai;
1204 API_PARSE * bp; 1203 API_PARSE * bp;
@@ -1340,7 +1339,6 @@ static byte connect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
1340 add_s(plci,BC,&parms[6]); 1339 add_s(plci,BC,&parms[6]);
1341 add_s(plci,LLC,&parms[7]); 1340 add_s(plci,LLC,&parms[7]);
1342 add_s(plci,HLC,&parms[8]); 1341 add_s(plci,HLC,&parms[8]);
1343 CIP = GET_WORD(parms[0].info);
1344 if (a->Info_Mask[appl->Id-1] & 0x200) 1342 if (a->Info_Mask[appl->Id-1] & 0x200)
1345 { 1343 {
1346 /* early B3 connect (CIP mask bit 9) no release after a disc */ 1344 /* early B3 connect (CIP mask bit 9) no release after a disc */
@@ -4830,7 +4828,6 @@ static void sig_ind(PLCI *plci)
4830 dword x_Id; 4828 dword x_Id;
4831 dword Id; 4829 dword Id;
4832 dword rId; 4830 dword rId;
4833 word Number = 0;
4834 word i; 4831 word i;
4835 word cip; 4832 word cip;
4836 dword cip_mask; 4833 dword cip_mask;
@@ -5106,7 +5103,7 @@ static void sig_ind(PLCI *plci)
5106 } 5103 }
5107 } 5104 }
5108 5105
5109 if(plci->appl) Number = plci->appl->Number++; 5106 if(plci->appl) plci->appl->Number++;
5110 5107
5111 switch(plci->Sig.Ind) { 5108 switch(plci->Sig.Ind) {
5112 /* Response to Get_Supported_Services request */ 5109 /* Response to Get_Supported_Services request */
@@ -5894,7 +5891,6 @@ static void sig_ind(PLCI *plci)
5894 break; 5891 break;
5895 5892
5896 case TEL_CTRL: 5893 case TEL_CTRL:
5897 Number = 0;
5898 ie = multi_fac_parms[0]; /* inspect the facility hook indications */ 5894 ie = multi_fac_parms[0]; /* inspect the facility hook indications */
5899 if(plci->State==ADVANCED_VOICE_SIG && ie[0]){ 5895 if(plci->State==ADVANCED_VOICE_SIG && ie[0]){
5900 switch (ie[1]&0x91) { 5896 switch (ie[1]&0x91) {
@@ -10119,14 +10115,12 @@ static byte dtmf_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI
10119 10115
10120static void dtmf_confirmation (dword Id, PLCI *plci) 10116static void dtmf_confirmation (dword Id, PLCI *plci)
10121{ 10117{
10122 word Info;
10123 word i; 10118 word i;
10124 byte result[4]; 10119 byte result[4];
10125 10120
10126 dbug (1, dprintf ("[%06lx] %s,%d: dtmf_confirmation", 10121 dbug (1, dprintf ("[%06lx] %s,%d: dtmf_confirmation",
10127 UnMapId (Id), (char *)(FILE_), __LINE__)); 10122 UnMapId (Id), (char *)(FILE_), __LINE__));
10128 10123
10129 Info = GOOD;
10130 result[0] = 2; 10124 result[0] = 2;
10131 PUT_WORD (&result[1], DTMF_SUCCESS); 10125 PUT_WORD (&result[1], DTMF_SUCCESS);
10132 if (plci->dtmf_send_requests != 0) 10126 if (plci->dtmf_send_requests != 0)
@@ -11520,13 +11514,12 @@ static word mixer_restore_config (dword Id, PLCI *plci, byte Rc)
11520static void mixer_command (dword Id, PLCI *plci, byte Rc) 11514static void mixer_command (dword Id, PLCI *plci, byte Rc)
11521{ 11515{
11522 DIVA_CAPI_ADAPTER *a; 11516 DIVA_CAPI_ADAPTER *a;
11523 word i, internal_command, Info; 11517 word i, internal_command;
11524 11518
11525 dbug (1, dprintf ("[%06lx] %s,%d: mixer_command %02x %04x %04x", 11519 dbug (1, dprintf ("[%06lx] %s,%d: mixer_command %02x %04x %04x",
11526 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command, 11520 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
11527 plci->li_cmd)); 11521 plci->li_cmd));
11528 11522
11529 Info = GOOD;
11530 a = plci->adapter; 11523 a = plci->adapter;
11531 internal_command = plci->internal_command; 11524 internal_command = plci->internal_command;
11532 plci->internal_command = 0; 11525 plci->internal_command = 0;
@@ -11550,7 +11543,6 @@ static void mixer_command (dword Id, PLCI *plci, byte Rc)
11550 { 11543 {
11551 dbug (1, dprintf ("[%06lx] %s,%d: Load mixer failed", 11544 dbug (1, dprintf ("[%06lx] %s,%d: Load mixer failed",
11552 UnMapId (Id), (char *)(FILE_), __LINE__)); 11545 UnMapId (Id), (char *)(FILE_), __LINE__));
11553 Info = _FACILITY_NOT_SUPPORTED;
11554 break; 11546 break;
11555 } 11547 }
11556 if (plci->internal_command) 11548 if (plci->internal_command)
@@ -11592,7 +11584,6 @@ static void mixer_command (dword Id, PLCI *plci, byte Rc)
11592 } while ((plci->li_plci_b_write_pos != plci->li_plci_b_req_pos) 11584 } while ((plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
11593 && !(plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG)); 11585 && !(plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG));
11594 } 11586 }
11595 Info = _FACILITY_NOT_SUPPORTED;
11596 break; 11587 break;
11597 } 11588 }
11598 if (plci->internal_command) 11589 if (plci->internal_command)
@@ -11610,7 +11601,6 @@ static void mixer_command (dword Id, PLCI *plci, byte Rc)
11610 { 11601 {
11611 dbug (1, dprintf ("[%06lx] %s,%d: Unload mixer failed", 11602 dbug (1, dprintf ("[%06lx] %s,%d: Unload mixer failed",
11612 UnMapId (Id), (char *)(FILE_), __LINE__)); 11603 UnMapId (Id), (char *)(FILE_), __LINE__));
11613 Info = _FACILITY_NOT_SUPPORTED;
11614 break; 11604 break;
11615 } 11605 }
11616 if (plci->internal_command) 11606 if (plci->internal_command)
@@ -12448,13 +12438,11 @@ static byte mixer_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI
12448static void mixer_indication_coefs_set (dword Id, PLCI *plci) 12438static void mixer_indication_coefs_set (dword Id, PLCI *plci)
12449{ 12439{
12450 dword d; 12440 dword d;
12451 DIVA_CAPI_ADAPTER *a;
12452 byte result[12]; 12441 byte result[12];
12453 12442
12454 dbug (1, dprintf ("[%06lx] %s,%d: mixer_indication_coefs_set", 12443 dbug (1, dprintf ("[%06lx] %s,%d: mixer_indication_coefs_set",
12455 UnMapId (Id), (char *)(FILE_), __LINE__)); 12444 UnMapId (Id), (char *)(FILE_), __LINE__));
12456 12445
12457 a = plci->adapter;
12458 if (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos) 12446 if (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos)
12459 { 12447 {
12460 do 12448 do
@@ -14111,13 +14099,11 @@ static void select_b_command (dword Id, PLCI *plci, byte Rc)
14111 14099
14112static void fax_connect_ack_command (dword Id, PLCI *plci, byte Rc) 14100static void fax_connect_ack_command (dword Id, PLCI *plci, byte Rc)
14113{ 14101{
14114 word Info;
14115 word internal_command; 14102 word internal_command;
14116 14103
14117 dbug (1, dprintf ("[%06lx] %s,%d: fax_connect_ack_command %02x %04x", 14104 dbug (1, dprintf ("[%06lx] %s,%d: fax_connect_ack_command %02x %04x",
14118 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command)); 14105 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
14119 14106
14120 Info = GOOD;
14121 internal_command = plci->internal_command; 14107 internal_command = plci->internal_command;
14122 plci->internal_command = 0; 14108 plci->internal_command = 0;
14123 switch (internal_command) 14109 switch (internal_command)
@@ -14160,13 +14146,11 @@ static void fax_connect_ack_command (dword Id, PLCI *plci, byte Rc)
14160 14146
14161static void fax_edata_ack_command (dword Id, PLCI *plci, byte Rc) 14147static void fax_edata_ack_command (dword Id, PLCI *plci, byte Rc)
14162{ 14148{
14163 word Info;
14164 word internal_command; 14149 word internal_command;
14165 14150
14166 dbug (1, dprintf ("[%06lx] %s,%d: fax_edata_ack_command %02x %04x", 14151 dbug (1, dprintf ("[%06lx] %s,%d: fax_edata_ack_command %02x %04x",
14167 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command)); 14152 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
14168 14153
14169 Info = GOOD;
14170 internal_command = plci->internal_command; 14154 internal_command = plci->internal_command;
14171 plci->internal_command = 0; 14155 plci->internal_command = 0;
14172 switch (internal_command) 14156 switch (internal_command)
@@ -14395,13 +14379,11 @@ static void rtp_connect_b3_req_command (dword Id, PLCI *plci, byte Rc)
14395 14379
14396static void rtp_connect_b3_res_command (dword Id, PLCI *plci, byte Rc) 14380static void rtp_connect_b3_res_command (dword Id, PLCI *plci, byte Rc)
14397{ 14381{
14398 word Info;
14399 word internal_command; 14382 word internal_command;
14400 14383
14401 dbug (1, dprintf ("[%06lx] %s,%d: rtp_connect_b3_res_command %02x %04x", 14384 dbug (1, dprintf ("[%06lx] %s,%d: rtp_connect_b3_res_command %02x %04x",
14402 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command)); 14385 UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
14403 14386
14404 Info = GOOD;
14405 internal_command = plci->internal_command; 14387 internal_command = plci->internal_command;
14406 plci->internal_command = 0; 14388 plci->internal_command = 0;
14407 switch (internal_command) 14389 switch (internal_command)
@@ -14423,7 +14405,6 @@ static void rtp_connect_b3_res_command (dword Id, PLCI *plci, byte Rc)
14423 { 14405 {
14424 dbug (1, dprintf ("[%06lx] %s,%d: RTP setting connect resp info failed %02x", 14406 dbug (1, dprintf ("[%06lx] %s,%d: RTP setting connect resp info failed %02x",
14425 UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); 14407 UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
14426 Info = _WRONG_STATE;
14427 break; 14408 break;
14428 } 14409 }
14429 if (plci_nl_busy (plci)) 14410 if (plci_nl_busy (plci))
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 4343abac0b13..b01a7be1300f 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -405,7 +405,7 @@ hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
405 u_char *bdata, int count) 405 u_char *bdata, int count)
406{ 406{
407 u_char *ptr, *ptr1, new_f2; 407 u_char *ptr, *ptr1, new_f2;
408 int total, maxlen, new_z2; 408 int maxlen, new_z2;
409 struct zt *zp; 409 struct zt *zp;
410 410
411 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) 411 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
@@ -431,7 +431,6 @@ hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
431 printk(KERN_WARNING "HFCPCI: receive out of memory\n"); 431 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
432 return; 432 return;
433 } 433 }
434 total = count;
435 count -= 3; 434 count -= 3;
436 ptr = skb_put(bch->rx_skb, count); 435 ptr = skb_put(bch->rx_skb, count);
437 436
@@ -968,7 +967,6 @@ static void
968ph_state_nt(struct dchannel *dch) 967ph_state_nt(struct dchannel *dch)
969{ 968{
970 struct hfc_pci *hc = dch->hw; 969 struct hfc_pci *hc = dch->hw;
971 u_char val;
972 970
973 if (dch->debug) 971 if (dch->debug)
974 printk(KERN_DEBUG "%s: NT newstate %x\n", 972 printk(KERN_DEBUG "%s: NT newstate %x\n",
@@ -982,7 +980,7 @@ ph_state_nt(struct dchannel *dch)
982 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; 980 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
983 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); 981 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
984 /* Clear already pending ints */ 982 /* Clear already pending ints */
985 val = Read_hfc(hc, HFCPCI_INT_S1); 983 (void) Read_hfc(hc, HFCPCI_INT_S1);
986 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE); 984 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
987 udelay(10); 985 udelay(10);
988 Write_hfc(hc, HFCPCI_STATES, 4); 986 Write_hfc(hc, HFCPCI_STATES, 4);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 8700474747e8..3ccbff13eaf2 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -118,14 +118,12 @@ static void
118ctrl_complete(struct urb *urb) 118ctrl_complete(struct urb *urb)
119{ 119{
120 struct hfcsusb *hw = (struct hfcsusb *) urb->context; 120 struct hfcsusb *hw = (struct hfcsusb *) urb->context;
121 struct ctrl_buf *buf;
122 121
123 if (debug & DBG_HFC_CALL_TRACE) 122 if (debug & DBG_HFC_CALL_TRACE)
124 printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); 123 printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
125 124
126 urb->dev = hw->dev; 125 urb->dev = hw->dev;
127 if (hw->ctrl_cnt) { 126 if (hw->ctrl_cnt) {
128 buf = &hw->ctrl_buff[hw->ctrl_out_idx];
129 hw->ctrl_cnt--; /* decrement actual count */ 127 hw->ctrl_cnt--; /* decrement actual count */
130 if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE) 128 if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
131 hw->ctrl_out_idx = 0; /* pointer wrap */ 129 hw->ctrl_out_idx = 0; /* pointer wrap */
@@ -1726,7 +1724,6 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1726static int 1724static int
1727setup_hfcsusb(struct hfcsusb *hw) 1725setup_hfcsusb(struct hfcsusb *hw)
1728{ 1726{
1729 int err;
1730 u_char b; 1727 u_char b;
1731 1728
1732 if (debug & DBG_HFC_CALL_TRACE) 1729 if (debug & DBG_HFC_CALL_TRACE)
@@ -1745,7 +1742,7 @@ setup_hfcsusb(struct hfcsusb *hw)
1745 } 1742 }
1746 1743
1747 /* first set the needed config, interface and alternate */ 1744 /* first set the needed config, interface and alternate */
1748 err = usb_set_interface(hw->dev, hw->if_used, hw->alt_used); 1745 (void) usb_set_interface(hw->dev, hw->if_used, hw->alt_used);
1749 1746
1750 hw->led_state = 0; 1747 hw->led_state = 0;
1751 1748
diff --git a/drivers/isdn/hisax/arcofi.c b/drivers/isdn/hisax/arcofi.c
index 85a8fd8dd0b7..21cbbe1d5563 100644
--- a/drivers/isdn/hisax/arcofi.c
+++ b/drivers/isdn/hisax/arcofi.c
@@ -30,8 +30,6 @@ add_arcofi_timer(struct IsdnCardState *cs) {
30 30
31static void 31static void
32send_arcofi(struct IsdnCardState *cs) { 32send_arcofi(struct IsdnCardState *cs) {
33 u_char val;
34
35 add_arcofi_timer(cs); 33 add_arcofi_timer(cs);
36 cs->dc.isac.mon_txp = 0; 34 cs->dc.isac.mon_txp = 0;
37 cs->dc.isac.mon_txc = cs->dc.isac.arcofi_list->len; 35 cs->dc.isac.mon_txc = cs->dc.isac.arcofi_list->len;
@@ -45,7 +43,7 @@ send_arcofi(struct IsdnCardState *cs) {
45 cs->dc.isac.mocr &= 0x0f; 43 cs->dc.isac.mocr &= 0x0f;
46 cs->dc.isac.mocr |= 0xa0; 44 cs->dc.isac.mocr |= 0xa0;
47 cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr); 45 cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
48 val = cs->readisac(cs, ISAC_MOSR); 46 (void) cs->readisac(cs, ISAC_MOSR);
49 cs->writeisac(cs, ISAC_MOX1, cs->dc.isac.mon_tx[cs->dc.isac.mon_txp++]); 47 cs->writeisac(cs, ISAC_MOX1, cs->dc.isac.mon_tx[cs->dc.isac.mon_txp++]);
50 cs->dc.isac.mocr |= 0x10; 48 cs->dc.isac.mocr |= 0x10;
51 cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr); 49 cs->writeisac(cs, ISAC_MOCR, cs->dc.isac.mocr);
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index 496d477af0f8..9e5e87be756b 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -129,12 +129,10 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
129 129
130static int __devinit elsa_cs_config(struct pcmcia_device *link) 130static int __devinit elsa_cs_config(struct pcmcia_device *link)
131{ 131{
132 local_info_t *dev;
133 int i; 132 int i;
134 IsdnCard_t icard; 133 IsdnCard_t icard;
135 134
136 dev_dbg(&link->dev, "elsa_config(0x%p)\n", link); 135 dev_dbg(&link->dev, "elsa_config(0x%p)\n", link);
137 dev = link->priv;
138 136
139 link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; 137 link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
140 138
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index cbda3790a10d..3fa9f6171095 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -109,11 +109,10 @@ static void change_speed(struct IsdnCardState *cs, int baud)
109{ 109{
110 int quot = 0, baud_base; 110 int quot = 0, baud_base;
111 unsigned cval, fcr = 0; 111 unsigned cval, fcr = 0;
112 int bits;
113 112
114 113
115 /* byte size and parity */ 114 /* byte size and parity */
116 cval = 0x03; bits = 10; 115 cval = 0x03;
117 /* Determine divisor based on baud rate */ 116 /* Determine divisor based on baud rate */
118 baud_base = BASE_BAUD; 117 baud_base = BASE_BAUD;
119 quot = baud_base / baud; 118 quot = baud_base / baud;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index ed9527aa5f2c..f407de0e006d 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -258,11 +258,9 @@ static void
258ctrl_complete(struct urb *urb) 258ctrl_complete(struct urb *urb)
259{ 259{
260 hfcusb_data *hfc = (hfcusb_data *) urb->context; 260 hfcusb_data *hfc = (hfcusb_data *) urb->context;
261 ctrl_buft *buf;
262 261
263 urb->dev = hfc->dev; 262 urb->dev = hfc->dev;
264 if (hfc->ctrl_cnt) { 263 if (hfc->ctrl_cnt) {
265 buf = &hfc->ctrl_buff[hfc->ctrl_out_idx];
266 hfc->ctrl_cnt--; /* decrement actual count */ 264 hfc->ctrl_cnt--; /* decrement actual count */
267 if (++hfc->ctrl_out_idx >= HFC_CTRL_BUFSIZE) 265 if (++hfc->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
268 hfc->ctrl_out_idx = 0; /* pointer wrap */ 266 hfc->ctrl_out_idx = 0; /* pointer wrap */
@@ -1097,7 +1095,7 @@ static int
1097hfc_usb_init(hfcusb_data * hfc) 1095hfc_usb_init(hfcusb_data * hfc)
1098{ 1096{
1099 usb_fifo *fifo; 1097 usb_fifo *fifo;
1100 int i, err; 1098 int i;
1101 u_char b; 1099 u_char b;
1102 struct hisax_b_if *p_b_if[2]; 1100 struct hisax_b_if *p_b_if[2];
1103 1101
@@ -1112,7 +1110,7 @@ hfc_usb_init(hfcusb_data * hfc)
1112 } 1110 }
1113 1111
1114 /* first set the needed config, interface and alternate */ 1112 /* first set the needed config, interface and alternate */
1115 err = usb_set_interface(hfc->dev, hfc->if_used, hfc->alt_used); 1113 usb_set_interface(hfc->dev, hfc->if_used, hfc->alt_used);
1116 1114
1117 /* do Chip reset */ 1115 /* do Chip reset */
1118 write_usb(hfc, HFCUSB_CIRM, 8); 1116 write_usb(hfc, HFCUSB_CIRM, 8);
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 332104103e18..690840444184 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -96,7 +96,7 @@ dch_l2l1(struct PStack *st, int pr, void *arg)
96{ 96{
97 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware; 97 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
98 struct sk_buff *skb = arg; 98 struct sk_buff *skb = arg;
99 u_char cda1_cr, cda2_cr; 99 u_char cda1_cr;
100 100
101 switch (pr) { 101 switch (pr) {
102 case (PH_DATA |REQUEST): 102 case (PH_DATA |REQUEST):
@@ -163,7 +163,7 @@ dch_l2l1(struct PStack *st, int pr, void *arg)
163 cs->writeisac(cs, IPACX_CDA_TSDP10, 0x80); // Timeslot 0 is B1 163 cs->writeisac(cs, IPACX_CDA_TSDP10, 0x80); // Timeslot 0 is B1
164 cs->writeisac(cs, IPACX_CDA_TSDP11, 0x81); // Timeslot 0 is B1 164 cs->writeisac(cs, IPACX_CDA_TSDP11, 0x81); // Timeslot 0 is B1
165 cda1_cr = cs->readisac(cs, IPACX_CDA1_CR); 165 cda1_cr = cs->readisac(cs, IPACX_CDA1_CR);
166 cda2_cr = cs->readisac(cs, IPACX_CDA2_CR); 166 (void) cs->readisac(cs, IPACX_CDA2_CR);
167 if ((long)arg &1) { // loop B1 167 if ((long)arg &1) { // loop B1
168 cs->writeisac(cs, IPACX_CDA1_CR, cda1_cr |0x0a); 168 cs->writeisac(cs, IPACX_CDA1_CR, cda1_cr |0x0a);
169 } 169 }
diff --git a/drivers/isdn/hisax/jade.c b/drivers/isdn/hisax/jade.c
index ea8f840871d0..a06cea09158b 100644
--- a/drivers/isdn/hisax/jade.c
+++ b/drivers/isdn/hisax/jade.c
@@ -23,10 +23,9 @@
23int 23int
24JadeVersion(struct IsdnCardState *cs, char *s) 24JadeVersion(struct IsdnCardState *cs, char *s)
25{ 25{
26 int ver,i; 26 int ver;
27 int to = 50; 27 int to = 50;
28 cs->BC_Write_Reg(cs, -1, 0x50, 0x19); 28 cs->BC_Write_Reg(cs, -1, 0x50, 0x19);
29 i=0;
30 while (to) { 29 while (to) {
31 udelay(1); 30 udelay(1);
32 ver = cs->BC_Read_Reg(cs, -1, 0x60); 31 ver = cs->BC_Read_Reg(cs, -1, 0x60);
diff --git a/drivers/isdn/hisax/l3dss1.c b/drivers/isdn/hisax/l3dss1.c
index 8e2fd02ecce0..b0d9ab1f21c0 100644
--- a/drivers/isdn/hisax/l3dss1.c
+++ b/drivers/isdn/hisax/l3dss1.c
@@ -2943,7 +2943,7 @@ global_handler(struct PStack *st, int mt, struct sk_buff *skb)
2943static void 2943static void
2944dss1up(struct PStack *st, int pr, void *arg) 2944dss1up(struct PStack *st, int pr, void *arg)
2945{ 2945{
2946 int i, mt, cr, cause, callState; 2946 int i, mt, cr, callState;
2947 char *ptr; 2947 char *ptr;
2948 u_char *p; 2948 u_char *p;
2949 struct sk_buff *skb = arg; 2949 struct sk_buff *skb = arg;
@@ -3034,12 +3034,10 @@ dss1up(struct PStack *st, int pr, void *arg)
3034 return; 3034 return;
3035 } 3035 }
3036 } else if (mt == MT_STATUS) { 3036 } else if (mt == MT_STATUS) {
3037 cause = 0;
3038 if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) { 3037 if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) {
3039 ptr++; 3038 ptr++;
3040 if (*ptr++ == 2) 3039 if (*ptr++ == 2)
3041 ptr++; 3040 ptr++;
3042 cause = *ptr & 0x7f;
3043 } 3041 }
3044 callState = 0; 3042 callState = 0;
3045 if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) { 3043 if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) {
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
index 7b229c0ce115..092dcbb39d94 100644
--- a/drivers/isdn/hisax/l3ni1.c
+++ b/drivers/isdn/hisax/l3ni1.c
@@ -2883,7 +2883,7 @@ global_handler(struct PStack *st, int mt, struct sk_buff *skb)
2883static void 2883static void
2884ni1up(struct PStack *st, int pr, void *arg) 2884ni1up(struct PStack *st, int pr, void *arg)
2885{ 2885{
2886 int i, mt, cr, cause, callState; 2886 int i, mt, cr, callState;
2887 char *ptr; 2887 char *ptr;
2888 u_char *p; 2888 u_char *p;
2889 struct sk_buff *skb = arg; 2889 struct sk_buff *skb = arg;
@@ -2986,12 +2986,10 @@ ni1up(struct PStack *st, int pr, void *arg)
2986 return; 2986 return;
2987 } 2987 }
2988 } else if (mt == MT_STATUS) { 2988 } else if (mt == MT_STATUS) {
2989 cause = 0;
2990 if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) { 2989 if ((ptr = findie(skb->data, skb->len, IE_CAUSE, 0)) != NULL) {
2991 ptr++; 2990 ptr++;
2992 if (*ptr++ == 2) 2991 if (*ptr++ == 2)
2993 ptr++; 2992 ptr++;
2994 cause = *ptr & 0x7f;
2995 } 2993 }
2996 callState = 0; 2994 callState = 0;
2997 if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) { 2995 if ((ptr = findie(skb->data, skb->len, IE_CALL_STATE, 0)) != NULL) {
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 64f78a8c28c5..b9054cb7a0da 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -377,7 +377,6 @@ struct st5481_bcs {
377}; 377};
378 378
379struct st5481_adapter { 379struct st5481_adapter {
380 struct list_head list;
381 int number_of_leds; 380 int number_of_leds;
382 struct usb_device *usb_dev; 381 struct usb_device *usb_dev;
383 struct hisax_d_if hisax_d_if; 382 struct hisax_d_if hisax_d_if;
diff --git a/drivers/isdn/hisax/st5481_init.c b/drivers/isdn/hisax/st5481_init.c
index 13751237bfcd..9f7fd18ff773 100644
--- a/drivers/isdn/hisax/st5481_init.c
+++ b/drivers/isdn/hisax/st5481_init.c
@@ -46,8 +46,6 @@ module_param(debug, int, 0);
46#endif 46#endif
47int st5481_debug; 47int st5481_debug;
48 48
49static LIST_HEAD(adapter_list);
50
51/* ====================================================================== 49/* ======================================================================
52 * registration/deregistration with the USB layer 50 * registration/deregistration with the USB layer
53 */ 51 */
@@ -86,7 +84,6 @@ static int probe_st5481(struct usb_interface *intf,
86 adapter->bcs[i].b_if.ifc.priv = &adapter->bcs[i]; 84 adapter->bcs[i].b_if.ifc.priv = &adapter->bcs[i];
87 adapter->bcs[i].b_if.ifc.l2l1 = st5481_b_l2l1; 85 adapter->bcs[i].b_if.ifc.l2l1 = st5481_b_l2l1;
88 } 86 }
89 list_add(&adapter->list, &adapter_list);
90 87
91 retval = st5481_setup_usb(adapter); 88 retval = st5481_setup_usb(adapter);
92 if (retval < 0) 89 if (retval < 0)
@@ -125,6 +122,7 @@ static int probe_st5481(struct usb_interface *intf,
125 err_usb: 122 err_usb:
126 st5481_release_usb(adapter); 123 st5481_release_usb(adapter);
127 err: 124 err:
125 kfree(adapter);
128 return -EIO; 126 return -EIO;
129} 127}
130 128
@@ -142,8 +140,6 @@ static void disconnect_st5481(struct usb_interface *intf)
142 if (!adapter) 140 if (!adapter)
143 return; 141 return;
144 142
145 list_del(&adapter->list);
146
147 st5481_stop(adapter); 143 st5481_stop(adapter);
148 st5481_release_b(&adapter->bcs[1]); 144 st5481_release_b(&adapter->bcs[1]);
149 st5481_release_b(&adapter->bcs[0]); 145 st5481_release_b(&adapter->bcs[0]);
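
The one-line kfree() added to the error path above closes a leak in the usual probe unwind sequence, sketched below with stand-in names (my_adapter, setup_hw, release_hw), not st5481 code:

#include <linux/errno.h>
#include <linux/slab.h>

struct my_adapter { int placeholder; };

static int setup_hw(struct my_adapter *a) { return 0; }		/* stub */
static void release_hw(struct my_adapter *a) { }		/* stub */

static int my_probe(void)
{
	struct my_adapter *adapter;
	int retval;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	retval = setup_hw(adapter);
	if (retval < 0)
		goto err;

	return 0;
err:
	release_hw(adapter);
	kfree(adapter);		/* undo the allocation as well */
	return -EIO;
}
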
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index aa25e183bf79..360f9ec7c802 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -111,12 +111,10 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
111 111
112static int __devinit teles_cs_config(struct pcmcia_device *link) 112static int __devinit teles_cs_config(struct pcmcia_device *link)
113{ 113{
114 local_info_t *dev;
115 int i; 114 int i;
116 IsdnCard_t icard; 115 IsdnCard_t icard;
117 116
118 dev_dbg(&link->dev, "teles_config(0x%p)\n", link); 117 dev_dbg(&link->dev, "teles_config(0x%p)\n", link);
119 dev = link->priv;
120 118
121 i = pcmcia_loop_config(link, teles_cs_configcheck, NULL); 119 i = pcmcia_loop_config(link, teles_cs_configcheck, NULL);
122 if (i != 0) 120 if (i != 0)
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 2ee93d04b2dd..236cc7dadfd0 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -155,7 +155,6 @@ put_log_buffer(hysdn_card * card, char *cp)
155static ssize_t 155static ssize_t
156hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t * off) 156hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t * off)
157{ 157{
158 unsigned long u = 0;
159 int rc; 158 int rc;
160 unsigned char valbuf[128]; 159 unsigned char valbuf[128];
161 hysdn_card *card = file->private_data; 160 hysdn_card *card = file->private_data;
@@ -167,12 +166,10 @@ hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t
167 166
168 valbuf[count] = 0; /* terminating 0 */ 167 valbuf[count] = 0; /* terminating 0 */
169 168
170 rc = strict_strtoul(valbuf, 0, &u); 169 rc = kstrtoul(valbuf, 0, &card->debug_flags);
171 170 if (rc < 0)
172 if (rc == 0) { 171 return rc;
173 card->debug_flags = u; /* remember debug flags */ 172 hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
174 hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
175 }
176 return (count); 173 return (count);
177} /* hysdn_log_write */ 174} /* hysdn_log_write */
178 175
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 15632bd2f643..6ed82add6ffa 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -399,13 +399,8 @@ isdn_all_eaz(int di, int ch)
399#include <linux/isdn/capicmd.h> 399#include <linux/isdn/capicmd.h>
400 400
401static int 401static int
402isdn_capi_rec_hl_msg(capi_msg *cm) { 402isdn_capi_rec_hl_msg(capi_msg *cm)
403 403{
404 int di;
405 int ch;
406
407 di = (cm->adr.Controller & 0x7f) -1;
408 ch = isdn_dc2minor(di, (cm->adr.Controller>>8)& 0x7f);
409 switch(cm->Command) { 404 switch(cm->Command) {
410 case CAPI_FACILITY: 405 case CAPI_FACILITY:
411 /* in the moment only handled in tty */ 406 /* in the moment only handled in tty */
@@ -1278,7 +1273,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
1278 uint minor = iminor(file->f_path.dentry->d_inode); 1273 uint minor = iminor(file->f_path.dentry->d_inode);
1279 isdn_ctrl c; 1274 isdn_ctrl c;
1280 int drvidx; 1275 int drvidx;
1281 int chidx;
1282 int ret; 1276 int ret;
1283 int i; 1277 int i;
1284 char __user *p; 1278 char __user *p;
@@ -1340,7 +1334,6 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
1340 drvidx = isdn_minor2drv(minor); 1334 drvidx = isdn_minor2drv(minor);
1341 if (drvidx < 0) 1335 if (drvidx < 0)
1342 return -ENODEV; 1336 return -ENODEV;
1343 chidx = isdn_minor2chan(minor);
1344 if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) 1337 if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING))
1345 return -ENODEV; 1338 return -ENODEV;
1346 return 0; 1339 return 0;
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 2a7d17c19489..97988111e45a 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1678,7 +1678,6 @@ isdn_net_ciscohdlck_slarp_in(isdn_net_local *lp, struct sk_buff *skb)
1678 u32 your_seq; 1678 u32 your_seq;
1679 __be32 local; 1679 __be32 local;
1680 __be32 *addr, *mask; 1680 __be32 *addr, *mask;
1681 u16 unused;
1682 1681
1683 if (skb->len < 14) 1682 if (skb->len < 14)
1684 return; 1683 return;
@@ -1722,7 +1721,6 @@ isdn_net_ciscohdlck_slarp_in(isdn_net_local *lp, struct sk_buff *skb)
1722 lp->cisco_last_slarp_in = jiffies; 1721 lp->cisco_last_slarp_in = jiffies;
1723 my_seq = be32_to_cpup((__be32 *)(p + 0)); 1722 my_seq = be32_to_cpup((__be32 *)(p + 0));
1724 your_seq = be32_to_cpup((__be32 *)(p + 4)); 1723 your_seq = be32_to_cpup((__be32 *)(p + 4));
1725 unused = be16_to_cpup((__be16 *)(p + 8));
1726 p += 10; 1724 p += 10;
1727 lp->cisco_yourseq = my_seq; 1725 lp->cisco_yourseq = my_seq;
1728 lp->cisco_mineseen = your_seq; 1726 lp->cisco_mineseen = your_seq;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 607d846ae063..d8504279e502 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -998,7 +998,6 @@ isdn_tty_change_speed(modem_info * info)
998{ 998{
999 uint cflag, 999 uint cflag,
1000 cval, 1000 cval,
1001 fcr,
1002 quot; 1001 quot;
1003 int i; 1002 int i;
1004 1003
@@ -1037,7 +1036,6 @@ isdn_tty_change_speed(modem_info * info)
1037 cval |= UART_LCR_PARITY; 1036 cval |= UART_LCR_PARITY;
1038 if (!(cflag & PARODD)) 1037 if (!(cflag & PARODD))
1039 cval |= UART_LCR_EPAR; 1038 cval |= UART_LCR_EPAR;
1040 fcr = 0;
1041 1039
1042 /* CTS flow control flag and modem status interrupts */ 1040 /* CTS flow control flag and modem status interrupts */
1043 if (cflag & CRTSCTS) { 1041 if (cflag & CRTSCTS) {
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index d0aeb44ee7c0..5bc00156315e 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -1640,7 +1640,7 @@ l2_tei_remove(struct FsmInst *fi, int event, void *arg)
1640} 1640}
1641 1641
1642static void 1642static void
1643l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg) 1643l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
1644{ 1644{
1645 struct layer2 *l2 = fi->userdata; 1645 struct layer2 *l2 = fi->userdata;
1646 struct sk_buff *skb = arg; 1646 struct sk_buff *skb = arg;
@@ -1654,7 +1654,7 @@ l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
1654} 1654}
1655 1655
1656static void 1656static void
1657l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg) 1657l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
1658{ 1658{
1659 struct layer2 *l2 = fi->userdata; 1659 struct layer2 *l2 = fi->userdata;
1660 struct sk_buff *skb = arg; 1660 struct sk_buff *skb = arg;
@@ -1671,7 +1671,7 @@ l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
1671} 1671}
1672 1672
1673static void 1673static void
1674l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg) 1674l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
1675{ 1675{
1676 struct layer2 *l2 = fi->userdata; 1676 struct layer2 *l2 = fi->userdata;
1677 struct sk_buff *skb = arg; 1677 struct sk_buff *skb = arg;
@@ -1685,7 +1685,7 @@ l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
1685} 1685}
1686 1686
1687static void 1687static void
1688l2_persistant_da(struct FsmInst *fi, int event, void *arg) 1688l2_persistent_da(struct FsmInst *fi, int event, void *arg)
1689{ 1689{
1690 struct layer2 *l2 = fi->userdata; 1690 struct layer2 *l2 = fi->userdata;
1691 struct sk_buff *skb = arg; 1691 struct sk_buff *skb = arg;
@@ -1829,14 +1829,14 @@ static struct FsmNode L2FnList[] =
1829 {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error}, 1829 {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
1830 {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest}, 1830 {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1831 {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest}, 1831 {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1832 {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da}, 1832 {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1833 {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove}, 1833 {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
1834 {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove}, 1834 {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
1835 {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da}, 1835 {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1836 {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da}, 1836 {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
1837 {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da}, 1837 {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
1838 {ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da}, 1838 {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
1839 {ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da}, 1839 {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
1840}; 1840};
1841 1841
1842static int 1842static int
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 7446d8b4282d..8e325227b4c0 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -457,6 +457,9 @@ static int data_sock_getsockopt(struct socket *sock, int level, int optname,
457 if (get_user(len, optlen)) 457 if (get_user(len, optlen))
458 return -EFAULT; 458 return -EFAULT;
459 459
460 if (len != sizeof(char))
461 return -EINVAL;
462
460 switch (optname) { 463 switch (optname) {
461 case MISDN_TIME_STAMP: 464 case MISDN_TIME_STAMP:
462 if (_pms(sk)->cmask & MISDN_TIME_STAMP) 465 if (_pms(sk)->cmask & MISDN_TIME_STAMP)
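The mISDN socket.c hunk above rejects getsockopt() requests whose option length is not exactly one byte before the option switch runs. A stripped-down sketch of that guard in a getsockopt handler; the option name and its value are hypothetical, not part of the mISDN ABI:

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/uaccess.h>

#define EXAMPLE_OPT_FLAG	1	/* hypothetical one-byte option */

static int example_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	unsigned char val;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	/* every option handled below is exactly one byte long */
	if (len != sizeof(char))
		return -EINVAL;

	switch (optname) {
	case EXAMPLE_OPT_FLAG:
		val = 1;	/* would be read from socket state */
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(val, optval))
		return -EFAULT;
	return 0;
}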
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 91abb965fb44..5f25889e27ef 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
185static int nopnp; 185static int nopnp;
186#endif 186#endif
187 187
188static int __devinit el3_common_init(struct net_device *dev); 188static int el3_common_init(struct net_device *dev);
189static void el3_common_remove(struct net_device *dev); 189static void el3_common_remove(struct net_device *dev);
190static ushort id_read_eeprom(int index); 190static ushort id_read_eeprom(int index);
191static ushort read_eeprom(int ioaddr, int index); 191static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
395static int isa_registered; 395static int isa_registered;
396 396
397#ifdef CONFIG_PNP 397#ifdef CONFIG_PNP
398static struct pnp_device_id el3_pnp_ids[] = { 398static const struct pnp_device_id el3_pnp_ids[] __devinitconst = {
399 { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ 399 { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
400 { .id = "TCM5091" }, /* 3Com Etherlink III */ 400 { .id = "TCM5091" }, /* 3Com Etherlink III */
401 { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ 401 { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
478#endif /* CONFIG_PNP */ 478#endif /* CONFIG_PNP */
479 479
480#ifdef CONFIG_EISA 480#ifdef CONFIG_EISA
481static struct eisa_device_id el3_eisa_ids[] = { 481static const struct eisa_device_id el3_eisa_ids[] __devinitconst = {
482 { "TCM5090" }, 482 { "TCM5090" },
483 { "TCM5091" }, 483 { "TCM5091" },
484 { "TCM5092" }, 484 { "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
508#ifdef CONFIG_MCA 508#ifdef CONFIG_MCA
509static int el3_mca_probe(struct device *dev); 509static int el3_mca_probe(struct device *dev);
510 510
511static short el3_mca_adapter_ids[] __initdata = { 511static const short el3_mca_adapter_ids[] __devinitconst = {
512 0x627c, 512 0x627c,
513 0x627d, 513 0x627d,
514 0x62db, 514 0x62db,
@@ -517,7 +517,7 @@ static short el3_mca_adapter_ids[] __initdata = {
517 0x0000 517 0x0000
518}; 518};
519 519
520static char *el3_mca_adapter_names[] __initdata = { 520static const char *const el3_mca_adapter_names[] __devinitconst = {
521 "3Com 3c529 EtherLink III (10base2)", 521 "3Com 3c529 EtherLink III (10base2)",
522 "3Com 3c529 EtherLink III (10baseT)", 522 "3Com 3c529 EtherLink III (10baseT)",
523 "3Com 3c529 EtherLink III (test mode)", 523 "3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
601} 601}
602 602
603#ifdef CONFIG_MCA 603#ifdef CONFIG_MCA
604static int __init el3_mca_probe(struct device *device) 604static int __devinit el3_mca_probe(struct device *device)
605{ 605{
606 /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, 606 /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
607 * heavily modified by Chris Beauregard 607 * heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __init el3_mca_probe(struct device *device)
671#endif /* CONFIG_MCA */ 671#endif /* CONFIG_MCA */
672 672
673#ifdef CONFIG_EISA 673#ifdef CONFIG_EISA
674static int __init el3_eisa_probe (struct device *device) 674static int __devinit el3_eisa_probe (struct device *device)
675{ 675{
676 short i; 676 short i;
677 int ioaddr, irq, if_port; 677 int ioaddr, irq, if_port;
@@ -1207,7 +1207,7 @@ el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1207 ecmd->duplex = DUPLEX_FULL; 1207 ecmd->duplex = DUPLEX_FULL;
1208 } 1208 }
1209 1209
1210 ecmd->speed = SPEED_10; 1210 ethtool_cmd_speed_set(ecmd, SPEED_10);
1211 EL3WINDOW(1); 1211 EL3WINDOW(1);
1212 return 0; 1212 return 0;
1213} 1213}
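The 3c509 hunk above, like most of the ethtool changes further down, stops writing ecmd->speed directly and goes through ethtool_cmd_speed_set()/ethtool_cmd_speed(), which pack a 32-bit value into the legacy 16-bit speed field plus speed_hi. A minimal get_settings/set_settings shape using the accessors; the fixed 10 Mb/s link is illustrative only:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	/* -1 (all ones) is the conventional "speed unknown" value */
	ethtool_cmd_speed_set(ecmd, netif_carrier_ok(dev) ? SPEED_10 : -1);
	ecmd->duplex = DUPLEX_HALF;
	ecmd->autoneg = AUTONEG_DISABLE;
	return 0;
}

static int example_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	u32 speed = ethtool_cmd_speed(ecmd);	/* read side of the accessor */

	if (speed != SPEED_10 || ecmd->duplex != DUPLEX_HALF)
		return -EINVAL;
	return 0;
}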
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 8cc22568ebd3..99f43d275442 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
901#endif /* !CONFIG_PM */ 901#endif /* !CONFIG_PM */
902 902
903#ifdef CONFIG_EISA 903#ifdef CONFIG_EISA
904static struct eisa_device_id vortex_eisa_ids[] = { 904static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = {
905 { "TCM5920", CH_3C592 }, 905 { "TCM5920", CH_3C592 },
906 { "TCM5970", CH_3C597 }, 906 { "TCM5970", CH_3C597 },
907 { "" } 907 { "" }
908}; 908};
909MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); 909MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
910 910
911static int __init vortex_eisa_probe(struct device *device) 911static int __devinit vortex_eisa_probe(struct device *device)
912{ 912{
913 void __iomem *ioaddr; 913 void __iomem *ioaddr;
914 struct eisa_device *edev; 914 struct eisa_device *edev;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index dd16e83933a2..10c45051caea 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -758,8 +758,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
758 758
759 entry = cp->tx_head; 759 entry = cp->tx_head;
760 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 760 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
761 if (dev->features & NETIF_F_TSO) 761 mss = skb_shinfo(skb)->gso_size;
762 mss = skb_shinfo(skb)->gso_size;
763 762
764 if (skb_shinfo(skb)->nr_frags == 0) { 763 if (skb_shinfo(skb)->nr_frags == 0) {
765 struct cp_desc *txd = &cp->tx_ring[entry]; 764 struct cp_desc *txd = &cp->tx_ring[entry];
@@ -1416,32 +1415,23 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
1416 cp->msg_enable = value; 1415 cp->msg_enable = value;
1417} 1416}
1418 1417
1419static u32 cp_get_rx_csum(struct net_device *dev) 1418static int cp_set_features(struct net_device *dev, u32 features)
1420{ 1419{
1421 struct cp_private *cp = netdev_priv(dev); 1420 struct cp_private *cp = netdev_priv(dev);
1422 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0; 1421 unsigned long flags;
1423}
1424 1422
1425static int cp_set_rx_csum(struct net_device *dev, u32 data) 1423 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1426{ 1424 return 0;
1427 struct cp_private *cp = netdev_priv(dev);
1428 u16 cmd = cp->cpcmd, newcmd;
1429 1425
1430 newcmd = cmd; 1426 spin_lock_irqsave(&cp->lock, flags);
1431 1427
1432 if (data) 1428 if (features & NETIF_F_RXCSUM)
1433 newcmd |= RxChkSum; 1429 cp->cpcmd |= RxChkSum;
1434 else 1430 else
1435 newcmd &= ~RxChkSum; 1431 cp->cpcmd &= ~RxChkSum;
1436
1437 if (newcmd != cmd) {
1438 unsigned long flags;
1439 1432
1440 spin_lock_irqsave(&cp->lock, flags); 1433 cpw16_f(CpCmd, cp->cpcmd);
1441 cp->cpcmd = newcmd; 1434 spin_unlock_irqrestore(&cp->lock, flags);
1442 cpw16_f(CpCmd, newcmd);
1443 spin_unlock_irqrestore(&cp->lock, flags);
1444 }
1445 1435
1446 return 0; 1436 return 0;
1447} 1437}
@@ -1554,11 +1544,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
1554 .get_link = ethtool_op_get_link, 1544 .get_link = ethtool_op_get_link,
1555 .get_msglevel = cp_get_msglevel, 1545 .get_msglevel = cp_get_msglevel,
1556 .set_msglevel = cp_set_msglevel, 1546 .set_msglevel = cp_set_msglevel,
1557 .get_rx_csum = cp_get_rx_csum,
1558 .set_rx_csum = cp_set_rx_csum,
1559 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1560 .set_sg = ethtool_op_set_sg,
1561 .set_tso = ethtool_op_set_tso,
1562 .get_regs = cp_get_regs, 1547 .get_regs = cp_get_regs,
1563 .get_wol = cp_get_wol, 1548 .get_wol = cp_get_wol,
1564 .set_wol = cp_set_wol, 1549 .set_wol = cp_set_wol,
@@ -1831,6 +1816,7 @@ static const struct net_device_ops cp_netdev_ops = {
1831 .ndo_do_ioctl = cp_ioctl, 1816 .ndo_do_ioctl = cp_ioctl,
1832 .ndo_start_xmit = cp_start_xmit, 1817 .ndo_start_xmit = cp_start_xmit,
1833 .ndo_tx_timeout = cp_tx_timeout, 1818 .ndo_tx_timeout = cp_tx_timeout,
1819 .ndo_set_features = cp_set_features,
1834#if CP_VLAN_TAG_USED 1820#if CP_VLAN_TAG_USED
1835 .ndo_vlan_rx_register = cp_vlan_rx_register, 1821 .ndo_vlan_rx_register = cp_vlan_rx_register,
1836#endif 1822#endif
@@ -1934,6 +1920,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1934 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | 1920 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1935 PCIMulRW | RxChkSum | CpRxOn | CpTxOn; 1921 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1936 1922
1923 dev->features |= NETIF_F_RXCSUM;
1924 dev->hw_features |= NETIF_F_RXCSUM;
1925
1937 regs = ioremap(pciaddr, CP_REGS_SIZE); 1926 regs = ioremap(pciaddr, CP_REGS_SIZE);
1938 if (!regs) { 1927 if (!regs) {
1939 rc = -EIO; 1928 rc = -EIO;
@@ -1966,9 +1955,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1966 if (pci_using_dac) 1955 if (pci_using_dac)
1967 dev->features |= NETIF_F_HIGHDMA; 1956 dev->features |= NETIF_F_HIGHDMA;
1968 1957
1969#if 0 /* disabled by default until verified */ 1958 /* disabled by default until verified */
1970 dev->features |= NETIF_F_TSO; 1959 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
1971#endif
1972 1960
1973 dev->irq = pdev->irq; 1961 dev->irq = pdev->irq;
1974 1962
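The 8139cp conversion above drops the per-feature ethtool hooks (get_rx_csum/set_rx_csum/set_tx_csum/set_sg/set_tso) in favour of netdev->hw_features plus one ndo_set_features callback, which the core only invokes when the effective feature set actually changes. The same shape for a hypothetical device with a single RX-checksum enable bit; the hardware hook is an assumption, not the 8139cp code:

#include <linux/netdevice.h>

/* hypothetical hardware hook standing in for a real register write */
static void example_hw_set_rxcsum(void *priv, bool on)
{
	/* ... */
}

static int example_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		example_hw_set_rxcsum(netdev_priv(dev),
				      !!(features & NETIF_F_RXCSUM));
	return 0;
}

static void example_advertise_features(struct net_device *dev)
{
	/* bits user space may toggle at runtime ... */
	dev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG;
	/* ... and the subset enabled by default */
	dev->features = dev->hw_features;
}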
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index ee648fe5d96f..82260ca70323 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2658,15 +2658,15 @@ static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2658 2658
2659 link = readl(&regs->GigLnkState); 2659 link = readl(&regs->GigLnkState);
2660 if (link & LNK_1000MB) 2660 if (link & LNK_1000MB)
2661 ecmd->speed = SPEED_1000; 2661 ethtool_cmd_speed_set(ecmd, SPEED_1000);
2662 else { 2662 else {
2663 link = readl(&regs->FastLnkState); 2663 link = readl(&regs->FastLnkState);
2664 if (link & LNK_100MB) 2664 if (link & LNK_100MB)
2665 ecmd->speed = SPEED_100; 2665 ethtool_cmd_speed_set(ecmd, SPEED_100);
2666 else if (link & LNK_10MB) 2666 else if (link & LNK_10MB)
2667 ecmd->speed = SPEED_10; 2667 ethtool_cmd_speed_set(ecmd, SPEED_10);
2668 else 2668 else
2669 ecmd->speed = 0; 2669 ethtool_cmd_speed_set(ecmd, 0);
2670 } 2670 }
2671 if (link & LNK_FULL_DUPLEX) 2671 if (link & LNK_FULL_DUPLEX)
2672 ecmd->duplex = DUPLEX_FULL; 2672 ecmd->duplex = DUPLEX_FULL;
@@ -2718,9 +2718,9 @@ static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2718 link |= LNK_TX_FLOW_CTL_Y; 2718 link |= LNK_TX_FLOW_CTL_Y;
2719 if (ecmd->autoneg == AUTONEG_ENABLE) 2719 if (ecmd->autoneg == AUTONEG_ENABLE)
2720 link |= LNK_NEGOTIATE; 2720 link |= LNK_NEGOTIATE;
2721 if (ecmd->speed != speed) { 2721 if (ethtool_cmd_speed(ecmd) != speed) {
2722 link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB); 2722 link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
2723 switch (speed) { 2723 switch (ethtool_cmd_speed(ecmd)) {
2724 case SPEED_1000: 2724 case SPEED_1000:
2725 link |= LNK_1000MB; 2725 link |= LNK_1000MB;
2726 break; 2726 break;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 4af235d41fda..e252cd595016 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -591,10 +591,11 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
591static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 591static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
592{ 592{
593 cmd->supported = etherh_priv(dev)->supported; 593 cmd->supported = etherh_priv(dev)->supported;
594 cmd->speed = SPEED_10; 594 ethtool_cmd_speed_set(cmd, SPEED_10);
595 cmd->duplex = DUPLEX_HALF; 595 cmd->duplex = DUPLEX_HALF;
596 cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; 596 cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
597 cmd->autoneg = dev->flags & IFF_AUTOMEDIA ? AUTONEG_ENABLE : AUTONEG_DISABLE; 597 cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ?
598 AUTONEG_ENABLE : AUTONEG_DISABLE);
598 return 0; 599 return 0;
599} 600}
600 601
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index aa07657744c3..a7b0caa18179 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -891,15 +891,16 @@ ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
891 cmd->advertising |= ADVERTISED_Pause; 891 cmd->advertising |= ADVERTISED_Pause;
892 cmd->autoneg = AUTONEG_ENABLE; 892 cmd->autoneg = AUTONEG_ENABLE;
893 893
894 cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; 894 ethtool_cmd_speed_set(cmd,
895 (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
895 cmd->duplex = (ctrl & WMC_WDS) ? 896 cmd->duplex = (ctrl & WMC_WDS) ?
896 DUPLEX_FULL : DUPLEX_HALF; 897 DUPLEX_FULL : DUPLEX_HALF;
897 } else { 898 } else {
898 /* auto-negotiation is disabled */ 899 /* auto-negotiation is disabled */
899 cmd->autoneg = AUTONEG_DISABLE; 900 cmd->autoneg = AUTONEG_DISABLE;
900 901
901 cmd->speed = (ctrl & WMC_WANF100) ? 902 ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
902 SPEED_100 : SPEED_10; 903 SPEED_100 : SPEED_10));
903 cmd->duplex = (ctrl & WMC_WANFF) ? 904 cmd->duplex = (ctrl & WMC_WANFF) ?
904 DUPLEX_FULL : DUPLEX_HALF; 905 DUPLEX_FULL : DUPLEX_HALF;
905 } 906 }
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 7c521508313c..7be884d0aaf6 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -50,13 +50,13 @@ static int atl1c_get_settings(struct net_device *netdev,
50 ecmd->transceiver = XCVR_INTERNAL; 50 ecmd->transceiver = XCVR_INTERNAL;
51 51
52 if (adapter->link_speed != SPEED_0) { 52 if (adapter->link_speed != SPEED_0) {
53 ecmd->speed = adapter->link_speed; 53 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
54 if (adapter->link_duplex == FULL_DUPLEX) 54 if (adapter->link_duplex == FULL_DUPLEX)
55 ecmd->duplex = DUPLEX_FULL; 55 ecmd->duplex = DUPLEX_FULL;
56 else 56 else
57 ecmd->duplex = DUPLEX_HALF; 57 ecmd->duplex = DUPLEX_HALF;
58 } else { 58 } else {
59 ecmd->speed = -1; 59 ethtool_cmd_speed_set(ecmd, -1);
60 ecmd->duplex = -1; 60 ecmd->duplex = -1;
61 } 61 }
62 62
@@ -77,7 +77,8 @@ static int atl1c_set_settings(struct net_device *netdev,
77 if (ecmd->autoneg == AUTONEG_ENABLE) { 77 if (ecmd->autoneg == AUTONEG_ENABLE) {
78 autoneg_advertised = ADVERTISED_Autoneg; 78 autoneg_advertised = ADVERTISED_Autoneg;
79 } else { 79 } else {
80 if (ecmd->speed == SPEED_1000) { 80 u32 speed = ethtool_cmd_speed(ecmd);
81 if (speed == SPEED_1000) {
81 if (ecmd->duplex != DUPLEX_FULL) { 82 if (ecmd->duplex != DUPLEX_FULL) {
82 if (netif_msg_link(adapter)) 83 if (netif_msg_link(adapter))
83 dev_warn(&adapter->pdev->dev, 84 dev_warn(&adapter->pdev->dev,
@@ -86,7 +87,7 @@ static int atl1c_set_settings(struct net_device *netdev,
86 return -EINVAL; 87 return -EINVAL;
87 } 88 }
88 autoneg_advertised = ADVERTISED_1000baseT_Full; 89 autoneg_advertised = ADVERTISED_1000baseT_Full;
89 } else if (ecmd->speed == SPEED_100) { 90 } else if (speed == SPEED_100) {
90 if (ecmd->duplex == DUPLEX_FULL) 91 if (ecmd->duplex == DUPLEX_FULL)
91 autoneg_advertised = ADVERTISED_100baseT_Full; 92 autoneg_advertised = ADVERTISED_100baseT_Full;
92 else 93 else
@@ -113,11 +114,6 @@ static int atl1c_set_settings(struct net_device *netdev,
113 return 0; 114 return 0;
114} 115}
115 116
116static u32 atl1c_get_tx_csum(struct net_device *netdev)
117{
118 return (netdev->features & NETIF_F_HW_CSUM) != 0;
119}
120
121static u32 atl1c_get_msglevel(struct net_device *netdev) 117static u32 atl1c_get_msglevel(struct net_device *netdev)
122{ 118{
123 struct atl1c_adapter *adapter = netdev_priv(netdev); 119 struct atl1c_adapter *adapter = netdev_priv(netdev);
@@ -307,9 +303,6 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
307 .get_link = ethtool_op_get_link, 303 .get_link = ethtool_op_get_link,
308 .get_eeprom_len = atl1c_get_eeprom_len, 304 .get_eeprom_len = atl1c_get_eeprom_len,
309 .get_eeprom = atl1c_get_eeprom, 305 .get_eeprom = atl1c_get_eeprom,
310 .get_tx_csum = atl1c_get_tx_csum,
311 .get_sg = ethtool_op_get_sg,
312 .set_sg = ethtool_op_set_sg,
313}; 306};
314 307
315void atl1c_set_ethtool_ops(struct net_device *netdev) 308void atl1c_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a6e1c36e48e6..48868de386a0 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -480,6 +480,15 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
480 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 480 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
481 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; 481 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
482} 482}
483
484static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
485{
486 if (netdev->mtu > MAX_TSO_FRAME_SIZE)
487 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
488
489 return features;
490}
491
483/* 492/*
484 * atl1c_change_mtu - Change the Maximum Transfer Unit 493 * atl1c_change_mtu - Change the Maximum Transfer Unit
485 * @netdev: network interface device structure 494 * @netdev: network interface device structure
@@ -506,14 +515,8 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
506 netdev->mtu = new_mtu; 515 netdev->mtu = new_mtu;
507 adapter->hw.max_frame_size = new_mtu; 516 adapter->hw.max_frame_size = new_mtu;
508 atl1c_set_rxbufsize(adapter, netdev); 517 atl1c_set_rxbufsize(adapter, netdev);
509 if (new_mtu > MAX_TSO_FRAME_SIZE) {
510 adapter->netdev->features &= ~NETIF_F_TSO;
511 adapter->netdev->features &= ~NETIF_F_TSO6;
512 } else {
513 adapter->netdev->features |= NETIF_F_TSO;
514 adapter->netdev->features |= NETIF_F_TSO6;
515 }
516 atl1c_down(adapter); 518 atl1c_down(adapter);
519 netdev_update_features(netdev);
517 atl1c_up(adapter); 520 atl1c_up(adapter);
518 clear_bit(__AT_RESETTING, &adapter->flags); 521 clear_bit(__AT_RESETTING, &adapter->flags);
519 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) { 522 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
@@ -1088,10 +1091,8 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1088 u32 max_pay_load; 1091 u32 max_pay_load;
1089 u16 tx_offload_thresh; 1092 u16 tx_offload_thresh;
1090 u32 txq_ctrl_data; 1093 u32 txq_ctrl_data;
1091 u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
1092 u32 max_pay_load_data; 1094 u32 max_pay_load_data;
1093 1095
1094 extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
1095 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; 1096 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
1096 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, 1097 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1097 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); 1098 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
@@ -2581,6 +2582,7 @@ static const struct net_device_ops atl1c_netdev_ops = {
2581 .ndo_set_mac_address = atl1c_set_mac_addr, 2582 .ndo_set_mac_address = atl1c_set_mac_addr,
2582 .ndo_set_multicast_list = atl1c_set_multi, 2583 .ndo_set_multicast_list = atl1c_set_multi,
2583 .ndo_change_mtu = atl1c_change_mtu, 2584 .ndo_change_mtu = atl1c_change_mtu,
2585 .ndo_fix_features = atl1c_fix_features,
2584 .ndo_do_ioctl = atl1c_ioctl, 2586 .ndo_do_ioctl = atl1c_ioctl,
2585 .ndo_tx_timeout = atl1c_tx_timeout, 2587 .ndo_tx_timeout = atl1c_tx_timeout,
2586 .ndo_get_stats = atl1c_get_stats, 2588 .ndo_get_stats = atl1c_get_stats,
@@ -2601,12 +2603,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2601 atl1c_set_ethtool_ops(netdev); 2603 atl1c_set_ethtool_ops(netdev);
2602 2604
2603 /* TODO: add when ready */ 2605 /* TODO: add when ready */
2604 netdev->features = NETIF_F_SG | 2606 netdev->hw_features = NETIF_F_SG |
2605 NETIF_F_HW_CSUM | 2607 NETIF_F_HW_CSUM |
2606 NETIF_F_HW_VLAN_TX | 2608 NETIF_F_HW_VLAN_TX |
2607 NETIF_F_HW_VLAN_RX |
2608 NETIF_F_TSO | 2609 NETIF_F_TSO |
2609 NETIF_F_TSO6; 2610 NETIF_F_TSO6;
2611 netdev->features = netdev->hw_features |
2612 NETIF_F_HW_VLAN_RX;
2610 return 0; 2613 return 0;
2611} 2614}
2612 2615
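atl1c now clamps TSO through ndo_fix_features instead of flipping dev->features by hand in change_mtu, and calls netdev_update_features() after the MTU change so the core re-evaluates the mask. A condensed sketch of that split; EXAMPLE_TSO_LIMIT stands in for the driver's MAX_TSO_FRAME_SIZE, and the usual range checks and ring re-init are omitted:

#include <linux/netdevice.h>

#define EXAMPLE_TSO_LIMIT	6144	/* placeholder for the driver's threshold */

static u32 example_fix_features(struct net_device *dev, u32 features)
{
	/* frames above the TSO engine's limit must not be segmented in hw */
	if (dev->mtu > EXAMPLE_TSO_LIMIT)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	return features;
}

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	netdev_update_features(dev);	/* re-runs ndo_fix_features */
	return 0;
}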
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 1209297433b8..6269438d365f 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -51,13 +51,13 @@ static int atl1e_get_settings(struct net_device *netdev,
51 ecmd->transceiver = XCVR_INTERNAL; 51 ecmd->transceiver = XCVR_INTERNAL;
52 52
53 if (adapter->link_speed != SPEED_0) { 53 if (adapter->link_speed != SPEED_0) {
54 ecmd->speed = adapter->link_speed; 54 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
55 if (adapter->link_duplex == FULL_DUPLEX) 55 if (adapter->link_duplex == FULL_DUPLEX)
56 ecmd->duplex = DUPLEX_FULL; 56 ecmd->duplex = DUPLEX_FULL;
57 else 57 else
58 ecmd->duplex = DUPLEX_HALF; 58 ecmd->duplex = DUPLEX_HALF;
59 } else { 59 } else {
60 ecmd->speed = -1; 60 ethtool_cmd_speed_set(ecmd, -1);
61 ecmd->duplex = -1; 61 ecmd->duplex = -1;
62 } 62 }
63 63
@@ -382,9 +382,6 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
382 .get_eeprom_len = atl1e_get_eeprom_len, 382 .get_eeprom_len = atl1e_get_eeprom_len,
383 .get_eeprom = atl1e_get_eeprom, 383 .get_eeprom = atl1e_get_eeprom,
384 .set_eeprom = atl1e_set_eeprom, 384 .set_eeprom = atl1e_set_eeprom,
385 .set_tx_csum = ethtool_op_set_tx_hw_csum,
386 .set_sg = ethtool_op_set_sg,
387 .set_tso = ethtool_op_set_tso,
388}; 385};
389 386
390void atl1e_set_ethtool_ops(struct net_device *netdev) 387void atl1e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index b0a71e2f28a9..86a912283134 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -691,10 +691,8 @@ static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
691 691
692static void atl1e_init_ring_resources(struct atl1e_adapter *adapter) 692static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
693{ 693{
694 struct atl1e_tx_ring *tx_ring = NULL;
695 struct atl1e_rx_ring *rx_ring = NULL; 694 struct atl1e_rx_ring *rx_ring = NULL;
696 695
697 tx_ring = &adapter->tx_ring;
698 rx_ring = &adapter->rx_ring; 696 rx_ring = &adapter->rx_ring;
699 697
700 rx_ring->real_page_size = adapter->rx_ring.page_size 698 rx_ring->real_page_size = adapter->rx_ring.page_size
@@ -1927,11 +1925,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
1927 * reschedule our watchdog timer */ 1925 * reschedule our watchdog timer */
1928 set_bit(__AT_DOWN, &adapter->flags); 1926 set_bit(__AT_DOWN, &adapter->flags);
1929 1927
1930#ifdef NETIF_F_LLTX
1931 netif_stop_queue(netdev); 1928 netif_stop_queue(netdev);
1932#else
1933 netif_tx_disable(netdev);
1934#endif
1935 1929
1936 /* reset MAC to disable all RX/TX */ 1930 /* reset MAC to disable all RX/TX */
1937 atl1e_reset_hw(&adapter->hw); 1931 atl1e_reset_hw(&adapter->hw);
@@ -2223,10 +2217,10 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2223 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2217 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2224 atl1e_set_ethtool_ops(netdev); 2218 atl1e_set_ethtool_ops(netdev);
2225 2219
2226 netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | 2220 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
2227 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2221 NETIF_F_HW_VLAN_TX;
2228 netdev->features |= NETIF_F_LLTX; 2222 netdev->features = netdev->hw_features |
2229 netdev->features |= NETIF_F_TSO; 2223 NETIF_F_HW_VLAN_RX | NETIF_F_LLTX;
2230 2224
2231 return 0; 2225 return 0;
2232} 2226}
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 67f40b9c16ed..c5298d1ab744 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2074,9 +2074,6 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
2074 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); 2074 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
2075 2075
2076 while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { 2076 while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
2077 struct tx_packet_desc *tpd;
2078
2079 tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
2080 buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; 2077 buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
2081 if (buffer_info->dma) { 2078 if (buffer_info->dma) {
2082 pci_unmap_page(adapter->pdev, buffer_info->dma, 2079 pci_unmap_page(adapter->pdev, buffer_info->dma,
@@ -2572,7 +2569,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
2572{ 2569{
2573 struct net_device *netdev = adapter->netdev; 2570 struct net_device *netdev = adapter->netdev;
2574 int err; 2571 int err;
2575 int irq_flags = IRQF_SAMPLE_RANDOM; 2572 int irq_flags = 0;
2576 2573
2577 /* hardware has been reset, we need to reload some things */ 2574 /* hardware has been reset, we need to reload some things */
2578 atlx_set_multi(netdev); 2575 atlx_set_multi(netdev);
@@ -2986,6 +2983,11 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2986 netdev->features |= NETIF_F_SG; 2983 netdev->features |= NETIF_F_SG;
2987 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 2984 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2988 2985
2986 netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO;
2987
2988 /* is this valid? see atl1_setup_mac_ctrl() */
2989 netdev->features |= NETIF_F_RXCSUM;
2990
2989 /* 2991 /*
2990 * patch for some L1 of old version, 2992 * patch for some L1 of old version,
2991 * the final version of L1 may not need these 2993 * the final version of L1 may not need these
@@ -3229,13 +3231,13 @@ static int atl1_get_settings(struct net_device *netdev,
3229 if (netif_carrier_ok(adapter->netdev)) { 3231 if (netif_carrier_ok(adapter->netdev)) {
3230 u16 link_speed, link_duplex; 3232 u16 link_speed, link_duplex;
3231 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); 3233 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
3232 ecmd->speed = link_speed; 3234 ethtool_cmd_speed_set(ecmd, link_speed);
3233 if (link_duplex == FULL_DUPLEX) 3235 if (link_duplex == FULL_DUPLEX)
3234 ecmd->duplex = DUPLEX_FULL; 3236 ecmd->duplex = DUPLEX_FULL;
3235 else 3237 else
3236 ecmd->duplex = DUPLEX_HALF; 3238 ecmd->duplex = DUPLEX_HALF;
3237 } else { 3239 } else {
3238 ecmd->speed = -1; 3240 ethtool_cmd_speed_set(ecmd, -1);
3239 ecmd->duplex = -1; 3241 ecmd->duplex = -1;
3240 } 3242 }
3241 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || 3243 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
@@ -3266,7 +3268,8 @@ static int atl1_set_settings(struct net_device *netdev,
3266 if (ecmd->autoneg == AUTONEG_ENABLE) 3268 if (ecmd->autoneg == AUTONEG_ENABLE)
3267 hw->media_type = MEDIA_TYPE_AUTO_SENSOR; 3269 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
3268 else { 3270 else {
3269 if (ecmd->speed == SPEED_1000) { 3271 u32 speed = ethtool_cmd_speed(ecmd);
3272 if (speed == SPEED_1000) {
3270 if (ecmd->duplex != DUPLEX_FULL) { 3273 if (ecmd->duplex != DUPLEX_FULL) {
3271 if (netif_msg_link(adapter)) 3274 if (netif_msg_link(adapter))
3272 dev_warn(&adapter->pdev->dev, 3275 dev_warn(&adapter->pdev->dev,
@@ -3275,7 +3278,7 @@ static int atl1_set_settings(struct net_device *netdev,
3275 goto exit_sset; 3278 goto exit_sset;
3276 } 3279 }
3277 hw->media_type = MEDIA_TYPE_1000M_FULL; 3280 hw->media_type = MEDIA_TYPE_1000M_FULL;
3278 } else if (ecmd->speed == SPEED_100) { 3281 } else if (speed == SPEED_100) {
3279 if (ecmd->duplex == DUPLEX_FULL) 3282 if (ecmd->duplex == DUPLEX_FULL)
3280 hw->media_type = MEDIA_TYPE_100M_FULL; 3283 hw->media_type = MEDIA_TYPE_100M_FULL;
3281 else 3284 else
@@ -3595,12 +3598,6 @@ static int atl1_set_pauseparam(struct net_device *netdev,
3595 return 0; 3598 return 0;
3596} 3599}
3597 3600
3598/* FIXME: is this right? -- CHS */
3599static u32 atl1_get_rx_csum(struct net_device *netdev)
3600{
3601 return 1;
3602}
3603
3604static void atl1_get_strings(struct net_device *netdev, u32 stringset, 3601static void atl1_get_strings(struct net_device *netdev, u32 stringset,
3605 u8 *data) 3602 u8 *data)
3606{ 3603{
@@ -3668,13 +3665,9 @@ static const struct ethtool_ops atl1_ethtool_ops = {
3668 .set_ringparam = atl1_set_ringparam, 3665 .set_ringparam = atl1_set_ringparam,
3669 .get_pauseparam = atl1_get_pauseparam, 3666 .get_pauseparam = atl1_get_pauseparam,
3670 .set_pauseparam = atl1_set_pauseparam, 3667 .set_pauseparam = atl1_set_pauseparam,
3671 .get_rx_csum = atl1_get_rx_csum,
3672 .set_tx_csum = ethtool_op_set_tx_hw_csum,
3673 .get_link = ethtool_op_get_link, 3668 .get_link = ethtool_op_get_link,
3674 .set_sg = ethtool_op_set_sg,
3675 .get_strings = atl1_get_strings, 3669 .get_strings = atl1_get_strings,
3676 .nway_reset = atl1_nway_reset, 3670 .nway_reset = atl1_nway_reset,
3677 .get_ethtool_stats = atl1_get_ethtool_stats, 3671 .get_ethtool_stats = atl1_get_ethtool_stats,
3678 .get_sset_count = atl1_get_sset_count, 3672 .get_sset_count = atl1_get_sset_count,
3679 .set_tso = ethtool_op_set_tso,
3680}; 3673};
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index e3cbf45dc612..16249e9b6b95 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1411,9 +1411,8 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
1411 1411
1412 err = -EIO; 1412 err = -EIO;
1413 1413
1414#ifdef NETIF_F_HW_VLAN_TX 1414 netdev->hw_features = NETIF_F_SG;
1415 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 1415 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
1416#endif
1417 1416
1418 /* Init PHY as early as possible due to power saving issue */ 1417 /* Init PHY as early as possible due to power saving issue */
1419 atl2_phy_init(&adapter->hw); 1418 atl2_phy_init(&adapter->hw);
@@ -1770,13 +1769,13 @@ static int atl2_get_settings(struct net_device *netdev,
1770 ecmd->transceiver = XCVR_INTERNAL; 1769 ecmd->transceiver = XCVR_INTERNAL;
1771 1770
1772 if (adapter->link_speed != SPEED_0) { 1771 if (adapter->link_speed != SPEED_0) {
1773 ecmd->speed = adapter->link_speed; 1772 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
1774 if (adapter->link_duplex == FULL_DUPLEX) 1773 if (adapter->link_duplex == FULL_DUPLEX)
1775 ecmd->duplex = DUPLEX_FULL; 1774 ecmd->duplex = DUPLEX_FULL;
1776 else 1775 else
1777 ecmd->duplex = DUPLEX_HALF; 1776 ecmd->duplex = DUPLEX_HALF;
1778 } else { 1777 } else {
1779 ecmd->speed = -1; 1778 ethtool_cmd_speed_set(ecmd, -1);
1780 ecmd->duplex = -1; 1779 ecmd->duplex = -1;
1781 } 1780 }
1782 1781
@@ -1840,11 +1839,6 @@ static int atl2_set_settings(struct net_device *netdev,
1840 return 0; 1839 return 0;
1841} 1840}
1842 1841
1843static u32 atl2_get_tx_csum(struct net_device *netdev)
1844{
1845 return (netdev->features & NETIF_F_HW_CSUM) != 0;
1846}
1847
1848static u32 atl2_get_msglevel(struct net_device *netdev) 1842static u32 atl2_get_msglevel(struct net_device *netdev)
1849{ 1843{
1850 return 0; 1844 return 0;
@@ -2112,12 +2106,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
2112 .get_eeprom_len = atl2_get_eeprom_len, 2106 .get_eeprom_len = atl2_get_eeprom_len,
2113 .get_eeprom = atl2_get_eeprom, 2107 .get_eeprom = atl2_get_eeprom,
2114 .set_eeprom = atl2_set_eeprom, 2108 .set_eeprom = atl2_set_eeprom,
2115 .get_tx_csum = atl2_get_tx_csum,
2116 .get_sg = ethtool_op_get_sg,
2117 .set_sg = ethtool_op_set_sg,
2118#ifdef NETIF_F_TSO
2119 .get_tso = ethtool_op_get_tso,
2120#endif
2121}; 2109};
2122 2110
2123static void atl2_set_ethtool_ops(struct net_device *netdev) 2111static void atl2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 2e2b76258ab4..a69331e06b8d 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1807,8 +1807,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1807 if (bp->flags & B44_FLAG_ADV_100FULL) 1807 if (bp->flags & B44_FLAG_ADV_100FULL)
1808 cmd->advertising |= ADVERTISED_100baseT_Full; 1808 cmd->advertising |= ADVERTISED_100baseT_Full;
1809 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 1809 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1810 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ? 1810 ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1811 SPEED_100 : SPEED_10; 1811 SPEED_100 : SPEED_10));
1812 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? 1812 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1813 DUPLEX_FULL : DUPLEX_HALF; 1813 DUPLEX_FULL : DUPLEX_HALF;
1814 cmd->port = 0; 1814 cmd->port = 0;
@@ -1820,7 +1820,7 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1820 if (cmd->autoneg == AUTONEG_ENABLE) 1820 if (cmd->autoneg == AUTONEG_ENABLE)
1821 cmd->advertising |= ADVERTISED_Autoneg; 1821 cmd->advertising |= ADVERTISED_Autoneg;
1822 if (!netif_running(dev)){ 1822 if (!netif_running(dev)){
1823 cmd->speed = 0; 1823 ethtool_cmd_speed_set(cmd, 0);
1824 cmd->duplex = 0xff; 1824 cmd->duplex = 0xff;
1825 } 1825 }
1826 cmd->maxtxpkt = 0; 1826 cmd->maxtxpkt = 0;
@@ -1831,6 +1831,7 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1831static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1831static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1832{ 1832{
1833 struct b44 *bp = netdev_priv(dev); 1833 struct b44 *bp = netdev_priv(dev);
1834 u32 speed = ethtool_cmd_speed(cmd);
1834 1835
1835 /* We do not support gigabit. */ 1836 /* We do not support gigabit. */
1836 if (cmd->autoneg == AUTONEG_ENABLE) { 1837 if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -1838,8 +1839,8 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1838 (ADVERTISED_1000baseT_Half | 1839 (ADVERTISED_1000baseT_Half |
1839 ADVERTISED_1000baseT_Full)) 1840 ADVERTISED_1000baseT_Full))
1840 return -EINVAL; 1841 return -EINVAL;
1841 } else if ((cmd->speed != SPEED_100 && 1842 } else if ((speed != SPEED_100 &&
1842 cmd->speed != SPEED_10) || 1843 speed != SPEED_10) ||
1843 (cmd->duplex != DUPLEX_HALF && 1844 (cmd->duplex != DUPLEX_HALF &&
1844 cmd->duplex != DUPLEX_FULL)) { 1845 cmd->duplex != DUPLEX_FULL)) {
1845 return -EINVAL; 1846 return -EINVAL;
@@ -1873,7 +1874,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1873 } else { 1874 } else {
1874 bp->flags |= B44_FLAG_FORCE_LINK; 1875 bp->flags |= B44_FLAG_FORCE_LINK;
1875 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); 1876 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1876 if (cmd->speed == SPEED_100) 1877 if (speed == SPEED_100)
1877 bp->flags |= B44_FLAG_100_BASE_T; 1878 bp->flags |= B44_FLAG_100_BASE_T;
1878 if (cmd->duplex == DUPLEX_FULL) 1879 if (cmd->duplex == DUPLEX_FULL)
1879 bp->flags |= B44_FLAG_FULL_DUPLEX; 1880 bp->flags |= B44_FLAG_FULL_DUPLEX;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index c48104b08861..f1573d492e90 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -839,8 +839,8 @@ static int bcm_enet_open(struct net_device *dev)
839 if (ret) 839 if (ret)
840 goto out_phy_disconnect; 840 goto out_phy_disconnect;
841 841
842 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 842 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
843 IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev); 843 dev->name, dev);
844 if (ret) 844 if (ret)
845 goto out_freeirq; 845 goto out_freeirq;
846 846
@@ -1346,7 +1346,8 @@ static int bcm_enet_get_settings(struct net_device *dev,
1346 return phy_ethtool_gset(priv->phydev, cmd); 1346 return phy_ethtool_gset(priv->phydev, cmd);
1347 } else { 1347 } else {
1348 cmd->autoneg = 0; 1348 cmd->autoneg = 0;
1349 cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10; 1349 ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
1350 ? SPEED_100 : SPEED_10));
1350 cmd->duplex = (priv->force_duplex_full) ? 1351 cmd->duplex = (priv->force_duplex_full) ?
1351 DUPLEX_FULL : DUPLEX_HALF; 1352 DUPLEX_FULL : DUPLEX_HALF;
1352 cmd->supported = ADVERTISED_10baseT_Half | 1353 cmd->supported = ADVERTISED_10baseT_Half |
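The bcm63xx_enet hunk above, like the atl1 change earlier, drops IRQF_SAMPLE_RANDOM from the request_irq() flags while that flag was being phased out; only IRQF_DISABLED (or nothing) is passed now. A bare request_irq() sketch with the trimmed flags; handler body and name are placeholders:

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* ack the device and schedule NAPI / deferred work here */
	return IRQ_HANDLED;
}

static int example_request(int irq, void *dev)
{
	/* no IRQF_SAMPLE_RANDOM; 0 or IRQF_SHARED is the usual choice */
	return request_irq(irq, example_isr, 0, "example", dev);
}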
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 2353eca32593..0b73dcf26924 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,15 +84,14 @@ static inline char *nic_name(struct pci_dev *pdev)
84#define MCC_CQ_LEN 256 84#define MCC_CQ_LEN 256
85 85
86#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */ 86#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
87#define BE_MAX_MSIX_VECTORS (MAX_RSS_QS + 1 + 1)/* RSS qs + 1 def Rx + Tx */ 87#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
88#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
88#define BE_NAPI_WEIGHT 64 89#define BE_NAPI_WEIGHT 64
89#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 90#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
90#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 91#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
91 92
92#define FW_VER_LEN 32 93#define FW_VER_LEN 32
93 94
94#define BE_MAX_VF 32
95
96struct be_dma_mem { 95struct be_dma_mem {
97 void *va; 96 void *va;
98 dma_addr_t dma; 97 dma_addr_t dma;
@@ -245,6 +244,43 @@ struct be_rx_obj {
245 244
246struct be_drv_stats { 245struct be_drv_stats {
247 u8 be_on_die_temperature; 246 u8 be_on_die_temperature;
247 u64 be_tx_events;
248 u64 eth_red_drops;
249 u64 rx_drops_no_pbuf;
250 u64 rx_drops_no_txpb;
251 u64 rx_drops_no_erx_descr;
252 u64 rx_drops_no_tpre_descr;
253 u64 rx_drops_too_many_frags;
254 u64 rx_drops_invalid_ring;
255 u64 forwarded_packets;
256 u64 rx_drops_mtu;
257 u64 rx_crc_errors;
258 u64 rx_alignment_symbol_errors;
259 u64 rx_pause_frames;
260 u64 rx_priority_pause_frames;
261 u64 rx_control_frames;
262 u64 rx_in_range_errors;
263 u64 rx_out_range_errors;
264 u64 rx_frame_too_long;
265 u64 rx_address_match_errors;
266 u64 rx_dropped_too_small;
267 u64 rx_dropped_too_short;
268 u64 rx_dropped_header_too_small;
269 u64 rx_dropped_tcp_length;
270 u64 rx_dropped_runt;
271 u64 rx_ip_checksum_errs;
272 u64 rx_tcp_checksum_errs;
273 u64 rx_udp_checksum_errs;
274 u64 rx_switched_unicast_packets;
275 u64 rx_switched_multicast_packets;
276 u64 rx_switched_broadcast_packets;
277 u64 tx_pauseframes;
278 u64 tx_priority_pauseframes;
279 u64 tx_controlframes;
280 u64 rxpp_fifo_overflow_drop;
281 u64 rx_input_fifo_overflow_drop;
282 u64 pmem_fifo_overflow_drop;
283 u64 jabber_events;
248}; 284};
249 285
250struct be_vf_cfg { 286struct be_vf_cfg {
@@ -276,7 +312,7 @@ struct be_adapter {
276 spinlock_t mcc_cq_lock; 312 spinlock_t mcc_cq_lock;
277 313
278 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS]; 314 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
279 bool msix_enabled; 315 u32 num_msix_vec;
280 bool isr_registered; 316 bool isr_registered;
281 317
282 /* TX Rings */ 318 /* TX Rings */
@@ -287,7 +323,7 @@ struct be_adapter {
287 u32 cache_line_break[8]; 323 u32 cache_line_break[8];
288 324
289 /* Rx rings */ 325 /* Rx rings */
290 struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */ 326 struct be_rx_obj rx_obj[MAX_RX_QS];
291 u32 num_rx_qs; 327 u32 num_rx_qs;
292 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 328 u32 big_page_size; /* Compounded page size shared by rx wrbs */
293 329
@@ -308,10 +344,10 @@ struct be_adapter {
308 u16 work_counter; 344 u16 work_counter;
309 345
310 /* Ethtool knobs and info */ 346 /* Ethtool knobs and info */
311 bool rx_csum; /* BE card must perform rx-checksumming */
312 char fw_ver[FW_VER_LEN]; 347 char fw_ver[FW_VER_LEN];
313 u32 if_handle; /* Used to configure filtering */ 348 u32 if_handle; /* Used to configure filtering */
314 u32 pmac_id; /* MAC addr handle used by BE card */ 349 u32 pmac_id; /* MAC addr handle used by BE card */
350 u32 beacon_state; /* for set_phys_id */
315 351
316 bool eeh_err; 352 bool eeh_err;
317 bool link_up; 353 bool link_up;
@@ -334,7 +370,7 @@ struct be_adapter {
334 370
335 bool be3_native; 371 bool be3_native;
336 bool sriov_enabled; 372 bool sriov_enabled;
337 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 373 struct be_vf_cfg *vf_cfg;
338 u8 is_virtfn; 374 u8 is_virtfn;
339 u32 sli_family; 375 u32 sli_family;
340 u8 hba_port_num; 376 u8 hba_port_num;
@@ -351,6 +387,7 @@ struct be_adapter {
351 387
352extern const struct ethtool_ops be_ethtool_ops; 388extern const struct ethtool_ops be_ethtool_ops;
353 389
390#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
354#define tx_stats(adapter) (&adapter->tx_stats) 391#define tx_stats(adapter) (&adapter->tx_stats)
355#define rx_stats(rxo) (&rxo->stats) 392#define rx_stats(rxo) (&rxo->stats)
356 393
@@ -455,18 +492,10 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
455 492
456static inline void be_check_sriov_fn_type(struct be_adapter *adapter) 493static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
457{ 494{
458 u8 data;
459 u32 sli_intf; 495 u32 sli_intf;
460 496
461 if (lancer_chip(adapter)) { 497 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
462 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, 498 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
463 &sli_intf);
464 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
465 } else {
466 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
467 pci_read_config_byte(adapter->pdev, 0xFE, &data);
468 adapter->is_virtfn = (data != 0xAA);
469 }
470} 499}
471 500
472static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 501static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
@@ -482,9 +511,15 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
482 memcpy(mac, adapter->netdev->dev_addr, 3); 511 memcpy(mac, adapter->netdev->dev_addr, 3);
483} 512}
484 513
514static inline bool be_multi_rxq(const struct be_adapter *adapter)
515{
516 return adapter->num_rx_qs > 1;
517}
518
485extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 519extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
486 u16 num_popped); 520 u16 num_popped);
487extern void be_link_status_update(struct be_adapter *adapter, bool link_up); 521extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
488extern void netdev_stats_update(struct be_adapter *adapter); 522extern void netdev_stats_update(struct be_adapter *adapter);
523extern void be_parse_stats(struct be_adapter *adapter);
489extern int be_load_fw(struct be_adapter *adapter, u8 *func); 524extern int be_load_fw(struct be_adapter *adapter, u8 *func);
490#endif /* BE_H */ 525#endif /* BE_H */
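be.h above turns the msix_enabled bool into a num_msix_vec count wrapped by the msix_enabled() macro and adds the be_multi_rxq() helper. A hypothetical caller showing how the two read, not the actual be_main.c code:

#include <linux/pci.h>
#include "be.h"

static void example_disable_msix(struct be_adapter *adapter)
{
	if (!msix_enabled(adapter))	/* num_msix_vec == 0: nothing to undo */
		return;
	pci_disable_msix(adapter->pdev);
	adapter->num_msix_vec = 0;
}

static bool example_needs_rss(struct be_adapter *adapter)
{
	return be_multi_rxq(adapter);	/* more than one RX queue in use */
}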
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 9dc9394fd4ca..2463b1c97922 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -71,18 +71,38 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
71 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 71 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
72 CQE_STATUS_COMPL_MASK; 72 CQE_STATUS_COMPL_MASK;
73 73
74 if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) && 74 if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
75 (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
75 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { 76 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
76 adapter->flash_status = compl_status; 77 adapter->flash_status = compl_status;
77 complete(&adapter->flash_compl); 78 complete(&adapter->flash_compl);
78 } 79 }
79 80
80 if (compl_status == MCC_STATUS_SUCCESS) { 81 if (compl_status == MCC_STATUS_SUCCESS) {
81 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { 82 if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
82 struct be_cmd_resp_get_stats *resp = 83 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
83 adapter->stats_cmd.va; 84 (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
84 be_dws_le_to_cpu(&resp->hw_stats, 85 if (adapter->generation == BE_GEN3) {
85 sizeof(resp->hw_stats)); 86 if (lancer_chip(adapter)) {
87 struct lancer_cmd_resp_pport_stats
88 *resp = adapter->stats_cmd.va;
89 be_dws_le_to_cpu(&resp->pport_stats,
90 sizeof(resp->pport_stats));
91 } else {
92 struct be_cmd_resp_get_stats_v1 *resp =
93 adapter->stats_cmd.va;
94
95 be_dws_le_to_cpu(&resp->hw_stats,
96 sizeof(resp->hw_stats));
97 }
98 } else {
99 struct be_cmd_resp_get_stats_v0 *resp =
100 adapter->stats_cmd.va;
101
102 be_dws_le_to_cpu(&resp->hw_stats,
103 sizeof(resp->hw_stats));
104 }
105 be_parse_stats(adapter);
86 netdev_stats_update(adapter); 106 netdev_stats_update(adapter);
87 adapter->stats_cmd_sent = false; 107 adapter->stats_cmd_sent = false;
88 } 108 }
@@ -292,12 +312,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
292 312
293 if (msecs > 4000) { 313 if (msecs > 4000) {
294 dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); 314 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
295 be_detect_dump_ue(adapter); 315 if (!lancer_chip(adapter))
316 be_detect_dump_ue(adapter);
296 return -1; 317 return -1;
297 } 318 }
298 319
299 set_current_state(TASK_INTERRUPTIBLE); 320 msleep(1);
300 schedule_timeout(msecs_to_jiffies(1));
301 msecs++; 321 msecs++;
302 } while (true); 322 } while (true);
303 323
@@ -374,23 +394,25 @@ int be_cmd_POST(struct be_adapter *adapter)
374{ 394{
375 u16 stage; 395 u16 stage;
376 int status, timeout = 0; 396 int status, timeout = 0;
397 struct device *dev = &adapter->pdev->dev;
377 398
378 do { 399 do {
379 status = be_POST_stage_get(adapter, &stage); 400 status = be_POST_stage_get(adapter, &stage);
380 if (status) { 401 if (status) {
381 dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n", 402 dev_err(dev, "POST error; stage=0x%x\n", stage);
382 stage);
383 return -1; 403 return -1;
384 } else if (stage != POST_STAGE_ARMFW_RDY) { 404 } else if (stage != POST_STAGE_ARMFW_RDY) {
385 set_current_state(TASK_INTERRUPTIBLE); 405 if (msleep_interruptible(2000)) {
386 schedule_timeout(2 * HZ); 406 dev_err(dev, "Waiting for POST aborted\n");
407 return -EINTR;
408 }
387 timeout += 2; 409 timeout += 2;
388 } else { 410 } else {
389 return 0; 411 return 0;
390 } 412 }
391 } while (timeout < 40); 413 } while (timeout < 40);
392 414
393 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); 415 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
394 return -1; 416 return -1;
395} 417}
396 418
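The be_cmd_POST() hunk above replaces the open-coded set_current_state()/schedule_timeout() pair with msleep_interruptible(2000) and bails out with -EINTR when the sleep is cut short (msleep_interruptible() returns the milliseconds left if a signal arrived, 0 otherwise). The same polling shape in isolation, with a hypothetical ready-check instead of be_POST_stage_get():

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_poll_ready(bool (*ready)(void *ctx), void *ctx)
{
	int timeout = 0;

	do {
		if (ready(ctx))
			return 0;
		if (msleep_interruptible(2000))
			return -EINTR;	/* signal arrived mid-sleep */
		timeout += 2;
	} while (timeout < 40);		/* ~40 seconds, as in the hunk above */

	return -ETIMEDOUT;
}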
@@ -728,8 +750,6 @@ int be_cmd_cq_create(struct be_adapter *adapter,
728 if (lancer_chip(adapter)) { 750 if (lancer_chip(adapter)) {
729 req->hdr.version = 2; 751 req->hdr.version = 2;
730 req->page_size = 1; /* 1 for 4K */ 752 req->page_size = 1; /* 1 for 4K */
731 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
732 coalesce_wm);
733 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, 753 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
734 no_delay); 754 no_delay);
735 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, 755 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
@@ -1074,7 +1094,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
1074int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) 1094int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1075{ 1095{
1076 struct be_mcc_wrb *wrb; 1096 struct be_mcc_wrb *wrb;
1077 struct be_cmd_req_get_stats *req; 1097 struct be_cmd_req_hdr *hdr;
1078 struct be_sge *sge; 1098 struct be_sge *sge;
1079 int status = 0; 1099 int status = 0;
1080 1100
@@ -1088,14 +1108,62 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1088 status = -EBUSY; 1108 status = -EBUSY;
1089 goto err; 1109 goto err;
1090 } 1110 }
1091 req = nonemb_cmd->va; 1111 hdr = nonemb_cmd->va;
1092 sge = nonembedded_sgl(wrb); 1112 sge = nonembedded_sgl(wrb);
1093 1113
1094 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, 1114 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
1095 OPCODE_ETH_GET_STATISTICS); 1115 OPCODE_ETH_GET_STATISTICS);
1096 1116
1117 be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1118 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
1119
1120 if (adapter->generation == BE_GEN3)
1121 hdr->version = 1;
1122
1123 wrb->tag1 = CMD_SUBSYSTEM_ETH;
1124 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1125 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1126 sge->len = cpu_to_le32(nonemb_cmd->size);
1127
1128 be_mcc_notify(adapter);
1129 adapter->stats_cmd_sent = true;
1130
1131err:
1132 spin_unlock_bh(&adapter->mcc_lock);
1133 return status;
1134}
1135
1136/* Lancer Stats */
1137int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1138 struct be_dma_mem *nonemb_cmd)
1139{
1140
1141 struct be_mcc_wrb *wrb;
1142 struct lancer_cmd_req_pport_stats *req;
1143 struct be_sge *sge;
1144 int status = 0;
1145
1146 spin_lock_bh(&adapter->mcc_lock);
1147
1148 wrb = wrb_from_mccq(adapter);
1149 if (!wrb) {
1150 status = -EBUSY;
1151 goto err;
1152 }
1153 req = nonemb_cmd->va;
1154 sge = nonembedded_sgl(wrb);
1155
1156 be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
1157 OPCODE_ETH_GET_PPORT_STATS);
1158
1097 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1159 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1098 OPCODE_ETH_GET_STATISTICS, sizeof(*req)); 1160 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
1161
1162
1163 req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
1164 req->cmd_params.params.reset_stats = 0;
1165
1166 wrb->tag1 = CMD_SUBSYSTEM_ETH;
1099 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); 1167 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1100 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); 1168 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1101 sge->len = cpu_to_le32(nonemb_cmd->size); 1169 sge->len = cpu_to_le32(nonemb_cmd->size);
@@ -1110,7 +1178,7 @@ err:
1110 1178
1111/* Uses synchronous mcc */ 1179/* Uses synchronous mcc */
1112int be_cmd_link_status_query(struct be_adapter *adapter, 1180int be_cmd_link_status_query(struct be_adapter *adapter,
1113 bool *link_up, u8 *mac_speed, u16 *link_speed) 1181 bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
1114{ 1182{
1115 struct be_mcc_wrb *wrb; 1183 struct be_mcc_wrb *wrb;
1116 struct be_cmd_req_link_status *req; 1184 struct be_cmd_req_link_status *req;
@@ -1186,6 +1254,116 @@ err:
1186 return status; 1254 return status;
1187} 1255}
1188 1256
1257/* Uses synchronous mcc */
1258int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1259{
1260 struct be_mcc_wrb *wrb;
1261 struct be_cmd_req_get_fat *req;
1262 int status;
1263
1264 spin_lock_bh(&adapter->mcc_lock);
1265
1266 wrb = wrb_from_mccq(adapter);
1267 if (!wrb) {
1268 status = -EBUSY;
1269 goto err;
1270 }
1271 req = embedded_payload(wrb);
1272
1273 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1274 OPCODE_COMMON_MANAGE_FAT);
1275
1276 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1277 OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
1278 req->fat_operation = cpu_to_le32(QUERY_FAT);
1279 status = be_mcc_notify_wait(adapter);
1280 if (!status) {
1281 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1282 if (log_size && resp->log_size)
1283 *log_size = le32_to_cpu(resp->log_size) -
1284 sizeof(u32);
1285 }
1286err:
1287 spin_unlock_bh(&adapter->mcc_lock);
1288 return status;
1289}
1290
1291void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1292{
1293 struct be_dma_mem get_fat_cmd;
1294 struct be_mcc_wrb *wrb;
1295 struct be_cmd_req_get_fat *req;
1296 struct be_sge *sge;
1297 u32 offset = 0, total_size, buf_size,
1298 log_offset = sizeof(u32), payload_len;
1299 int status;
1300
1301 if (buf_len == 0)
1302 return;
1303
1304 total_size = buf_len;
1305
1306 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1307 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1308 get_fat_cmd.size,
1309 &get_fat_cmd.dma);
1310 if (!get_fat_cmd.va) {
1311 status = -ENOMEM;
1312 dev_err(&adapter->pdev->dev,
1313 "Memory allocation failure while retrieving FAT data\n");
1314 return;
1315 }
1316
1317 spin_lock_bh(&adapter->mcc_lock);
1318
1319 while (total_size) {
1320 buf_size = min(total_size, (u32)60*1024);
1321 total_size -= buf_size;
1322
1323 wrb = wrb_from_mccq(adapter);
1324 if (!wrb) {
1325 status = -EBUSY;
1326 goto err;
1327 }
1328 req = get_fat_cmd.va;
1329 sge = nonembedded_sgl(wrb);
1330
1331 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1332 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1333 OPCODE_COMMON_MANAGE_FAT);
1334
1335 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1336 OPCODE_COMMON_MANAGE_FAT, payload_len);
1337
1338 sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
1339 sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1340 sge->len = cpu_to_le32(get_fat_cmd.size);
1341
1342 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1343 req->read_log_offset = cpu_to_le32(log_offset);
1344 req->read_log_length = cpu_to_le32(buf_size);
1345 req->data_buffer_size = cpu_to_le32(buf_size);
1346
1347 status = be_mcc_notify_wait(adapter);
1348 if (!status) {
1349 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1350 memcpy(buf + offset,
1351 resp->data_buffer,
1352 resp->read_log_length);
1353 } else {
1354 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1355 goto err;
1356 }
1357 offset += buf_size;
1358 log_offset += buf_size;
1359 }
1360err:
1361 spin_unlock_bh(&adapter->mcc_lock);
1362 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1363 get_fat_cmd.va,
1364 get_fat_cmd.dma);
1365}
1366
1189/* Uses Mbox */ 1367/* Uses Mbox */
1190int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) 1368int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1191{ 1369{
@@ -1293,12 +1471,24 @@ err:
1293/* Uses MCC for this command as it may be called in BH context 1471/* Uses MCC for this command as it may be called in BH context
1294 * Uses synchronous mcc 1472 * Uses synchronous mcc
1295 */ 1473 */
1296int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) 1474int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
1297{ 1475{
1298 struct be_mcc_wrb *wrb; 1476 struct be_mcc_wrb *wrb;
1299 struct be_cmd_req_promiscuous_config *req; 1477 struct be_cmd_req_rx_filter *req;
1478 struct be_dma_mem promiscuous_cmd;
1479 struct be_sge *sge;
1300 int status; 1480 int status;
1301 1481
1482 memset(&promiscuous_cmd, 0, sizeof(struct be_dma_mem));
1483 promiscuous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
1484 promiscuous_cmd.va = pci_alloc_consistent(adapter->pdev,
1485 promiscuous_cmd.size, &promiscuous_cmd.dma);
1486 if (!promiscuous_cmd.va) {
1487 dev_err(&adapter->pdev->dev,
1488 "Memory allocation failure\n");
1489 return -ENOMEM;
1490 }
1491
1302 spin_lock_bh(&adapter->mcc_lock); 1492 spin_lock_bh(&adapter->mcc_lock);
1303 1493
1304 wrb = wrb_from_mccq(adapter); 1494 wrb = wrb_from_mccq(adapter);
@@ -1306,26 +1496,30 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1306 status = -EBUSY; 1496 status = -EBUSY;
1307 goto err; 1497 goto err;
1308 } 1498 }
1309 req = embedded_payload(wrb);
1310 1499
1311 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS); 1500 req = promiscuous_cmd.va;
1501 sge = nonembedded_sgl(wrb);
1312 1502
1313 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1503 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1314 OPCODE_ETH_PROMISCUOUS, sizeof(*req)); 1504 OPCODE_COMMON_NTWK_RX_FILTER);
1315 1505 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1316 /* In FW versions X.102.149/X.101.487 and later, 1506 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
1317 * the port setting associated only with the 1507
1318 * issuing pci function will take effect 1508 req->if_id = cpu_to_le32(adapter->if_handle);
1319 */ 1509 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
1320 if (port_num) 1510 if (en)
1321 req->port1_promiscuous = en; 1511 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
1322 else 1512
1323 req->port0_promiscuous = en; 1513 sge->pa_hi = cpu_to_le32(upper_32_bits(promiscuous_cmd.dma));
1514 sge->pa_lo = cpu_to_le32(promiscuous_cmd.dma & 0xFFFFFFFF);
1515 sge->len = cpu_to_le32(promiscuous_cmd.size);
1324 1516
1325 status = be_mcc_notify_wait(adapter); 1517 status = be_mcc_notify_wait(adapter);
1326 1518
1327err: 1519err:
1328 spin_unlock_bh(&adapter->mcc_lock); 1520 spin_unlock_bh(&adapter->mcc_lock);
1521 pci_free_consistent(adapter->pdev, promiscuous_cmd.size,
1522 promiscuous_cmd.va, promiscuous_cmd.dma);
1329 return status; 1523 return status;
1330} 1524}
1331 1525
@@ -1608,6 +1802,81 @@ err:
1608 return status; 1802 return status;
1609} 1803}
1610 1804
1805int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1806 u32 data_size, u32 data_offset, const char *obj_name,
1807 u32 *data_written, u8 *addn_status)
1808{
1809 struct be_mcc_wrb *wrb;
1810 struct lancer_cmd_req_write_object *req;
1811 struct lancer_cmd_resp_write_object *resp;
1812 void *ctxt = NULL;
1813 int status;
1814
1815 spin_lock_bh(&adapter->mcc_lock);
1816 adapter->flash_status = 0;
1817
1818 wrb = wrb_from_mccq(adapter);
1819 if (!wrb) {
1820 status = -EBUSY;
1821 goto err_unlock;
1822 }
1823
1824 req = embedded_payload(wrb);
1825
1826 be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
1827 true, 1, OPCODE_COMMON_WRITE_OBJECT);
1828 wrb->tag1 = CMD_SUBSYSTEM_COMMON;
1829
1830 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1831 OPCODE_COMMON_WRITE_OBJECT,
1832 sizeof(struct lancer_cmd_req_write_object));
1833
1834 ctxt = &req->context;
1835 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1836 write_length, ctxt, data_size);
1837
1838 if (data_size == 0)
1839 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1840 eof, ctxt, 1);
1841 else
1842 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1843 eof, ctxt, 0);
1844
1845 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1846 req->write_offset = cpu_to_le32(data_offset);
1847 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
1848 req->descriptor_count = cpu_to_le32(1);
1849 req->buf_len = cpu_to_le32(data_size);
1850 req->addr_low = cpu_to_le32((cmd->dma +
1851 sizeof(struct lancer_cmd_req_write_object))
1852 & 0xFFFFFFFF);
1853 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1854 sizeof(struct lancer_cmd_req_write_object)));
1855
1856 be_mcc_notify(adapter);
1857 spin_unlock_bh(&adapter->mcc_lock);
1858
1859 if (!wait_for_completion_timeout(&adapter->flash_compl,
1860 msecs_to_jiffies(12000)))
1861 status = -1;
1862 else
1863 status = adapter->flash_status;
1864
1865 resp = embedded_payload(wrb);
1866 if (!status) {
1867 *data_written = le32_to_cpu(resp->actual_write_len);
1868 } else {
1869 *addn_status = resp->additional_status;
1870 status = resp->status;
1871 }
1872
1873 return status;
1874
1875err_unlock:
1876 spin_unlock_bh(&adapter->mcc_lock);
1877 return status;
1878}
1879
1611int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 1880int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1612 u32 flash_type, u32 flash_opcode, u32 buf_size) 1881 u32 flash_type, u32 flash_opcode, u32 buf_size)
1613{ 1882{
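
be_cmd_get_regs() above drains the firmware FAT log with repeated MANAGE_FAT/RETRIEVE_FAT requests of at most 60KB each, advancing both the destination offset and the firmware log offset by the chunk size; the log offset starts at sizeof(u32) because be_cmd_get_reg_len() treats the first dword of the log as its size. A minimal user-space sketch of that chunking loop follows; fetch_chunk() is a hypothetical stand-in for the real mailbox command and the sizes are illustrative.

/* Standalone sketch, not part of the patch: the 60KB-chunk retrieval
 * pattern used by be_cmd_get_regs(). fetch_chunk() is a hypothetical
 * stand-in for the RETRIEVE_FAT mailbox command.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_MAX	(60u * 1024u)

/* Pretend to read 'len' bytes of log data starting at 'log_off'. */
static void fetch_chunk(uint8_t *dst, uint32_t log_off, uint32_t len)
{
	memset(dst, (int)(log_off & 0xff), len);
}

static void retrieve_fat_log(uint8_t *buf, uint32_t buf_len)
{
	uint32_t total = buf_len;
	uint32_t offset = 0;
	uint32_t log_offset = sizeof(uint32_t);	/* skip the leading log-size dword */

	while (total) {
		uint32_t chunk = total < CHUNK_MAX ? total : CHUNK_MAX;

		fetch_chunk(buf + offset, log_offset, chunk);
		offset += chunk;
		log_offset += chunk;
		total -= chunk;
	}
}

int main(void)
{
	static uint8_t buf[150 * 1024];	/* forces three chunks: 60KB + 60KB + 30KB */

	retrieve_fat_log(buf, sizeof(buf));
	printf("retrieved %u bytes in 60KB chunks\n", (unsigned)sizeof(buf));
	return 0;
}
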
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 4f254cfaabe2..8148cc66cbe9 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -186,12 +186,14 @@ struct be_mcc_mailbox {
186#define OPCODE_COMMON_NTWK_PMAC_ADD 59 186#define OPCODE_COMMON_NTWK_PMAC_ADD 59
187#define OPCODE_COMMON_NTWK_PMAC_DEL 60 187#define OPCODE_COMMON_NTWK_PMAC_DEL 60
188#define OPCODE_COMMON_FUNCTION_RESET 61 188#define OPCODE_COMMON_FUNCTION_RESET 61
189#define OPCODE_COMMON_MANAGE_FAT 68
189#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 190#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
190#define OPCODE_COMMON_GET_BEACON_STATE 70 191#define OPCODE_COMMON_GET_BEACON_STATE 70
191#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 192#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
192#define OPCODE_COMMON_GET_PHY_DETAILS 102 193#define OPCODE_COMMON_GET_PHY_DETAILS 102
193#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 194#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
194#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 195#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
196#define OPCODE_COMMON_WRITE_OBJECT 172
195 197
196#define OPCODE_ETH_RSS_CONFIG 1 198#define OPCODE_ETH_RSS_CONFIG 1
197#define OPCODE_ETH_ACPI_CONFIG 2 199#define OPCODE_ETH_ACPI_CONFIG 2
@@ -202,6 +204,7 @@ struct be_mcc_mailbox {
202#define OPCODE_ETH_TX_DESTROY 9 204#define OPCODE_ETH_TX_DESTROY 9
203#define OPCODE_ETH_RX_DESTROY 10 205#define OPCODE_ETH_RX_DESTROY 10
204#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12 206#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
207#define OPCODE_ETH_GET_PPORT_STATS 18
205 208
206#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 209#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
207#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 210#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
@@ -380,6 +383,24 @@ struct be_cmd_resp_cq_create {
380 u16 rsvd0; 383 u16 rsvd0;
381} __packed; 384} __packed;
382 385
386struct be_cmd_req_get_fat {
387 struct be_cmd_req_hdr hdr;
388 u32 fat_operation;
389 u32 read_log_offset;
390 u32 read_log_length;
391 u32 data_buffer_size;
392 u32 data_buffer[1];
393} __packed;
394
395struct be_cmd_resp_get_fat {
396 struct be_cmd_resp_hdr hdr;
397 u32 log_size;
398 u32 read_log_length;
399 u32 rsvd[2];
400 u32 data_buffer[1];
401} __packed;
402
403
383/******************** Create MCCQ ***************************/ 404/******************** Create MCCQ ***************************/
384/* Pseudo amap definition in which each bit of the actual structure is defined 405/* Pseudo amap definition in which each bit of the actual structure is defined
385 * as a byte: used to calculate offset/shift/mask of each field */ 406 * as a byte: used to calculate offset/shift/mask of each field */
@@ -549,7 +570,7 @@ struct be_cmd_req_if_destroy {
549}; 570};
550 571
551/*************** HW Stats Get **********************************/ 572/*************** HW Stats Get **********************************/
552struct be_port_rxf_stats { 573struct be_port_rxf_stats_v0 {
553 u32 rx_bytes_lsd; /* dword 0*/ 574 u32 rx_bytes_lsd; /* dword 0*/
554 u32 rx_bytes_msd; /* dword 1*/ 575 u32 rx_bytes_msd; /* dword 1*/
555 u32 rx_total_frames; /* dword 2*/ 576 u32 rx_total_frames; /* dword 2*/
@@ -618,8 +639,8 @@ struct be_port_rxf_stats {
618 u32 rx_input_fifo_overflow; /* dword 65*/ 639 u32 rx_input_fifo_overflow; /* dword 65*/
619}; 640};
620 641
621struct be_rxf_stats { 642struct be_rxf_stats_v0 {
622 struct be_port_rxf_stats port[2]; 643 struct be_port_rxf_stats_v0 port[2];
623 u32 rx_drops_no_pbuf; /* dword 132*/ 644 u32 rx_drops_no_pbuf; /* dword 132*/
624 u32 rx_drops_no_txpb; /* dword 133*/ 645 u32 rx_drops_no_txpb; /* dword 133*/
625 u32 rx_drops_no_erx_descr; /* dword 134*/ 646 u32 rx_drops_no_erx_descr; /* dword 134*/
@@ -642,36 +663,227 @@ struct be_rxf_stats {
642 u32 rsvd1[6]; 663 u32 rsvd1[6];
643}; 664};
644 665
645struct be_erx_stats { 666struct be_erx_stats_v0 {
646 u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/ 667 u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/
647 u32 debug_wdma_sent_hold; /* dword 44*/ 668 u32 rsvd[4];
648 u32 debug_wdma_pbfree_sent_hold; /* dword 45*/
649 u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/
650 u32 debug_pmem_pbuf_dealloc; /* dword 47*/
651}; 669};
652 670
653struct be_pmem_stats { 671struct be_pmem_stats {
654 u32 eth_red_drops; 672 u32 eth_red_drops;
655 u32 rsvd[4]; 673 u32 rsvd[5];
656}; 674};
657 675
658struct be_hw_stats { 676struct be_hw_stats_v0 {
659 struct be_rxf_stats rxf; 677 struct be_rxf_stats_v0 rxf;
660 u32 rsvd[48]; 678 u32 rsvd[48];
661 struct be_erx_stats erx; 679 struct be_erx_stats_v0 erx;
662 struct be_pmem_stats pmem; 680 struct be_pmem_stats pmem;
663}; 681};
664 682
665struct be_cmd_req_get_stats { 683struct be_cmd_req_get_stats_v0 {
684 struct be_cmd_req_hdr hdr;
685 u8 rsvd[sizeof(struct be_hw_stats_v0)];
686};
687
688struct be_cmd_resp_get_stats_v0 {
689 struct be_cmd_resp_hdr hdr;
690 struct be_hw_stats_v0 hw_stats;
691};
692
693#define make_64bit_val(hi_32, lo_32) ((((u64)(hi_32)) << 32) | (lo_32))
694struct lancer_cmd_pport_stats {
695 u32 tx_packets_lo;
696 u32 tx_packets_hi;
697 u32 tx_unicast_packets_lo;
698 u32 tx_unicast_packets_hi;
699 u32 tx_multicast_packets_lo;
700 u32 tx_multicast_packets_hi;
701 u32 tx_broadcast_packets_lo;
702 u32 tx_broadcast_packets_hi;
703 u32 tx_bytes_lo;
704 u32 tx_bytes_hi;
705 u32 tx_unicast_bytes_lo;
706 u32 tx_unicast_bytes_hi;
707 u32 tx_multicast_bytes_lo;
708 u32 tx_multicast_bytes_hi;
709 u32 tx_broadcast_bytes_lo;
710 u32 tx_broadcast_bytes_hi;
711 u32 tx_discards_lo;
712 u32 tx_discards_hi;
713 u32 tx_errors_lo;
714 u32 tx_errors_hi;
715 u32 tx_pause_frames_lo;
716 u32 tx_pause_frames_hi;
717 u32 tx_pause_on_frames_lo;
718 u32 tx_pause_on_frames_hi;
719 u32 tx_pause_off_frames_lo;
720 u32 tx_pause_off_frames_hi;
721 u32 tx_internal_mac_errors_lo;
722 u32 tx_internal_mac_errors_hi;
723 u32 tx_control_frames_lo;
724 u32 tx_control_frames_hi;
725 u32 tx_packets_64_bytes_lo;
726 u32 tx_packets_64_bytes_hi;
727 u32 tx_packets_65_to_127_bytes_lo;
728 u32 tx_packets_65_to_127_bytes_hi;
729 u32 tx_packets_128_to_255_bytes_lo;
730 u32 tx_packets_128_to_255_bytes_hi;
731 u32 tx_packets_256_to_511_bytes_lo;
732 u32 tx_packets_256_to_511_bytes_hi;
733 u32 tx_packets_512_to_1023_bytes_lo;
734 u32 tx_packets_512_to_1023_bytes_hi;
735 u32 tx_packets_1024_to_1518_bytes_lo;
736 u32 tx_packets_1024_to_1518_bytes_hi;
737 u32 tx_packets_1519_to_2047_bytes_lo;
738 u32 tx_packets_1519_to_2047_bytes_hi;
739 u32 tx_packets_2048_to_4095_bytes_lo;
740 u32 tx_packets_2048_to_4095_bytes_hi;
741 u32 tx_packets_4096_to_8191_bytes_lo;
742 u32 tx_packets_4096_to_8191_bytes_hi;
743 u32 tx_packets_8192_to_9216_bytes_lo;
744 u32 tx_packets_8192_to_9216_bytes_hi;
745 u32 tx_lso_packets_lo;
746 u32 tx_lso_packets_hi;
747 u32 rx_packets_lo;
748 u32 rx_packets_hi;
749 u32 rx_unicast_packets_lo;
750 u32 rx_unicast_packets_hi;
751 u32 rx_multicast_packets_lo;
752 u32 rx_multicast_packets_hi;
753 u32 rx_broadcast_packets_lo;
754 u32 rx_broadcast_packets_hi;
755 u32 rx_bytes_lo;
756 u32 rx_bytes_hi;
757 u32 rx_unicast_bytes_lo;
758 u32 rx_unicast_bytes_hi;
759 u32 rx_multicast_bytes_lo;
760 u32 rx_multicast_bytes_hi;
761 u32 rx_broadcast_bytes_lo;
762 u32 rx_broadcast_bytes_hi;
763 u32 rx_unknown_protos;
764 u32 rsvd_69; /* Word 69 is reserved */
765 u32 rx_discards_lo;
766 u32 rx_discards_hi;
767 u32 rx_errors_lo;
768 u32 rx_errors_hi;
769 u32 rx_crc_errors_lo;
770 u32 rx_crc_errors_hi;
771 u32 rx_alignment_errors_lo;
772 u32 rx_alignment_errors_hi;
773 u32 rx_symbol_errors_lo;
774 u32 rx_symbol_errors_hi;
775 u32 rx_pause_frames_lo;
776 u32 rx_pause_frames_hi;
777 u32 rx_pause_on_frames_lo;
778 u32 rx_pause_on_frames_hi;
779 u32 rx_pause_off_frames_lo;
780 u32 rx_pause_off_frames_hi;
781 u32 rx_frames_too_long_lo;
782 u32 rx_frames_too_long_hi;
783 u32 rx_internal_mac_errors_lo;
784 u32 rx_internal_mac_errors_hi;
785 u32 rx_undersize_packets;
786 u32 rx_oversize_packets;
787 u32 rx_fragment_packets;
788 u32 rx_jabbers;
789 u32 rx_control_frames_lo;
790 u32 rx_control_frames_hi;
791 u32 rx_control_frames_unknown_opcode_lo;
792 u32 rx_control_frames_unknown_opcode_hi;
793 u32 rx_in_range_errors;
794 u32 rx_out_of_range_errors;
795 u32 rx_address_match_errors;
796 u32 rx_vlan_mismatch_errors;
797 u32 rx_dropped_too_small;
798 u32 rx_dropped_too_short;
799 u32 rx_dropped_header_too_small;
800 u32 rx_dropped_invalid_tcp_length;
801 u32 rx_dropped_runt;
802 u32 rx_ip_checksum_errors;
803 u32 rx_tcp_checksum_errors;
804 u32 rx_udp_checksum_errors;
805 u32 rx_non_rss_packets;
806 u32 rsvd_111;
807 u32 rx_ipv4_packets_lo;
808 u32 rx_ipv4_packets_hi;
809 u32 rx_ipv6_packets_lo;
810 u32 rx_ipv6_packets_hi;
811 u32 rx_ipv4_bytes_lo;
812 u32 rx_ipv4_bytes_hi;
813 u32 rx_ipv6_bytes_lo;
814 u32 rx_ipv6_bytes_hi;
815 u32 rx_nic_packets_lo;
816 u32 rx_nic_packets_hi;
817 u32 rx_tcp_packets_lo;
818 u32 rx_tcp_packets_hi;
819 u32 rx_iscsi_packets_lo;
820 u32 rx_iscsi_packets_hi;
821 u32 rx_management_packets_lo;
822 u32 rx_management_packets_hi;
823 u32 rx_switched_unicast_packets_lo;
824 u32 rx_switched_unicast_packets_hi;
825 u32 rx_switched_multicast_packets_lo;
826 u32 rx_switched_multicast_packets_hi;
827 u32 rx_switched_broadcast_packets_lo;
828 u32 rx_switched_broadcast_packets_hi;
829 u32 num_forwards_lo;
830 u32 num_forwards_hi;
831 u32 rx_fifo_overflow;
832 u32 rx_input_fifo_overflow;
833 u32 rx_drops_too_many_frags_lo;
834 u32 rx_drops_too_many_frags_hi;
835 u32 rx_drops_invalid_queue;
836 u32 rsvd_141;
837 u32 rx_drops_mtu_lo;
838 u32 rx_drops_mtu_hi;
839 u32 rx_packets_64_bytes_lo;
840 u32 rx_packets_64_bytes_hi;
841 u32 rx_packets_65_to_127_bytes_lo;
842 u32 rx_packets_65_to_127_bytes_hi;
843 u32 rx_packets_128_to_255_bytes_lo;
844 u32 rx_packets_128_to_255_bytes_hi;
845 u32 rx_packets_256_to_511_bytes_lo;
846 u32 rx_packets_256_to_511_bytes_hi;
847 u32 rx_packets_512_to_1023_bytes_lo;
848 u32 rx_packets_512_to_1023_bytes_hi;
849 u32 rx_packets_1024_to_1518_bytes_lo;
850 u32 rx_packets_1024_to_1518_bytes_hi;
851 u32 rx_packets_1519_to_2047_bytes_lo;
852 u32 rx_packets_1519_to_2047_bytes_hi;
853 u32 rx_packets_2048_to_4095_bytes_lo;
854 u32 rx_packets_2048_to_4095_bytes_hi;
855 u32 rx_packets_4096_to_8191_bytes_lo;
856 u32 rx_packets_4096_to_8191_bytes_hi;
857 u32 rx_packets_8192_to_9216_bytes_lo;
858 u32 rx_packets_8192_to_9216_bytes_hi;
859};
860
861struct pport_stats_params {
862 u16 pport_num;
863 u8 rsvd;
864 u8 reset_stats;
865};
866
867struct lancer_cmd_req_pport_stats {
666 struct be_cmd_req_hdr hdr; 868 struct be_cmd_req_hdr hdr;
667 u8 rsvd[sizeof(struct be_hw_stats)]; 869 union {
870 struct pport_stats_params params;
871 u8 rsvd[sizeof(struct lancer_cmd_pport_stats)];
872 } cmd_params;
668}; 873};
669 874
670struct be_cmd_resp_get_stats { 875struct lancer_cmd_resp_pport_stats {
671 struct be_cmd_resp_hdr hdr; 876 struct be_cmd_resp_hdr hdr;
672 struct be_hw_stats hw_stats; 877 struct lancer_cmd_pport_stats pport_stats;
673}; 878};
674 879
880static inline struct lancer_cmd_pport_stats*
881 pport_stats_from_cmd(struct be_adapter *adapter)
882{
883 struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
884 return &cmd->pport_stats;
885}
886
675struct be_cmd_req_get_cntl_addnl_attribs { 887struct be_cmd_req_get_cntl_addnl_attribs {
676 struct be_cmd_req_hdr hdr; 888 struct be_cmd_req_hdr hdr;
677 u8 rsvd[8]; 889 u8 rsvd[8];
@@ -695,13 +907,6 @@ struct be_cmd_req_vlan_config {
695 u16 normal_vlan[64]; 907 u16 normal_vlan[64];
696} __packed; 908} __packed;
697 909
698struct be_cmd_req_promiscuous_config {
699 struct be_cmd_req_hdr hdr;
700 u8 port0_promiscuous;
701 u8 port1_promiscuous;
702 u16 rsvd0;
703} __packed;
704
705/******************** Multicast MAC Config *******************/ 910/******************** Multicast MAC Config *******************/
706#define BE_MAX_MC 64 /* set mcast promisc if > 64 */ 911#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
707struct macaddr { 912struct macaddr {
@@ -716,11 +921,18 @@ struct be_cmd_req_mcast_mac_config {
716 struct macaddr mac[BE_MAX_MC]; 921 struct macaddr mac[BE_MAX_MC];
717} __packed; 922} __packed;
718 923
719static inline struct be_hw_stats * 924/******************* RX FILTER ******************************/
720hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd) 925struct be_cmd_req_rx_filter {
721{ 926 struct be_cmd_req_hdr hdr;
722 return &cmd->hw_stats; 927 u32 global_flags_mask;
723} 928 u32 global_flags;
929 u32 if_flags_mask;
930 u32 if_flags;
931 u32 if_id;
932 u32 multicast_num;
933 struct macaddr mac[BE_MAX_MC];
934};
935
724 936
725/******************** Link Status Query *******************/ 937/******************** Link Status Query *******************/
726struct be_cmd_req_link_status { 938struct be_cmd_req_link_status {
@@ -920,6 +1132,36 @@ struct be_cmd_write_flashrom {
920 struct flashrom_params params; 1132 struct flashrom_params params;
921}; 1133};
922 1134
1135/**************** Lancer Firmware Flash ************/
1136struct amap_lancer_write_obj_context {
1137 u8 write_length[24];
1138 u8 reserved1[7];
1139 u8 eof;
1140} __packed;
1141
1142struct lancer_cmd_req_write_object {
1143 struct be_cmd_req_hdr hdr;
1144 u8 context[sizeof(struct amap_lancer_write_obj_context) / 8];
1145 u32 write_offset;
1146 u8 object_name[104];
1147 u32 descriptor_count;
1148 u32 buf_len;
1149 u32 addr_low;
1150 u32 addr_high;
1151};
1152
1153struct lancer_cmd_resp_write_object {
1154 u8 opcode;
1155 u8 subsystem;
1156 u8 rsvd1[2];
1157 u8 status;
1158 u8 additional_status;
1159 u8 rsvd2[2];
1160 u32 resp_len;
1161 u32 actual_resp_len;
1162 u32 actual_write_len;
1163};
1164
923/************************ WOL *******************************/ 1165/************************ WOL *******************************/
924struct be_cmd_req_acpi_wol_magic_config{ 1166struct be_cmd_req_acpi_wol_magic_config{
925 struct be_cmd_req_hdr hdr; 1167 struct be_cmd_req_hdr hdr;
@@ -1061,6 +1303,151 @@ struct be_cmd_resp_set_func_cap {
1061 u8 rsvd[212]; 1303 u8 rsvd[212];
1062}; 1304};
1063 1305
1306/*************** HW Stats Get v1 **********************************/
1307#define BE_TXP_SW_SZ 48
1308struct be_port_rxf_stats_v1 {
1309 u32 rsvd0[12];
1310 u32 rx_crc_errors;
1311 u32 rx_alignment_symbol_errors;
1312 u32 rx_pause_frames;
1313 u32 rx_priority_pause_frames;
1314 u32 rx_control_frames;
1315 u32 rx_in_range_errors;
1316 u32 rx_out_range_errors;
1317 u32 rx_frame_too_long;
1318 u32 rx_address_match_errors;
1319 u32 rx_dropped_too_small;
1320 u32 rx_dropped_too_short;
1321 u32 rx_dropped_header_too_small;
1322 u32 rx_dropped_tcp_length;
1323 u32 rx_dropped_runt;
1324 u32 rsvd1[10];
1325 u32 rx_ip_checksum_errs;
1326 u32 rx_tcp_checksum_errs;
1327 u32 rx_udp_checksum_errs;
1328 u32 rsvd2[7];
1329 u32 rx_switched_unicast_packets;
1330 u32 rx_switched_multicast_packets;
1331 u32 rx_switched_broadcast_packets;
1332 u32 rsvd3[3];
1333 u32 tx_pauseframes;
1334 u32 tx_priority_pauseframes;
1335 u32 tx_controlframes;
1336 u32 rsvd4[10];
1337 u32 rxpp_fifo_overflow_drop;
1338 u32 rx_input_fifo_overflow_drop;
1339 u32 pmem_fifo_overflow_drop;
1340 u32 jabber_events;
1341 u32 rsvd5[3];
1342};
1343
1344
1345struct be_rxf_stats_v1 {
1346 struct be_port_rxf_stats_v1 port[4];
1347 u32 rsvd0[2];
1348 u32 rx_drops_no_pbuf;
1349 u32 rx_drops_no_txpb;
1350 u32 rx_drops_no_erx_descr;
1351 u32 rx_drops_no_tpre_descr;
1352 u32 rsvd1[6];
1353 u32 rx_drops_too_many_frags;
1354 u32 rx_drops_invalid_ring;
1355 u32 forwarded_packets;
1356 u32 rx_drops_mtu;
1357 u32 rsvd2[14];
1358};
1359
1360struct be_erx_stats_v1 {
1361 u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/
1362 u32 rsvd[4];
1363};
1364
1365struct be_hw_stats_v1 {
1366 struct be_rxf_stats_v1 rxf;
1367 u32 rsvd0[BE_TXP_SW_SZ];
1368 struct be_erx_stats_v1 erx;
1369 struct be_pmem_stats pmem;
1370 u32 rsvd1[3];
1371};
1372
1373struct be_cmd_req_get_stats_v1 {
1374 struct be_cmd_req_hdr hdr;
1375 u8 rsvd[sizeof(struct be_hw_stats_v1)];
1376};
1377
1378struct be_cmd_resp_get_stats_v1 {
1379 struct be_cmd_resp_hdr hdr;
1380 struct be_hw_stats_v1 hw_stats;
1381};
1382
1383static inline void *
1384hw_stats_from_cmd(struct be_adapter *adapter)
1385{
1386 if (adapter->generation == BE_GEN3) {
1387 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
1388
1389 return &cmd->hw_stats;
1390 } else {
1391 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
1392
1393 return &cmd->hw_stats;
1394 }
1395}
1396
1397static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
1398{
1399 if (adapter->generation == BE_GEN3) {
1400 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1401 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
1402
1403 return &rxf_stats->port[adapter->port_num];
1404 } else {
1405 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1406 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
1407
1408 return &rxf_stats->port[adapter->port_num];
1409 }
1410}
1411
1412static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
1413{
1414 if (adapter->generation == BE_GEN3) {
1415 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1416
1417 return &hw_stats->rxf;
1418 } else {
1419 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1420
1421 return &hw_stats->rxf;
1422 }
1423}
1424
1425static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
1426{
1427 if (adapter->generation == BE_GEN3) {
1428 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1429
1430 return &hw_stats->erx;
1431 } else {
1432 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1433
1434 return &hw_stats->erx;
1435 }
1436}
1437
1438static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
1439{
1440 if (adapter->generation == BE_GEN3) {
1441 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1442
1443 return &hw_stats->pmem;
1444 } else {
1445 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1446
1447 return &hw_stats->pmem;
1448 }
1449}
1450
1064extern int be_pci_fnum_get(struct be_adapter *adapter); 1451extern int be_pci_fnum_get(struct be_adapter *adapter);
1065extern int be_cmd_POST(struct be_adapter *adapter); 1452extern int be_cmd_POST(struct be_adapter *adapter);
1066extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1453extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1093,18 +1480,19 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
1093extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1480extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1094 int type); 1481 int type);
1095extern int be_cmd_link_status_query(struct be_adapter *adapter, 1482extern int be_cmd_link_status_query(struct be_adapter *adapter,
1096 bool *link_up, u8 *mac_speed, u16 *link_speed); 1483 bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom);
1097extern int be_cmd_reset(struct be_adapter *adapter); 1484extern int be_cmd_reset(struct be_adapter *adapter);
1098extern int be_cmd_get_stats(struct be_adapter *adapter, 1485extern int be_cmd_get_stats(struct be_adapter *adapter,
1099 struct be_dma_mem *nonemb_cmd); 1486 struct be_dma_mem *nonemb_cmd);
1487extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1488 struct be_dma_mem *nonemb_cmd);
1100extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver); 1489extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
1101 1490
1102extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd); 1491extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
1103extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, 1492extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
1104 u16 *vtag_array, u32 num, bool untagged, 1493 u16 *vtag_array, u32 num, bool untagged,
1105 bool promiscuous); 1494 bool promiscuous);
1106extern int be_cmd_promiscuous_config(struct be_adapter *adapter, 1495extern int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en);
1107 u8 port_num, bool en);
1108extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, 1496extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1109 struct net_device *netdev, struct be_dma_mem *mem); 1497 struct net_device *netdev, struct be_dma_mem *mem);
1110extern int be_cmd_set_flow_control(struct be_adapter *adapter, 1498extern int be_cmd_set_flow_control(struct be_adapter *adapter,
@@ -1124,6 +1512,11 @@ extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
1124extern int be_cmd_write_flashrom(struct be_adapter *adapter, 1512extern int be_cmd_write_flashrom(struct be_adapter *adapter,
1125 struct be_dma_mem *cmd, u32 flash_oper, 1513 struct be_dma_mem *cmd, u32 flash_oper,
1126 u32 flash_opcode, u32 buf_size); 1514 u32 flash_opcode, u32 buf_size);
1515extern int lancer_cmd_write_object(struct be_adapter *adapter,
1516 struct be_dma_mem *cmd,
1517 u32 data_size, u32 data_offset,
1518 const char *obj_name,
1519 u32 *data_written, u8 *addn_status);
1127int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 1520int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1128 int offset); 1521 int offset);
1129extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 1522extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1148,4 +1541,6 @@ extern void be_detect_dump_ue(struct be_adapter *adapter);
1148extern int be_cmd_get_die_temperature(struct be_adapter *adapter); 1541extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1149extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); 1542extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1150extern int be_cmd_check_native_mode(struct be_adapter *adapter); 1543extern int be_cmd_check_native_mode(struct be_adapter *adapter);
1544extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1545extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1151 1546
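
The Lancer pport stats block above reports every 64-bit counter as a _lo/_hi pair of 32-bit words, and be_main.c recombines each pair with the make_64bit_val() macro, high word first. A small user-space sketch of that packing (illustrative values, not driver code):

/* Standalone illustration of make_64bit_val(): the hi half supplies
 * bits 63..32 and the lo half bits 31..0 of the recombined counter.
 */
#include <stdint.h>
#include <stdio.h>

#define make_64bit_val(hi_32, lo_32) ((((uint64_t)(hi_32)) << 32) | (lo_32))

int main(void)
{
	uint32_t rx_bytes_lo = 0xdeadbeef;	/* lower 32 bits reported by firmware */
	uint32_t rx_bytes_hi = 0x00000002;	/* upper 32 bits reported by firmware */

	/* the hi half is always the first argument, the lo half the second */
	uint64_t rx_bytes = make_64bit_val(rx_bytes_hi, rx_bytes_lo);

	printf("rx_bytes = 0x%llx\n", (unsigned long long)rx_bytes);	/* 0x2deadbeef */
	return 0;
}
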
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index aac248fbd18b..facfe3ca5c40 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,8 +26,8 @@ struct be_ethtool_stat {
26 int offset; 26 int offset;
27}; 27};
28 28
29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT, 29enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
30 PMEMSTAT, DRVSTAT}; 30 DRVSTAT};
31#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ 31#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
32 offsetof(_struct, field) 32 offsetof(_struct, field)
33#define NETSTAT_INFO(field) #field, NETSTAT,\ 33#define NETSTAT_INFO(field) #field, NETSTAT,\
@@ -37,15 +37,8 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
37 FIELDINFO(struct be_tx_stats, field) 37 FIELDINFO(struct be_tx_stats, field)
38#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\ 38#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
39 FIELDINFO(struct be_rx_stats, field) 39 FIELDINFO(struct be_rx_stats, field)
40#define MISCSTAT_INFO(field) #field, MISCSTAT,\ 40#define ERXSTAT_INFO(field) #field, ERXSTAT,\
41 FIELDINFO(struct be_rxf_stats, field) 41 FIELDINFO(struct be_erx_stats_v1, field)
42#define PORTSTAT_INFO(field) #field, PORTSTAT,\
43 FIELDINFO(struct be_port_rxf_stats, \
44 field)
45#define ERXSTAT_INFO(field) #field, ERXSTAT,\
46 FIELDINFO(struct be_erx_stats, field)
47#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\
48 FIELDINFO(struct be_pmem_stats, field)
49#define DRVSTAT_INFO(field) #field, DRVSTAT,\ 42#define DRVSTAT_INFO(field) #field, DRVSTAT,\
50 FIELDINFO(struct be_drv_stats, \ 43 FIELDINFO(struct be_drv_stats, \
51 field) 44 field)
@@ -65,50 +58,41 @@ static const struct be_ethtool_stat et_stats[] = {
65 {DRVSTAT_TX_INFO(be_tx_stops)}, 58 {DRVSTAT_TX_INFO(be_tx_stops)},
66 {DRVSTAT_TX_INFO(be_tx_events)}, 59 {DRVSTAT_TX_INFO(be_tx_events)},
67 {DRVSTAT_TX_INFO(be_tx_compl)}, 60 {DRVSTAT_TX_INFO(be_tx_compl)},
68 {PORTSTAT_INFO(rx_unicast_frames)}, 61 {DRVSTAT_INFO(rx_crc_errors)},
69 {PORTSTAT_INFO(rx_multicast_frames)}, 62 {DRVSTAT_INFO(rx_alignment_symbol_errors)},
70 {PORTSTAT_INFO(rx_broadcast_frames)}, 63 {DRVSTAT_INFO(rx_pause_frames)},
71 {PORTSTAT_INFO(rx_crc_errors)}, 64 {DRVSTAT_INFO(rx_control_frames)},
72 {PORTSTAT_INFO(rx_alignment_symbol_errors)}, 65 {DRVSTAT_INFO(rx_in_range_errors)},
73 {PORTSTAT_INFO(rx_pause_frames)}, 66 {DRVSTAT_INFO(rx_out_range_errors)},
74 {PORTSTAT_INFO(rx_control_frames)}, 67 {DRVSTAT_INFO(rx_frame_too_long)},
75 {PORTSTAT_INFO(rx_in_range_errors)}, 68 {DRVSTAT_INFO(rx_address_match_errors)},
76 {PORTSTAT_INFO(rx_out_range_errors)}, 69 {DRVSTAT_INFO(rx_dropped_too_small)},
77 {PORTSTAT_INFO(rx_frame_too_long)}, 70 {DRVSTAT_INFO(rx_dropped_too_short)},
78 {PORTSTAT_INFO(rx_address_match_errors)}, 71 {DRVSTAT_INFO(rx_dropped_header_too_small)},
79 {PORTSTAT_INFO(rx_vlan_mismatch)}, 72 {DRVSTAT_INFO(rx_dropped_tcp_length)},
80 {PORTSTAT_INFO(rx_dropped_too_small)}, 73 {DRVSTAT_INFO(rx_dropped_runt)},
81 {PORTSTAT_INFO(rx_dropped_too_short)}, 74 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
82 {PORTSTAT_INFO(rx_dropped_header_too_small)}, 75 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
83 {PORTSTAT_INFO(rx_dropped_tcp_length)}, 76 {DRVSTAT_INFO(rx_ip_checksum_errs)},
84 {PORTSTAT_INFO(rx_dropped_runt)}, 77 {DRVSTAT_INFO(rx_tcp_checksum_errs)},
85 {PORTSTAT_INFO(rx_fifo_overflow)}, 78 {DRVSTAT_INFO(rx_udp_checksum_errs)},
86 {PORTSTAT_INFO(rx_input_fifo_overflow)}, 79 {DRVSTAT_INFO(rx_switched_unicast_packets)},
87 {PORTSTAT_INFO(rx_ip_checksum_errs)}, 80 {DRVSTAT_INFO(rx_switched_multicast_packets)},
88 {PORTSTAT_INFO(rx_tcp_checksum_errs)}, 81 {DRVSTAT_INFO(rx_switched_broadcast_packets)},
89 {PORTSTAT_INFO(rx_udp_checksum_errs)}, 82 {DRVSTAT_INFO(tx_pauseframes)},
90 {PORTSTAT_INFO(rx_non_rss_packets)}, 83 {DRVSTAT_INFO(tx_controlframes)},
91 {PORTSTAT_INFO(rx_ipv4_packets)}, 84 {DRVSTAT_INFO(rx_priority_pause_frames)},
92 {PORTSTAT_INFO(rx_ipv6_packets)}, 85 {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
93 {PORTSTAT_INFO(rx_switched_unicast_packets)}, 86 {DRVSTAT_INFO(jabber_events)},
94 {PORTSTAT_INFO(rx_switched_multicast_packets)}, 87 {DRVSTAT_INFO(rx_drops_no_pbuf)},
95 {PORTSTAT_INFO(rx_switched_broadcast_packets)}, 88 {DRVSTAT_INFO(rx_drops_no_txpb)},
96 {PORTSTAT_INFO(tx_unicastframes)}, 89 {DRVSTAT_INFO(rx_drops_no_erx_descr)},
97 {PORTSTAT_INFO(tx_multicastframes)}, 90 {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
98 {PORTSTAT_INFO(tx_broadcastframes)}, 91 {DRVSTAT_INFO(rx_drops_too_many_frags)},
99 {PORTSTAT_INFO(tx_pauseframes)}, 92 {DRVSTAT_INFO(rx_drops_invalid_ring)},
100 {PORTSTAT_INFO(tx_controlframes)}, 93 {DRVSTAT_INFO(forwarded_packets)},
101 {MISCSTAT_INFO(rx_drops_no_pbuf)}, 94 {DRVSTAT_INFO(rx_drops_mtu)},
102 {MISCSTAT_INFO(rx_drops_no_txpb)}, 95 {DRVSTAT_INFO(eth_red_drops)},
103 {MISCSTAT_INFO(rx_drops_no_erx_descr)},
104 {MISCSTAT_INFO(rx_drops_no_tpre_descr)},
105 {MISCSTAT_INFO(rx_drops_too_many_frags)},
106 {MISCSTAT_INFO(rx_drops_invalid_ring)},
107 {MISCSTAT_INFO(forwarded_packets)},
108 {MISCSTAT_INFO(rx_drops_mtu)},
109 {MISCSTAT_INFO(port0_jabber_events)},
110 {MISCSTAT_INFO(port1_jabber_events)},
111 {PMEMSTAT_INFO(eth_red_drops)},
112 {DRVSTAT_INFO(be_on_die_temperature)} 96 {DRVSTAT_INFO(be_on_die_temperature)}
113}; 97};
114#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) 98#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
@@ -156,6 +140,29 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
156} 140}
157 141
158static int 142static int
143be_get_reg_len(struct net_device *netdev)
144{
145 struct be_adapter *adapter = netdev_priv(netdev);
146 u32 log_size = 0;
147
148 if (be_physfn(adapter))
149 be_cmd_get_reg_len(adapter, &log_size);
150
151 return log_size;
152}
153
154static void
155be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
156{
157 struct be_adapter *adapter = netdev_priv(netdev);
158
159 if (be_physfn(adapter)) {
160 memset(buf, 0, regs->len);
161 be_cmd_get_regs(adapter, regs->len, buf);
162 }
163}
164
165static int
159be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 166be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
160{ 167{
161 struct be_adapter *adapter = netdev_priv(netdev); 168 struct be_adapter *adapter = netdev_priv(netdev);
@@ -186,9 +193,9 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
186 struct be_rx_obj *rxo; 193 struct be_rx_obj *rxo;
187 struct be_eq_obj *rx_eq; 194 struct be_eq_obj *rx_eq;
188 struct be_eq_obj *tx_eq = &adapter->tx_eq; 195 struct be_eq_obj *tx_eq = &adapter->tx_eq;
189 u32 tx_max, tx_min, tx_cur;
190 u32 rx_max, rx_min, rx_cur; 196 u32 rx_max, rx_min, rx_cur;
191 int status = 0, i; 197 int status = 0, i;
198 u32 tx_cur;
192 199
193 if (coalesce->use_adaptive_tx_coalesce == 1) 200 if (coalesce->use_adaptive_tx_coalesce == 1)
194 return -EINVAL; 201 return -EINVAL;
@@ -227,8 +234,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
227 } 234 }
228 } 235 }
229 236
230 tx_max = coalesce->tx_coalesce_usecs_high;
231 tx_min = coalesce->tx_coalesce_usecs_low;
232 tx_cur = coalesce->tx_coalesce_usecs; 237 tx_cur = coalesce->tx_coalesce_usecs;
233 238
234 if (tx_cur > BE_MAX_EQD) 239 if (tx_cur > BE_MAX_EQD)
@@ -242,32 +247,11 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
242 return 0; 247 return 0;
243} 248}
244 249
245static u32 be_get_rx_csum(struct net_device *netdev)
246{
247 struct be_adapter *adapter = netdev_priv(netdev);
248
249 return adapter->rx_csum;
250}
251
252static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
253{
254 struct be_adapter *adapter = netdev_priv(netdev);
255
256 if (data)
257 adapter->rx_csum = true;
258 else
259 adapter->rx_csum = false;
260
261 return 0;
262}
263
264static void 250static void
265be_get_ethtool_stats(struct net_device *netdev, 251be_get_ethtool_stats(struct net_device *netdev,
266 struct ethtool_stats *stats, uint64_t *data) 252 struct ethtool_stats *stats, uint64_t *data)
267{ 253{
268 struct be_adapter *adapter = netdev_priv(netdev); 254 struct be_adapter *adapter = netdev_priv(netdev);
269 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
270 struct be_erx_stats *erx_stats = &hw_stats->erx;
271 struct be_rx_obj *rxo; 255 struct be_rx_obj *rxo;
272 void *p = NULL; 256 void *p = NULL;
273 int i, j; 257 int i, j;
@@ -280,15 +264,6 @@ be_get_ethtool_stats(struct net_device *netdev,
280 case DRVSTAT_TX: 264 case DRVSTAT_TX:
281 p = &adapter->tx_stats; 265 p = &adapter->tx_stats;
282 break; 266 break;
283 case PORTSTAT:
284 p = &hw_stats->rxf.port[adapter->port_num];
285 break;
286 case MISCSTAT:
287 p = &hw_stats->rxf;
288 break;
289 case PMEMSTAT:
290 p = &hw_stats->pmem;
291 break;
292 case DRVSTAT: 267 case DRVSTAT:
293 p = &adapter->drv_stats; 268 p = &adapter->drv_stats;
294 break; 269 break;
@@ -306,7 +281,8 @@ be_get_ethtool_stats(struct net_device *netdev,
306 p = (u8 *)&rxo->stats + et_rx_stats[i].offset; 281 p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
307 break; 282 break;
308 case ERXSTAT: 283 case ERXSTAT:
309 p = (u32 *)erx_stats + rxo->q.id; 284 p = (u32 *)be_erx_stats_from_cmd(adapter) +
285 rxo->q.id;
310 break; 286 break;
311 } 287 }
312 data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] = 288 data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] =
@@ -374,19 +350,28 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
374 350
375 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { 351 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
376 status = be_cmd_link_status_query(adapter, &link_up, 352 status = be_cmd_link_status_query(adapter, &link_up,
377 &mac_speed, &link_speed); 353 &mac_speed, &link_speed, 0);
378 354
379 be_link_status_update(adapter, link_up); 355 be_link_status_update(adapter, link_up);
380 /* link_speed is in units of 10 Mbps */ 356 /* link_speed is in units of 10 Mbps */
381 if (link_speed) { 357 if (link_speed) {
382 ecmd->speed = link_speed*10; 358 ethtool_cmd_speed_set(ecmd, link_speed*10);
383 } else { 359 } else {
384 switch (mac_speed) { 360 switch (mac_speed) {
361 case PHY_LINK_SPEED_10MBPS:
362 ethtool_cmd_speed_set(ecmd, SPEED_10);
363 break;
364 case PHY_LINK_SPEED_100MBPS:
365 ethtool_cmd_speed_set(ecmd, SPEED_100);
366 break;
385 case PHY_LINK_SPEED_1GBPS: 367 case PHY_LINK_SPEED_1GBPS:
386 ecmd->speed = SPEED_1000; 368 ethtool_cmd_speed_set(ecmd, SPEED_1000);
387 break; 369 break;
388 case PHY_LINK_SPEED_10GBPS: 370 case PHY_LINK_SPEED_10GBPS:
389 ecmd->speed = SPEED_10000; 371 ethtool_cmd_speed_set(ecmd, SPEED_10000);
372 break;
373 case PHY_LINK_SPEED_ZERO:
374 ethtool_cmd_speed_set(ecmd, 0);
390 break; 375 break;
391 } 376 }
392 } 377 }
@@ -429,14 +414,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
429 } 414 }
430 415
431 /* Save for future use */ 416 /* Save for future use */
432 adapter->link_speed = ecmd->speed; 417 adapter->link_speed = ethtool_cmd_speed(ecmd);
433 adapter->port_type = ecmd->port; 418 adapter->port_type = ecmd->port;
434 adapter->transceiver = ecmd->transceiver; 419 adapter->transceiver = ecmd->transceiver;
435 adapter->autoneg = ecmd->autoneg; 420 adapter->autoneg = ecmd->autoneg;
436 dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va, 421 dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
437 phy_cmd.dma); 422 phy_cmd.dma);
438 } else { 423 } else {
439 ecmd->speed = adapter->link_speed; 424 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
440 ecmd->port = adapter->port_type; 425 ecmd->port = adapter->port_type;
441 ecmd->transceiver = adapter->transceiver; 426 ecmd->transceiver = adapter->transceiver;
442 ecmd->autoneg = adapter->autoneg; 427 ecmd->autoneg = adapter->autoneg;
@@ -507,29 +492,33 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
507} 492}
508 493
509static int 494static int
510be_phys_id(struct net_device *netdev, u32 data) 495be_set_phys_id(struct net_device *netdev,
496 enum ethtool_phys_id_state state)
511{ 497{
512 struct be_adapter *adapter = netdev_priv(netdev); 498 struct be_adapter *adapter = netdev_priv(netdev);
513 int status;
514 u32 cur;
515
516 be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
517 499
518 if (cur == BEACON_STATE_ENABLED) 500 switch (state) {
519 return 0; 501 case ETHTOOL_ID_ACTIVE:
502 be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
503 &adapter->beacon_state);
504 return 1; /* cycle on/off once per second */
520 505
521 if (data < 2) 506 case ETHTOOL_ID_ON:
522 data = 2; 507 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
508 BEACON_STATE_ENABLED);
509 break;
523 510
524 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, 511 case ETHTOOL_ID_OFF:
525 BEACON_STATE_ENABLED); 512 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
526 set_current_state(TASK_INTERRUPTIBLE); 513 BEACON_STATE_DISABLED);
527 schedule_timeout(data*HZ); 514 break;
528 515
529 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, 516 case ETHTOOL_ID_INACTIVE:
530 BEACON_STATE_DISABLED); 517 be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
518 adapter->beacon_state);
519 }
531 520
532 return status; 521 return 0;
533} 522}
534 523
535static bool 524static bool
@@ -646,7 +635,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
646 } 635 }
647 636
648 if (be_cmd_link_status_query(adapter, &link_up, &mac_speed, 637 if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
649 &qos_link_speed) != 0) { 638 &qos_link_speed, 0) != 0) {
650 test->flags |= ETH_TEST_FL_FAILED; 639 test->flags |= ETH_TEST_FL_FAILED;
651 data[4] = -1; 640 data[4] = -1;
652 } else if (!mac_speed) { 641 } else if (!mac_speed) {
@@ -660,11 +649,9 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
660{ 649{
661 struct be_adapter *adapter = netdev_priv(netdev); 650 struct be_adapter *adapter = netdev_priv(netdev);
662 char file_name[ETHTOOL_FLASH_MAX_FILENAME]; 651 char file_name[ETHTOOL_FLASH_MAX_FILENAME];
663 u32 region;
664 652
665 file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0; 653 file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
666 strcpy(file_name, efl->data); 654 strcpy(file_name, efl->data);
667 region = efl->region;
668 655
669 return be_load_fw(adapter, file_name); 656 return be_load_fw(adapter, file_name);
670} 657}
@@ -725,18 +712,12 @@ const struct ethtool_ops be_ethtool_ops = {
725 .get_ringparam = be_get_ringparam, 712 .get_ringparam = be_get_ringparam,
726 .get_pauseparam = be_get_pauseparam, 713 .get_pauseparam = be_get_pauseparam,
727 .set_pauseparam = be_set_pauseparam, 714 .set_pauseparam = be_set_pauseparam,
728 .get_rx_csum = be_get_rx_csum,
729 .set_rx_csum = be_set_rx_csum,
730 .get_tx_csum = ethtool_op_get_tx_csum,
731 .set_tx_csum = ethtool_op_set_tx_hw_csum,
732 .get_sg = ethtool_op_get_sg,
733 .set_sg = ethtool_op_set_sg,
734 .get_tso = ethtool_op_get_tso,
735 .set_tso = ethtool_op_set_tso,
736 .get_strings = be_get_stat_strings, 715 .get_strings = be_get_stat_strings,
737 .phys_id = be_phys_id, 716 .set_phys_id = be_set_phys_id,
738 .get_sset_count = be_get_sset_count, 717 .get_sset_count = be_get_sset_count,
739 .get_ethtool_stats = be_get_ethtool_stats, 718 .get_ethtool_stats = be_get_ethtool_stats,
719 .get_regs_len = be_get_reg_len,
720 .get_regs = be_get_regs,
740 .flash_device = be_do_flash, 721 .flash_device = be_do_flash,
741 .self_test = be_self_test, 722 .self_test = be_self_test,
742}; 723};
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index d4344a06090b..53d658afea2a 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -155,6 +155,10 @@
155/********** SRIOV VF PCICFG OFFSET ********/ 155/********** SRIOV VF PCICFG OFFSET ********/
156#define SRIOV_VF_PCICFG_OFFSET (4096) 156#define SRIOV_VF_PCICFG_OFFSET (4096)
157 157
158/********** FAT TABLE ********/
159#define RETRIEVE_FAT 0
160#define QUERY_FAT 1
161
158/* Flashrom related descriptors */ 162/* Flashrom related descriptors */
159#define IMAGE_TYPE_FIRMWARE 160 163#define IMAGE_TYPE_FIRMWARE 160
160#define IMAGE_TYPE_BOOTCODE 224 164#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 9187fb4e08f1..7322a511e936 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -116,11 +116,6 @@ static char *ue_status_hi_desc[] = {
116 "Unknown" 116 "Unknown"
117}; 117};
118 118
119static inline bool be_multi_rxq(struct be_adapter *adapter)
120{
121 return (adapter->num_rx_qs > 1);
122}
123
124static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 119static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{ 120{
126 struct be_dma_mem *mem = &q->dma_mem; 121 struct be_dma_mem *mem = &q->dma_mem;
@@ -250,14 +245,185 @@ netdev_addr:
250 return status; 245 return status;
251} 246}
252 247
248static void populate_be2_stats(struct be_adapter *adapter)
249{
250
251 struct be_drv_stats *drvs = &adapter->drv_stats;
252 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
253 struct be_port_rxf_stats_v0 *port_stats =
254 be_port_rxf_stats_from_cmd(adapter);
255 struct be_rxf_stats_v0 *rxf_stats =
256 be_rxf_stats_from_cmd(adapter);
257
258 drvs->rx_pause_frames = port_stats->rx_pause_frames;
259 drvs->rx_crc_errors = port_stats->rx_crc_errors;
260 drvs->rx_control_frames = port_stats->rx_control_frames;
261 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
262 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
263 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
264 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
265 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
266 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
267 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
268 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
269 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
270 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
271 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
272 drvs->rx_input_fifo_overflow_drop =
273 port_stats->rx_input_fifo_overflow;
274 drvs->rx_dropped_header_too_small =
275 port_stats->rx_dropped_header_too_small;
276 drvs->rx_address_match_errors =
277 port_stats->rx_address_match_errors;
278 drvs->rx_alignment_symbol_errors =
279 port_stats->rx_alignment_symbol_errors;
280
281 drvs->tx_pauseframes = port_stats->tx_pauseframes;
282 drvs->tx_controlframes = port_stats->tx_controlframes;
283
284 if (adapter->port_num)
285 drvs->jabber_events =
286 rxf_stats->port1_jabber_events;
287 else
288 drvs->jabber_events =
289 rxf_stats->port0_jabber_events;
290 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
291 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
292 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
293 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
294 drvs->forwarded_packets = rxf_stats->forwarded_packets;
295 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
296 drvs->rx_drops_no_tpre_descr =
297 rxf_stats->rx_drops_no_tpre_descr;
298 drvs->rx_drops_too_many_frags =
299 rxf_stats->rx_drops_too_many_frags;
300 drvs->eth_red_drops = pmem_sts->eth_red_drops;
301}
302
303static void populate_be3_stats(struct be_adapter *adapter)
304{
305 struct be_drv_stats *drvs = &adapter->drv_stats;
306 struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
307
308 struct be_rxf_stats_v1 *rxf_stats =
309 be_rxf_stats_from_cmd(adapter);
310 struct be_port_rxf_stats_v1 *port_stats =
311 be_port_rxf_stats_from_cmd(adapter);
312
313 drvs->rx_priority_pause_frames = 0;
314 drvs->pmem_fifo_overflow_drop = 0;
315 drvs->rx_pause_frames = port_stats->rx_pause_frames;
316 drvs->rx_crc_errors = port_stats->rx_crc_errors;
317 drvs->rx_control_frames = port_stats->rx_control_frames;
318 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
319 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
320 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
321 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
322 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
323 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
324 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
325 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
326 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
327 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
328 drvs->rx_dropped_header_too_small =
329 port_stats->rx_dropped_header_too_small;
330 drvs->rx_input_fifo_overflow_drop =
331 port_stats->rx_input_fifo_overflow_drop;
332 drvs->rx_address_match_errors =
333 port_stats->rx_address_match_errors;
334 drvs->rx_alignment_symbol_errors =
335 port_stats->rx_alignment_symbol_errors;
336 drvs->rxpp_fifo_overflow_drop =
337 port_stats->rxpp_fifo_overflow_drop;
338 drvs->tx_pauseframes = port_stats->tx_pauseframes;
339 drvs->tx_controlframes = port_stats->tx_controlframes;
340 drvs->jabber_events = port_stats->jabber_events;
341 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
342 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
343 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
344 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
345 drvs->forwarded_packets = rxf_stats->forwarded_packets;
346 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
347 drvs->rx_drops_no_tpre_descr =
348 rxf_stats->rx_drops_no_tpre_descr;
349 drvs->rx_drops_too_many_frags =
350 rxf_stats->rx_drops_too_many_frags;
351 drvs->eth_red_drops = pmem_sts->eth_red_drops;
352}
353
354static void populate_lancer_stats(struct be_adapter *adapter)
355{
356
357 struct be_drv_stats *drvs = &adapter->drv_stats;
358 struct lancer_cmd_pport_stats *pport_stats = pport_stats_from_cmd
359 (adapter);
360 drvs->rx_priority_pause_frames = 0;
361 drvs->pmem_fifo_overflow_drop = 0;
362 drvs->rx_pause_frames =
363 make_64bit_val(pport_stats->rx_pause_frames_hi,
364 pport_stats->rx_pause_frames_lo);
365 drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
366 pport_stats->rx_crc_errors_lo);
367 drvs->rx_control_frames =
368 make_64bit_val(pport_stats->rx_control_frames_hi,
369 pport_stats->rx_control_frames_lo);
370 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
371 drvs->rx_frame_too_long =
372 make_64bit_val(pport_stats->rx_frames_too_long_hi,
373 pport_stats->rx_frames_too_long_lo);
374 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
375 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
376 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
377 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
378 drvs->rx_dropped_tcp_length =
379 pport_stats->rx_dropped_invalid_tcp_length;
380 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
381 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
382 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
383 drvs->rx_dropped_header_too_small =
384 pport_stats->rx_dropped_header_too_small;
385 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
386 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
387 drvs->rx_alignment_symbol_errors =
388 make_64bit_val(pport_stats->rx_symbol_errors_hi,
389 pport_stats->rx_symbol_errors_lo);
390 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
391 drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
392 pport_stats->tx_pause_frames_lo);
393 drvs->tx_controlframes =
394 make_64bit_val(pport_stats->tx_control_frames_hi,
395 pport_stats->tx_control_frames_lo);
396 drvs->jabber_events = pport_stats->rx_jabbers;
397 drvs->rx_drops_no_pbuf = 0;
398 drvs->rx_drops_no_txpb = 0;
399 drvs->rx_drops_no_erx_descr = 0;
400 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
401 drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
402 pport_stats->num_forwards_lo);
403 drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
404 pport_stats->rx_drops_mtu_lo);
405 drvs->rx_drops_no_tpre_descr = 0;
406 drvs->rx_drops_too_many_frags =
407 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
408 pport_stats->rx_drops_too_many_frags_lo);
409}
410
411void be_parse_stats(struct be_adapter *adapter)
412{
413 if (adapter->generation == BE_GEN3) {
414 if (lancer_chip(adapter))
415 populate_lancer_stats(adapter);
416 else
417 populate_be3_stats(adapter);
418 } else {
419 populate_be2_stats(adapter);
420 }
421}
422
253void netdev_stats_update(struct be_adapter *adapter) 423void netdev_stats_update(struct be_adapter *adapter)
254{ 424{
255 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va); 425 struct be_drv_stats *drvs = &adapter->drv_stats;
256 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
257 struct be_port_rxf_stats *port_stats =
258 &rxf_stats->port[adapter->port_num];
259 struct net_device_stats *dev_stats = &adapter->netdev->stats; 426 struct net_device_stats *dev_stats = &adapter->netdev->stats;
260 struct be_erx_stats *erx_stats = &hw_stats->erx;
261 struct be_rx_obj *rxo; 427 struct be_rx_obj *rxo;
262 int i; 428 int i;
263 429
@@ -267,43 +433,54 @@ void netdev_stats_update(struct be_adapter *adapter)
267 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes; 433 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
268 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts; 434 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
269 /* no space in linux buffers: best possible approximation */ 435 /* no space in linux buffers: best possible approximation */
270 dev_stats->rx_dropped += 436 if (adapter->generation == BE_GEN3) {
271 erx_stats->rx_drops_no_fragments[rxo->q.id]; 437 if (!(lancer_chip(adapter))) {
438 struct be_erx_stats_v1 *erx_stats =
439 be_erx_stats_from_cmd(adapter);
440 dev_stats->rx_dropped +=
441 erx_stats->rx_drops_no_fragments[rxo->q.id];
442 }
443 } else {
444 struct be_erx_stats_v0 *erx_stats =
445 be_erx_stats_from_cmd(adapter);
446 dev_stats->rx_dropped +=
447 erx_stats->rx_drops_no_fragments[rxo->q.id];
448 }
272 } 449 }
273 450
274 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts; 451 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
275 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes; 452 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
276 453
277 /* bad pkts received */ 454 /* bad pkts received */
278 dev_stats->rx_errors = port_stats->rx_crc_errors + 455 dev_stats->rx_errors = drvs->rx_crc_errors +
279 port_stats->rx_alignment_symbol_errors + 456 drvs->rx_alignment_symbol_errors +
280 port_stats->rx_in_range_errors + 457 drvs->rx_in_range_errors +
281 port_stats->rx_out_range_errors + 458 drvs->rx_out_range_errors +
282 port_stats->rx_frame_too_long + 459 drvs->rx_frame_too_long +
283 port_stats->rx_dropped_too_small + 460 drvs->rx_dropped_too_small +
284 port_stats->rx_dropped_too_short + 461 drvs->rx_dropped_too_short +
285 port_stats->rx_dropped_header_too_small + 462 drvs->rx_dropped_header_too_small +
286 port_stats->rx_dropped_tcp_length + 463 drvs->rx_dropped_tcp_length +
287 port_stats->rx_dropped_runt + 464 drvs->rx_dropped_runt +
288 port_stats->rx_tcp_checksum_errs + 465 drvs->rx_tcp_checksum_errs +
289 port_stats->rx_ip_checksum_errs + 466 drvs->rx_ip_checksum_errs +
290 port_stats->rx_udp_checksum_errs; 467 drvs->rx_udp_checksum_errs;
291 468
292 /* detailed rx errors */ 469 /* detailed rx errors */
293 dev_stats->rx_length_errors = port_stats->rx_in_range_errors + 470 dev_stats->rx_length_errors = drvs->rx_in_range_errors +
294 port_stats->rx_out_range_errors + 471 drvs->rx_out_range_errors +
295 port_stats->rx_frame_too_long; 472 drvs->rx_frame_too_long;
296 473
297 dev_stats->rx_crc_errors = port_stats->rx_crc_errors; 474 dev_stats->rx_crc_errors = drvs->rx_crc_errors;
298 475
299 /* frame alignment errors */ 476 /* frame alignment errors */
300 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors; 477 dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
301 478
302 /* receiver fifo overrun */ 479 /* receiver fifo overrun */
 303 	/* drops_no_pbuf is not per i/f, it's per BE card */ 480 	/* drops_no_pbuf is not per i/f, it's per BE card */
304 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow + 481 dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
305 port_stats->rx_input_fifo_overflow + 482 drvs->rx_input_fifo_overflow_drop +
306 rxf_stats->rx_drops_no_pbuf; 483 drvs->rx_drops_no_pbuf;
307} 484}
308 485
309void be_link_status_update(struct be_adapter *adapter, bool link_up) 486void be_link_status_update(struct be_adapter *adapter, bool link_up)
@@ -703,7 +880,7 @@ static void be_set_multicast_list(struct net_device *netdev)
703 struct be_adapter *adapter = netdev_priv(netdev); 880 struct be_adapter *adapter = netdev_priv(netdev);
704 881
705 if (netdev->flags & IFF_PROMISC) { 882 if (netdev->flags & IFF_PROMISC) {
706 be_cmd_promiscuous_config(adapter, adapter->port_num, 1); 883 be_cmd_promiscuous_config(adapter, true);
707 adapter->promiscuous = true; 884 adapter->promiscuous = true;
708 goto done; 885 goto done;
709 } 886 }
@@ -711,7 +888,7 @@ static void be_set_multicast_list(struct net_device *netdev)
711 /* BE was previously in promiscuous mode; disable it */ 888 /* BE was previously in promiscuous mode; disable it */
712 if (adapter->promiscuous) { 889 if (adapter->promiscuous) {
713 adapter->promiscuous = false; 890 adapter->promiscuous = false;
714 be_cmd_promiscuous_config(adapter, adapter->port_num, 0); 891 be_cmd_promiscuous_config(adapter, false);
715 } 892 }
716 893
717 /* Enable multicast promisc if num configured exceeds what we support */ 894 /* Enable multicast promisc if num configured exceeds what we support */
@@ -993,9 +1170,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
993 struct be_rx_obj *rxo, 1170 struct be_rx_obj *rxo,
994 struct be_rx_compl_info *rxcp) 1171 struct be_rx_compl_info *rxcp)
995{ 1172{
1173 struct net_device *netdev = adapter->netdev;
996 struct sk_buff *skb; 1174 struct sk_buff *skb;
997 1175
998 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 1176 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
999 if (unlikely(!skb)) { 1177 if (unlikely(!skb)) {
1000 if (net_ratelimit()) 1178 if (net_ratelimit())
1001 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 1179 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -1005,13 +1183,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1005 1183
1006 skb_fill_rx_data(adapter, rxo, skb, rxcp); 1184 skb_fill_rx_data(adapter, rxo, skb, rxcp);
1007 1185
1008 if (likely(adapter->rx_csum && csum_passed(rxcp))) 1186 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1009 skb->ip_summed = CHECKSUM_UNNECESSARY; 1187 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010 else 1188 else
1011 skb_checksum_none_assert(skb); 1189 skb_checksum_none_assert(skb);
1012 1190
1013 skb->truesize = skb->len + sizeof(struct sk_buff); 1191 skb->truesize = skb->len + sizeof(struct sk_buff);
1014 skb->protocol = eth_type_trans(skb, adapter->netdev); 1192 skb->protocol = eth_type_trans(skb, netdev);
1193 if (adapter->netdev->features & NETIF_F_RXHASH)
1194 skb->rxhash = rxcp->rss_hash;
1195
1015 1196
1016 if (unlikely(rxcp->vlanf)) { 1197 if (unlikely(rxcp->vlanf)) {
1017 if (!adapter->vlan_grp || adapter->vlans_added == 0) { 1198 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
@@ -1073,6 +1254,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1073 skb->data_len = rxcp->pkt_size; 1254 skb->data_len = rxcp->pkt_size;
1074 skb->truesize += rxcp->pkt_size; 1255 skb->truesize += rxcp->pkt_size;
1075 skb->ip_summed = CHECKSUM_UNNECESSARY; 1256 skb->ip_summed = CHECKSUM_UNNECESSARY;
1257 if (adapter->netdev->features & NETIF_F_RXHASH)
1258 skb->rxhash = rxcp->rss_hash;
1076 1259
1077 if (likely(!rxcp->vlanf)) 1260 if (likely(!rxcp->vlanf))
1078 napi_gro_frags(&eq_obj->napi); 1261 napi_gro_frags(&eq_obj->napi);
@@ -1103,9 +1286,14 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1103 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); 1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1104 rxcp->pkt_type = 1287 rxcp->pkt_type =
1105 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); 1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1106 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); 1289 rxcp->rss_hash =
1107 	rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, 1290 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1108 compl); 1291 if (rxcp->vlanf) {
1292 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1293 compl);
1294 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1295 compl);
1296 }
1109} 1297}
1110 1298
1111static void be_parse_rx_compl_v0(struct be_adapter *adapter, 1299static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1130,9 +1318,14 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1130 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); 1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1131 rxcp->pkt_type = 1319 rxcp->pkt_type =
1132 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); 1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1133 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); 1321 rxcp->rss_hash =
1134 	rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, 1322 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1135 compl); 1323 if (rxcp->vlanf) {
1324 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1325 compl);
1326 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1327 compl);
1328 }
1136} 1329}
1137 1330
1138static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) 1331static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1154,17 +1347,20 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1154 else 1347 else
1155 be_parse_rx_compl_v0(adapter, compl, rxcp); 1348 be_parse_rx_compl_v0(adapter, compl, rxcp);
1156 1349
1157 /* vlanf could be wrongly set in some cards. ignore if vtm is not set */ 1350 if (rxcp->vlanf) {
1158 if ((adapter->function_mode & 0x400) && !rxcp->vtm) 1351 /* vlanf could be wrongly set in some cards.
1159 rxcp->vlanf = 0; 1352 * ignore if vtm is not set */
1353 if ((adapter->function_mode & 0x400) && !rxcp->vtm)
1354 rxcp->vlanf = 0;
1160 1355
1161 if (!lancer_chip(adapter)) 1356 if (!lancer_chip(adapter))
1162 rxcp->vlan_tag = swab16(rxcp->vlan_tag); 1357 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1163 1358
1164 if (((adapter->pvid & VLAN_VID_MASK) == 1359 if (((adapter->pvid & VLAN_VID_MASK) ==
1165 (rxcp->vlan_tag & VLAN_VID_MASK)) && 1360 (rxcp->vlan_tag & VLAN_VID_MASK)) &&
1166 !adapter->vlan_tag[rxcp->vlan_tag]) 1361 !adapter->vlan_tag[rxcp->vlan_tag])
1167 rxcp->vlanf = 0; 1362 rxcp->vlanf = 0;
1363 }
1168 1364
1169 	/* As the compl has been parsed, reset it; we won't touch it again */ 1365 	/* As the compl has been parsed, reset it; we won't touch it again */
1170 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0; 1366 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
@@ -1261,7 +1457,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1261 return txcp; 1457 return txcp;
1262} 1458}
1263 1459
1264static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) 1460static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1265{ 1461{
1266 struct be_queue_info *txq = &adapter->tx_obj.q; 1462 struct be_queue_info *txq = &adapter->tx_obj.q;
1267 struct be_eth_wrb *wrb; 1463 struct be_eth_wrb *wrb;
@@ -1288,9 +1484,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1288 queue_tail_inc(txq); 1484 queue_tail_inc(txq);
1289 } while (cur_index != last_index); 1485 } while (cur_index != last_index);
1290 1486
1291 atomic_sub(num_wrbs, &txq->used);
1292
1293 kfree_skb(sent_skb); 1487 kfree_skb(sent_skb);
1488 return num_wrbs;
1294} 1489}
1295 1490
1296static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj) 1491static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
@@ -1373,7 +1568,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1373 struct be_queue_info *tx_cq = &adapter->tx_obj.cq; 1568 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1374 struct be_queue_info *txq = &adapter->tx_obj.q; 1569 struct be_queue_info *txq = &adapter->tx_obj.q;
1375 struct be_eth_tx_compl *txcp; 1570 struct be_eth_tx_compl *txcp;
1376 u16 end_idx, cmpl = 0, timeo = 0; 1571 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1377 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; 1572 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1378 struct sk_buff *sent_skb; 1573 struct sk_buff *sent_skb;
1379 bool dummy_wrb; 1574 bool dummy_wrb;
@@ -1383,12 +1578,14 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1383 while ((txcp = be_tx_compl_get(tx_cq))) { 1578 while ((txcp = be_tx_compl_get(tx_cq))) {
1384 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1579 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1385 wrb_index, txcp); 1580 wrb_index, txcp);
1386 be_tx_compl_process(adapter, end_idx); 1581 num_wrbs += be_tx_compl_process(adapter, end_idx);
1387 cmpl++; 1582 cmpl++;
1388 } 1583 }
1389 if (cmpl) { 1584 if (cmpl) {
1390 be_cq_notify(adapter, tx_cq->id, false, cmpl); 1585 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1586 atomic_sub(num_wrbs, &txq->used);
1391 cmpl = 0; 1587 cmpl = 0;
1588 num_wrbs = 0;
1392 } 1589 }
1393 1590
1394 if (atomic_read(&txq->used) == 0 || ++timeo > 200) 1591 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
@@ -1408,7 +1605,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1408 index_adv(&end_idx, 1605 index_adv(&end_idx,
1409 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1, 1606 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1410 txq->len); 1607 txq->len);
1411 be_tx_compl_process(adapter, end_idx); 1608 num_wrbs = be_tx_compl_process(adapter, end_idx);
1609 atomic_sub(num_wrbs, &txq->used);
1412 } 1610 }
1413} 1611}
1414 1612
@@ -1573,12 +1771,31 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
1573 } 1771 }
1574} 1772}
1575 1773
1774static u32 be_num_rxqs_want(struct be_adapter *adapter)
1775{
1776 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1777 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1778 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1779 } else {
1780 dev_warn(&adapter->pdev->dev,
1781 "No support for multiple RX queues\n");
1782 return 1;
1783 }
1784}
1785
1576static int be_rx_queues_create(struct be_adapter *adapter) 1786static int be_rx_queues_create(struct be_adapter *adapter)
1577{ 1787{
1578 struct be_queue_info *eq, *q, *cq; 1788 struct be_queue_info *eq, *q, *cq;
1579 struct be_rx_obj *rxo; 1789 struct be_rx_obj *rxo;
1580 int rc, i; 1790 int rc, i;
1581 1791
1792 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1793 msix_enabled(adapter) ?
1794 adapter->num_msix_vec - 1 : 1);
1795 if (adapter->num_rx_qs != MAX_RX_QS)
1796 dev_warn(&adapter->pdev->dev,
1797 "Can create only %d RX queues", adapter->num_rx_qs);
1798
1582 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1799 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1583 for_all_rx_queues(adapter, rxo, i) { 1800 for_all_rx_queues(adapter, rxo, i) {
1584 rxo->adapter = adapter; 1801 rxo->adapter = adapter;
@@ -1724,12 +1941,15 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1724 break; 1941 break;
1725 1942
1726 /* Ignore flush completions */ 1943 /* Ignore flush completions */
1727 if (rxcp->num_rcvd) { 1944 if (rxcp->num_rcvd && rxcp->pkt_size) {
1728 if (do_gro(rxcp)) 1945 if (do_gro(rxcp))
1729 be_rx_compl_process_gro(adapter, rxo, rxcp); 1946 be_rx_compl_process_gro(adapter, rxo, rxcp);
1730 else 1947 else
1731 be_rx_compl_process(adapter, rxo, rxcp); 1948 be_rx_compl_process(adapter, rxo, rxcp);
1949 } else if (rxcp->pkt_size == 0) {
1950 be_rx_compl_discard(adapter, rxo, rxcp);
1732 } 1951 }
1952
1733 be_rx_stats_update(rxo, rxcp); 1953 be_rx_stats_update(rxo, rxcp);
1734 } 1954 }
1735 1955
@@ -1760,12 +1980,12 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1760 struct be_queue_info *tx_cq = &adapter->tx_obj.cq; 1980 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1761 struct be_eth_tx_compl *txcp; 1981 struct be_eth_tx_compl *txcp;
1762 int tx_compl = 0, mcc_compl, status = 0; 1982 int tx_compl = 0, mcc_compl, status = 0;
1763 u16 end_idx; 1983 u16 end_idx, num_wrbs = 0;
1764 1984
1765 while ((txcp = be_tx_compl_get(tx_cq))) { 1985 while ((txcp = be_tx_compl_get(tx_cq))) {
1766 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, 1986 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1767 wrb_index, txcp); 1987 wrb_index, txcp);
1768 be_tx_compl_process(adapter, end_idx); 1988 num_wrbs += be_tx_compl_process(adapter, end_idx);
1769 tx_compl++; 1989 tx_compl++;
1770 } 1990 }
1771 1991
@@ -1781,6 +2001,8 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1781 if (tx_compl) { 2001 if (tx_compl) {
1782 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl); 2002 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1783 2003
2004 atomic_sub(num_wrbs, &txq->used);
2005
1784 /* As Tx wrbs have been freed up, wake up netdev queue if 2006 /* As Tx wrbs have been freed up, wake up netdev queue if
1785 * it was stopped due to lack of tx wrbs. 2007 * it was stopped due to lack of tx wrbs.
1786 */ 2008 */
@@ -1843,6 +2065,9 @@ static void be_worker(struct work_struct *work)
1843 struct be_rx_obj *rxo; 2065 struct be_rx_obj *rxo;
1844 int i; 2066 int i;
1845 2067
2068 if (!adapter->ue_detected && !lancer_chip(adapter))
2069 be_detect_dump_ue(adapter);
2070
1846 /* when interrupts are not yet enabled, just reap any pending 2071 /* when interrupts are not yet enabled, just reap any pending
1847 * mcc completions */ 2072 * mcc completions */
1848 if (!netif_running(adapter->netdev)) { 2073 if (!netif_running(adapter->netdev)) {
@@ -1855,15 +2080,16 @@ static void be_worker(struct work_struct *work)
1855 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); 2080 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1856 } 2081 }
1857 2082
1858 if (!adapter->ue_detected && !lancer_chip(adapter))
1859 be_detect_dump_ue(adapter);
1860
1861 goto reschedule; 2083 goto reschedule;
1862 } 2084 }
1863 2085
1864 if (!adapter->stats_cmd_sent) 2086 if (!adapter->stats_cmd_sent) {
1865 be_cmd_get_stats(adapter, &adapter->stats_cmd); 2087 if (lancer_chip(adapter))
1866 2088 lancer_cmd_get_pport_stats(adapter,
2089 &adapter->stats_cmd);
2090 else
2091 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2092 }
1867 be_tx_rate_update(adapter); 2093 be_tx_rate_update(adapter);
1868 2094
1869 for_all_rx_queues(adapter, rxo, i) { 2095 for_all_rx_queues(adapter, rxo, i) {
@@ -1875,8 +2101,6 @@ static void be_worker(struct work_struct *work)
1875 be_post_rx_frags(rxo, GFP_KERNEL); 2101 be_post_rx_frags(rxo, GFP_KERNEL);
1876 } 2102 }
1877 } 2103 }
1878 if (!adapter->ue_detected && !lancer_chip(adapter))
1879 be_detect_dump_ue(adapter);
1880 2104
1881reschedule: 2105reschedule:
1882 adapter->work_counter++; 2106 adapter->work_counter++;
@@ -1885,51 +2109,35 @@ reschedule:
1885 2109
1886static void be_msix_disable(struct be_adapter *adapter) 2110static void be_msix_disable(struct be_adapter *adapter)
1887{ 2111{
1888 if (adapter->msix_enabled) { 2112 if (msix_enabled(adapter)) {
1889 pci_disable_msix(adapter->pdev); 2113 pci_disable_msix(adapter->pdev);
1890 adapter->msix_enabled = false; 2114 adapter->num_msix_vec = 0;
1891 }
1892}
1893
1894static int be_num_rxqs_get(struct be_adapter *adapter)
1895{
1896 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1897 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1898 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1899 } else {
1900 dev_warn(&adapter->pdev->dev,
1901 "No support for multiple RX queues\n");
1902 return 1;
1903 } 2115 }
1904} 2116}
1905 2117
1906static void be_msix_enable(struct be_adapter *adapter) 2118static void be_msix_enable(struct be_adapter *adapter)
1907{ 2119{
1908#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */ 2120#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1909 int i, status; 2121 int i, status, num_vec;
1910 2122
1911 adapter->num_rx_qs = be_num_rxqs_get(adapter); 2123 num_vec = be_num_rxqs_want(adapter) + 1;
1912 2124
1913 for (i = 0; i < (adapter->num_rx_qs + 1); i++) 2125 for (i = 0; i < num_vec; i++)
1914 adapter->msix_entries[i].entry = i; 2126 adapter->msix_entries[i].entry = i;
1915 2127
1916 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, 2128 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
1917 adapter->num_rx_qs + 1);
1918 if (status == 0) { 2129 if (status == 0) {
1919 goto done; 2130 goto done;
1920 } else if (status >= BE_MIN_MSIX_VECTORS) { 2131 } else if (status >= BE_MIN_MSIX_VECTORS) {
2132 num_vec = status;
1921 if (pci_enable_msix(adapter->pdev, adapter->msix_entries, 2133 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1922 status) == 0) { 2134 num_vec) == 0)
1923 adapter->num_rx_qs = status - 1;
1924 dev_warn(&adapter->pdev->dev,
1925 "Could alloc only %d MSIx vectors. "
1926 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1927 goto done; 2135 goto done;
1928 }
1929 } 2136 }
1930 return; 2137 return;
1931done: 2138done:
1932 adapter->msix_enabled = true; 2139 adapter->num_msix_vec = num_vec;
2140 return;
1933} 2141}
1934 2142
1935static void be_sriov_enable(struct be_adapter *adapter) 2143static void be_sriov_enable(struct be_adapter *adapter)
@@ -1937,7 +2145,20 @@ static void be_sriov_enable(struct be_adapter *adapter)
1937 be_check_sriov_fn_type(adapter); 2145 be_check_sriov_fn_type(adapter);
1938#ifdef CONFIG_PCI_IOV 2146#ifdef CONFIG_PCI_IOV
1939 if (be_physfn(adapter) && num_vfs) { 2147 if (be_physfn(adapter) && num_vfs) {
1940 int status; 2148 int status, pos;
2149 u16 nvfs;
2150
2151 pos = pci_find_ext_capability(adapter->pdev,
2152 PCI_EXT_CAP_ID_SRIOV);
2153 pci_read_config_word(adapter->pdev,
2154 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2155
2156 if (num_vfs > nvfs) {
2157 dev_info(&adapter->pdev->dev,
2158 "Device supports %d VFs and not %d\n",
2159 nvfs, num_vfs);
2160 num_vfs = nvfs;
2161 }
1941 2162
1942 status = pci_enable_sriov(adapter->pdev, num_vfs); 2163 status = pci_enable_sriov(adapter->pdev, num_vfs);
1943 adapter->sriov_enabled = status ? false : true; 2164 adapter->sriov_enabled = status ? false : true;
@@ -2010,8 +2231,7 @@ err_msix:
2010err: 2231err:
2011 dev_warn(&adapter->pdev->dev, 2232 dev_warn(&adapter->pdev->dev,
2012 "MSIX Request IRQ failed - err %d\n", status); 2233 "MSIX Request IRQ failed - err %d\n", status);
2013 pci_disable_msix(adapter->pdev); 2234 be_msix_disable(adapter);
2014 adapter->msix_enabled = false;
2015 return status; 2235 return status;
2016} 2236}
2017 2237
@@ -2020,7 +2240,7 @@ static int be_irq_register(struct be_adapter *adapter)
2020 struct net_device *netdev = adapter->netdev; 2240 struct net_device *netdev = adapter->netdev;
2021 int status; 2241 int status;
2022 2242
2023 if (adapter->msix_enabled) { 2243 if (msix_enabled(adapter)) {
2024 status = be_msix_register(adapter); 2244 status = be_msix_register(adapter);
2025 if (status == 0) 2245 if (status == 0)
2026 goto done; 2246 goto done;
@@ -2053,7 +2273,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
2053 return; 2273 return;
2054 2274
2055 /* INTx */ 2275 /* INTx */
2056 if (!adapter->msix_enabled) { 2276 if (!msix_enabled(adapter)) {
2057 free_irq(netdev->irq, adapter); 2277 free_irq(netdev->irq, adapter);
2058 goto done; 2278 goto done;
2059 } 2279 }
@@ -2095,7 +2315,7 @@ static int be_close(struct net_device *netdev)
2095 be_cq_notify(adapter, rxo->cq.id, false, 0); 2315 be_cq_notify(adapter, rxo->cq.id, false, 0);
2096 } 2316 }
2097 2317
2098 if (adapter->msix_enabled) { 2318 if (msix_enabled(adapter)) {
2099 vec = be_msix_vec_get(adapter, tx_eq); 2319 vec = be_msix_vec_get(adapter, tx_eq);
2100 synchronize_irq(vec); 2320 synchronize_irq(vec);
2101 2321
@@ -2148,7 +2368,7 @@ static int be_open(struct net_device *netdev)
2148 be_async_mcc_enable(adapter); 2368 be_async_mcc_enable(adapter);
2149 2369
2150 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed, 2370 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2151 &link_speed); 2371 &link_speed, 0);
2152 if (status) 2372 if (status)
2153 goto err; 2373 goto err;
2154 be_link_status_update(adapter, link_up); 2374 be_link_status_update(adapter, link_up);
@@ -2268,7 +2488,7 @@ static int be_setup(struct be_adapter *adapter)
2268 BE_IF_FLAGS_PASS_L3L4_ERRORS; 2488 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2269 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; 2489 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2270 2490
2271 if (be_multi_rxq(adapter)) { 2491 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2272 cap_flags |= BE_IF_FLAGS_RSS; 2492 cap_flags |= BE_IF_FLAGS_RSS;
2273 en_flags |= BE_IF_FLAGS_RSS; 2493 en_flags |= BE_IF_FLAGS_RSS;
2274 } 2494 }
@@ -2325,7 +2545,6 @@ static int be_setup(struct be_adapter *adapter)
2325 2545
2326 return 0; 2546 return 0;
2327 2547
2328 be_mcc_queues_destroy(adapter);
2329rx_qs_destroy: 2548rx_qs_destroy:
2330 be_rx_queues_destroy(adapter); 2549 be_rx_queues_destroy(adapter);
2331tx_qs_destroy: 2550tx_qs_destroy:
@@ -2493,7 +2712,6 @@ static int be_flash_data(struct be_adapter *adapter,
2493 "cmd to write to flash rom failed.\n"); 2712 "cmd to write to flash rom failed.\n");
2494 return -1; 2713 return -1;
2495 } 2714 }
2496 yield();
2497 } 2715 }
2498 } 2716 }
2499 return 0; 2717 return 0;
@@ -2511,32 +2729,98 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2511 return 0; 2729 return 0;
2512} 2730}
2513 2731
2514int be_load_fw(struct be_adapter *adapter, u8 *func) 2732static int lancer_fw_download(struct be_adapter *adapter,
2733 const struct firmware *fw)
2515{ 2734{
2516 char fw_file[ETHTOOL_FLASH_MAX_FILENAME]; 2735#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2517 const struct firmware *fw; 2736#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2518 struct flash_file_hdr_g2 *fhdr;
2519 struct flash_file_hdr_g3 *fhdr3;
2520 struct image_hdr *img_hdr_ptr = NULL;
2521 struct be_dma_mem flash_cmd; 2737 struct be_dma_mem flash_cmd;
2522 int status, i = 0, num_imgs = 0; 2738 struct lancer_cmd_req_write_object *req;
2523 const u8 *p; 2739 const u8 *data_ptr = NULL;
2740 u8 *dest_image_ptr = NULL;
2741 size_t image_size = 0;
2742 u32 chunk_size = 0;
2743 u32 data_written = 0;
2744 u32 offset = 0;
2745 int status = 0;
2746 u8 add_status = 0;
2524 2747
2525 if (!netif_running(adapter->netdev)) { 2748 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2526 dev_err(&adapter->pdev->dev, 2749 dev_err(&adapter->pdev->dev,
2527 "Firmware load not allowed (interface is down)\n"); 2750 "FW Image not properly aligned. "
2528 return -EPERM; 2751 "Length must be 4 byte aligned.\n");
2752 status = -EINVAL;
2753 goto lancer_fw_exit;
2529 } 2754 }
2530 2755
2531 strcpy(fw_file, func); 2756 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2757 + LANCER_FW_DOWNLOAD_CHUNK;
2758 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2759 &flash_cmd.dma, GFP_KERNEL);
2760 if (!flash_cmd.va) {
2761 status = -ENOMEM;
2762 dev_err(&adapter->pdev->dev,
2763 "Memory allocation failure while flashing\n");
2764 goto lancer_fw_exit;
2765 }
2532 2766
2533 status = request_firmware(&fw, fw_file, &adapter->pdev->dev); 2767 req = flash_cmd.va;
2534 if (status) 2768 dest_image_ptr = flash_cmd.va +
2535 goto fw_exit; 2769 sizeof(struct lancer_cmd_req_write_object);
2770 image_size = fw->size;
2771 data_ptr = fw->data;
2772
2773 while (image_size) {
2774 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2775
2776 /* Copy the image chunk content. */
2777 memcpy(dest_image_ptr, data_ptr, chunk_size);
2778
2779 status = lancer_cmd_write_object(adapter, &flash_cmd,
2780 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2781 &data_written, &add_status);
2782
2783 if (status)
2784 break;
2785
2786 offset += data_written;
2787 data_ptr += data_written;
2788 image_size -= data_written;
2789 }
2790
2791 if (!status) {
2792 /* Commit the FW written */
2793 status = lancer_cmd_write_object(adapter, &flash_cmd,
2794 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2795 &data_written, &add_status);
2796 }
2797
2798 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2799 flash_cmd.dma);
2800 if (status) {
2801 dev_err(&adapter->pdev->dev,
2802 "Firmware load error. "
2803 "Status code: 0x%x Additional Status: 0x%x\n",
2804 status, add_status);
2805 goto lancer_fw_exit;
2806 }
2807
2808 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2809lancer_fw_exit:
2810 return status;
2811}
2812
2813static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2814{
2815 struct flash_file_hdr_g2 *fhdr;
2816 struct flash_file_hdr_g3 *fhdr3;
2817 struct image_hdr *img_hdr_ptr = NULL;
2818 struct be_dma_mem flash_cmd;
2819 const u8 *p;
2820 int status = 0, i = 0, num_imgs = 0;
2536 2821
2537 p = fw->data; 2822 p = fw->data;
2538 fhdr = (struct flash_file_hdr_g2 *) p; 2823 fhdr = (struct flash_file_hdr_g2 *) p;
2539 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2540 2824
2541 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; 2825 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2542 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, 2826 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
@@ -2545,7 +2829,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2545 status = -ENOMEM; 2829 status = -ENOMEM;
2546 dev_err(&adapter->pdev->dev, 2830 dev_err(&adapter->pdev->dev,
2547 "Memory allocation failure while flashing\n"); 2831 "Memory allocation failure while flashing\n");
2548 goto fw_exit; 2832 goto be_fw_exit;
2549 } 2833 }
2550 2834
2551 if ((adapter->generation == BE_GEN3) && 2835 if ((adapter->generation == BE_GEN3) &&
@@ -2573,11 +2857,37 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2573 flash_cmd.dma); 2857 flash_cmd.dma);
2574 if (status) { 2858 if (status) {
2575 dev_err(&adapter->pdev->dev, "Firmware load error\n"); 2859 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2576 goto fw_exit; 2860 goto be_fw_exit;
2577 } 2861 }
2578 2862
2579 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); 2863 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2580 2864
2865be_fw_exit:
2866 return status;
2867}
2868
2869int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2870{
2871 const struct firmware *fw;
2872 int status;
2873
2874 if (!netif_running(adapter->netdev)) {
2875 dev_err(&adapter->pdev->dev,
2876 "Firmware load not allowed (interface is down)\n");
2877 return -1;
2878 }
2879
2880 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2881 if (status)
2882 goto fw_exit;
2883
2884 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2885
2886 if (lancer_chip(adapter))
2887 status = lancer_fw_download(adapter, fw);
2888 else
2889 status = be_fw_download(adapter, fw);
2890
2581fw_exit: 2891fw_exit:
2582 release_firmware(fw); 2892 release_firmware(fw);
2583 return status; 2893 return status;
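lancer_fw_download() above streams the image through a DMA buffer in 32 KB chunks, advancing the file offset by whatever data_written the firmware reports, and finishes with a zero-length write that commits the image; be_load_fw() merely dispatches between the Lancer and BE flash paths. The standalone sketch below mirrors that chunking loop with the mailbox command replaced by a stub; the DMA staging copy and add_status handling are elided.

#include <stdio.h>
#include <string.h>

#define CHUNK (32 * 1024)       /* mirrors LANCER_FW_DOWNLOAD_CHUNK */

/* Stub for the write-object command: pretend the device accepted the
 * whole chunk and report how much it consumed. */
static int write_object(const unsigned char *buf, size_t len,
                        size_t offset, size_t *written)
{
        (void)buf; (void)offset;
        *written = len;
        return 0;
}

/* Chunked push of a firmware image followed by a zero-length commit
 * write, echoing the flow of lancer_fw_download(). */
static int push_image(const unsigned char *img, size_t size)
{
        size_t offset = 0, written = 0;
        int status;

        while (size) {
                size_t chunk = size < CHUNK ? size : (size_t)CHUNK;

                status = write_object(img + offset, chunk, offset, &written);
                if (status)
                        return status;
                offset += written;
                size -= written;
        }
        /* zero-length write == "commit what has been sent" */
        return write_object(NULL, 0, offset, &written);
}

int main(void)
{
        static unsigned char image[100000];

        memset(image, 0xab, sizeof(image));
        printf("push_image: %d\n", push_image(image, sizeof(image)));
        return 0;
}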
@@ -2606,10 +2916,14 @@ static void be_netdev_init(struct net_device *netdev)
2606 struct be_rx_obj *rxo; 2916 struct be_rx_obj *rxo;
2607 int i; 2917 int i;
2608 2918
2609 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2919 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2610 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2920 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2611 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2921 NETIF_F_HW_VLAN_TX;
2612 NETIF_F_GRO | NETIF_F_TSO6; 2922 if (be_multi_rxq(adapter))
2923 netdev->hw_features |= NETIF_F_RXHASH;
2924
2925 netdev->features |= netdev->hw_features |
2926 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2613 2927
2614 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | 2928 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2615 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 2929 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
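The be_netdev_init() change above follows the then-new netdev convention: offloads the user may toggle go into hw_features, and the currently enabled set (features) is hw_features plus the bits the driver always keeps on (VLAN RX and filtering here, RXHASH only when multiple RX queues exist). A trivial bit-mask sketch of that split, with made-up flag values rather than the real NETIF_F_* bits:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only; the real bits live in
 * include/linux/netdev_features.h. */
#define F_SG      (1u << 0)
#define F_RXCSUM  (1u << 1)
#define F_RXHASH  (1u << 2)
#define F_VLAN_RX (1u << 3)

int main(void)
{
        /* hw_features: what ethtool may toggle at runtime */
        uint32_t hw_features = F_SG | F_RXCSUM | F_RXHASH;

        /* features: currently enabled = toggleable set plus bits the
         * driver keeps on unconditionally. */
        uint32_t features = hw_features | F_VLAN_RX;

        printf("hw_features=0x%x features=0x%x\n", hw_features, features);
        return 0;
}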
@@ -2619,8 +2933,6 @@ static void be_netdev_init(struct net_device *netdev)
2619 2933
2620 netdev->flags |= IFF_MULTICAST; 2934 netdev->flags |= IFF_MULTICAST;
2621 2935
2622 adapter->rx_csum = true;
2623
2624 /* Default settings for Rx and Tx flow control */ 2936 /* Default settings for Rx and Tx flow control */
2625 adapter->rx_fc = true; 2937 adapter->rx_fc = true;
2626 adapter->tx_fc = true; 2938 adapter->tx_fc = true;
@@ -2788,7 +3100,14 @@ static int be_stats_init(struct be_adapter *adapter)
2788{ 3100{
2789 struct be_dma_mem *cmd = &adapter->stats_cmd; 3101 struct be_dma_mem *cmd = &adapter->stats_cmd;
2790 3102
2791 cmd->size = sizeof(struct be_cmd_req_get_stats); 3103 if (adapter->generation == BE_GEN2) {
3104 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3105 } else {
3106 if (lancer_chip(adapter))
3107 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3108 else
3109 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3110 }
2792 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, 3111 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2793 GFP_KERNEL); 3112 GFP_KERNEL);
2794 if (cmd->va == NULL) 3113 if (cmd->va == NULL)
@@ -2814,6 +3133,7 @@ static void __devexit be_remove(struct pci_dev *pdev)
2814 3133
2815 be_ctrl_cleanup(adapter); 3134 be_ctrl_cleanup(adapter);
2816 3135
3136 kfree(adapter->vf_cfg);
2817 be_sriov_disable(adapter); 3137 be_sriov_disable(adapter);
2818 3138
2819 be_msix_disable(adapter); 3139 be_msix_disable(adapter);
@@ -2998,16 +3318,23 @@ static int __devinit be_probe(struct pci_dev *pdev,
2998 } 3318 }
2999 3319
3000 be_sriov_enable(adapter); 3320 be_sriov_enable(adapter);
3321 if (adapter->sriov_enabled) {
3322 adapter->vf_cfg = kcalloc(num_vfs,
3323 sizeof(struct be_vf_cfg), GFP_KERNEL);
3324
3325 if (!adapter->vf_cfg)
3326 goto free_netdev;
3327 }
3001 3328
3002 status = be_ctrl_init(adapter); 3329 status = be_ctrl_init(adapter);
3003 if (status) 3330 if (status)
3004 goto free_netdev; 3331 goto free_vf_cfg;
3005 3332
3006 if (lancer_chip(adapter)) { 3333 if (lancer_chip(adapter)) {
3007 status = lancer_test_and_set_rdy_state(adapter); 3334 status = lancer_test_and_set_rdy_state(adapter);
3008 if (status) { 3335 if (status) {
3009 dev_err(&pdev->dev, "Adapter in non recoverable error\n"); 3336 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3010 goto free_netdev; 3337 goto ctrl_clean;
3011 } 3338 }
3012 } 3339 }
3013 3340
@@ -3050,9 +3377,22 @@ static int __devinit be_probe(struct pci_dev *pdev,
3050 netif_carrier_off(netdev); 3377 netif_carrier_off(netdev);
3051 3378
3052 if (be_physfn(adapter) && adapter->sriov_enabled) { 3379 if (be_physfn(adapter) && adapter->sriov_enabled) {
3380 u8 mac_speed;
3381 bool link_up;
3382 u16 vf, lnk_speed;
3383
3053 status = be_vf_eth_addr_config(adapter); 3384 status = be_vf_eth_addr_config(adapter);
3054 if (status) 3385 if (status)
3055 goto unreg_netdev; 3386 goto unreg_netdev;
3387
3388 for (vf = 0; vf < num_vfs; vf++) {
3389 status = be_cmd_link_status_query(adapter, &link_up,
3390 &mac_speed, &lnk_speed, vf + 1);
3391 if (!status)
3392 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3393 else
3394 goto unreg_netdev;
3395 }
3056 } 3396 }
3057 3397
3058 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); 3398 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
@@ -3069,6 +3409,8 @@ stats_clean:
3069 be_stats_cleanup(adapter); 3409 be_stats_cleanup(adapter);
3070ctrl_clean: 3410ctrl_clean:
3071 be_ctrl_cleanup(adapter); 3411 be_ctrl_cleanup(adapter);
3412free_vf_cfg:
3413 kfree(adapter->vf_cfg);
3072free_netdev: 3414free_netdev:
3073 be_sriov_disable(adapter); 3415 be_sriov_disable(adapter);
3074 free_netdev(netdev); 3416 free_netdev(netdev);
@@ -3153,16 +3495,15 @@ static void be_shutdown(struct pci_dev *pdev)
3153 if (!adapter) 3495 if (!adapter)
3154 return; 3496 return;
3155 3497
3156 if (netif_running(adapter->netdev)) 3498 cancel_delayed_work_sync(&adapter->work);
3157 cancel_delayed_work_sync(&adapter->work);
3158 3499
3159 netif_device_detach(adapter->netdev); 3500 netif_device_detach(adapter->netdev);
3160 3501
3161 be_cmd_reset_function(adapter);
3162
3163 if (adapter->wol) 3502 if (adapter->wol)
3164 be_setup_wol(adapter, true); 3503 be_setup_wol(adapter, true);
3165 3504
3505 be_cmd_reset_function(adapter);
3506
3166 pci_disable_device(pdev); 3507 pci_disable_device(pdev);
3167} 3508}
3168 3509
@@ -3274,13 +3615,6 @@ static int __init be_init_module(void)
3274 rx_frag_size = 2048; 3615 rx_frag_size = 2048;
3275 } 3616 }
3276 3617
3277 if (num_vfs > 32) {
3278 printk(KERN_WARNING DRV_NAME
3279 " : Module param num_vfs must not be greater than 32."
3280 "Using 32\n");
3281 num_vfs = 32;
3282 }
3283
3284 return pci_register_driver(&be_driver); 3618 return pci_register_driver(&be_driver);
3285} 3619}
3286module_init(be_init_module); 3620module_init(be_init_module);
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index 7581518ecfa2..fcb9bb3169e0 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -82,7 +82,6 @@ static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
82static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 82static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
83 u32 boot_param); 83 u32 boot_param);
84static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 84static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
85static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
86static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, 85static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
87 char *serial_num); 86 char *serial_num);
88static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, 87static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
@@ -1274,13 +1273,12 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1274void 1273void
1275bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 1274bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1276{ 1275{
1277 u32 pgnum, pgoff; 1276 u32 pgnum;
1278 u32 loff = 0; 1277 u32 loff = 0;
1279 int i; 1278 int i;
1280 u32 *fwsig = (u32 *) fwhdr; 1279 u32 *fwsig = (u32 *) fwhdr;
1281 1280
1282 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1281 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1283 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1284 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1282 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1285 1283
1286 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); 1284 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
@@ -1514,7 +1512,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1514 u32 boot_env) 1512 u32 boot_env)
1515{ 1513{
1516 u32 *fwimg; 1514 u32 *fwimg;
1517 u32 pgnum, pgoff; 1515 u32 pgnum;
1518 u32 loff = 0; 1516 u32 loff = 0;
1519 u32 chunkno = 0; 1517 u32 chunkno = 0;
1520 u32 i; 1518 u32 i;
@@ -1527,7 +1525,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1527 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); 1525 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1528 1526
1529 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1527 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1530 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1531 1528
1532 writel(pgnum, ioc->ioc_regs.host_page_num_fn); 1529 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1533 1530
@@ -1925,12 +1922,6 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1925 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 1922 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1926} 1923}
1927 1924
1928static u32
1929bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1930{
1931 return PSS_SMEM_PGOFF(fmaddr);
1932}
1933
1934/** 1925/**
1935 * Register mailbox message handler function, to be called by common modules 1926 * Register mailbox message handler function, to be called by common modules
1936 */ 1927 */
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index e1527472b961..53b14169e363 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -246,7 +246,6 @@ static void
246bna_mbox_flush_q(struct bna *bna, struct list_head *q) 246bna_mbox_flush_q(struct bna *bna, struct list_head *q)
247{ 247{
248 struct bna_mbox_qe *mb_qe = NULL; 248 struct bna_mbox_qe *mb_qe = NULL;
249 struct bfi_mhdr *cmd_h;
250 struct list_head *mb_q; 249 struct list_head *mb_q;
251 void (*cbfn)(void *arg, int status); 250 void (*cbfn)(void *arg, int status);
252 void *cbarg; 251 void *cbarg;
@@ -260,7 +259,6 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
260 bfa_q_qe_init(mb_qe); 259 bfa_q_qe_init(mb_qe);
261 bna->mbox_mod.msg_pending--; 260 bna->mbox_mod.msg_pending--;
262 261
263 cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
264 if (cbfn) 262 if (cbfn)
265 cbfn(cbarg, BNA_CB_NOT_EXEC); 263 cbfn(cbarg, BNA_CB_NOT_EXEC);
266 } 264 }
@@ -2774,23 +2772,6 @@ bna_rit_mod_init(struct bna_rit_mod *rit_mod,
2774 } 2772 }
2775} 2773}
2776 2774
2777static void
2778bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
2779{
2780 struct bna_rit_segment *rit_segment;
2781 struct list_head *qe;
2782 int i;
2783 int j;
2784
2785 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
2786 j = 0;
2787 list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
2788 rit_segment = (struct bna_rit_segment *)qe;
2789 j++;
2790 }
2791 }
2792}
2793
2794/* 2775/*
2795 * Public functions 2776 * Public functions
2796 */ 2777 */
@@ -2977,8 +2958,6 @@ bna_uninit(struct bna *bna)
2977 2958
2978 bna_ucam_mod_uninit(&bna->ucam_mod); 2959 bna_ucam_mod_uninit(&bna->ucam_mod);
2979 2960
2980 bna_rit_mod_uninit(&bna->rit_mod);
2981
2982 bna_ib_mod_uninit(&bna->ib_mod); 2961 bna_ib_mod_uninit(&bna->ib_mod);
2983 2962
2984 bna_rx_mod_uninit(&bna->rx_mod); 2963 bna_rx_mod_uninit(&bna->rx_mod);
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index 58c7664040dc..380085cc3088 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -2229,14 +2229,11 @@ void
2229bna_rit_create(struct bna_rx *rx) 2229bna_rit_create(struct bna_rx *rx)
2230{ 2230{
2231 struct list_head *qe_rxp; 2231 struct list_head *qe_rxp;
2232 struct bna *bna;
2233 struct bna_rxp *rxp; 2232 struct bna_rxp *rxp;
2234 struct bna_rxq *q0 = NULL; 2233 struct bna_rxq *q0 = NULL;
2235 struct bna_rxq *q1 = NULL; 2234 struct bna_rxq *q1 = NULL;
2236 int offset; 2235 int offset;
2237 2236
2238 bna = rx->bna;
2239
2240 offset = 0; 2237 offset = 0;
2241 list_for_each(qe_rxp, &rx->rxp_q) { 2238 list_for_each(qe_rxp, &rx->rxp_q) {
2242 rxp = (struct bna_rxp *)qe_rxp; 2239 rxp = (struct bna_rxp *)qe_rxp;
@@ -2830,7 +2827,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2830 struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */ 2827 struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
2831 struct bna_mem_descr *hpage_mem; /* hdr page mem */ 2828 struct bna_mem_descr *hpage_mem; /* hdr page mem */
2832 struct bna_mem_descr *dpage_mem; /* data page mem */ 2829 struct bna_mem_descr *dpage_mem; /* data page mem */
2833 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret; 2830 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
2834 int dpage_count, hpage_count, rcb_idx; 2831 int dpage_count, hpage_count, rcb_idx;
2835 struct bna_ib_config ibcfg; 2832 struct bna_ib_config ibcfg;
2836 /* Fail if we don't have enough RXPs, RXQs */ 2833 /* Fail if we don't have enough RXPs, RXQs */
@@ -2924,7 +2921,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2924 ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; 2921 ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2925 ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE; 2922 ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
2926 2923
2927 ret = bna_ib_config(rxp->cq.ib, &ibcfg); 2924 bna_ib_config(rxp->cq.ib, &ibcfg);
2928 2925
2929 /* Link rxqs to rxp */ 2926 /* Link rxqs to rxp */
2930 _rxp_add_rxqs(rxp, q0, q1); 2927 _rxp_add_rxqs(rxp, q0, q1);
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 8e6ceab9f4d8..e588511f47fb 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -501,7 +501,7 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
501 501
502 skb_put(skb, ntohs(cmpl->length)); 502 skb_put(skb, ntohs(cmpl->length));
503 if (likely 503 if (likely
504 (bnad->rx_csum && 504 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
505 (((flags & BNA_CQ_EF_IPV4) && 505 (((flags & BNA_CQ_EF_IPV4) &&
506 (flags & BNA_CQ_EF_L3_CKSUM_OK)) || 506 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
507 (flags & BNA_CQ_EF_IPV6)) && 507 (flags & BNA_CQ_EF_IPV6)) &&
@@ -2902,23 +2902,20 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
2902{ 2902{
2903 struct net_device *netdev = bnad->netdev; 2903 struct net_device *netdev = bnad->netdev;
2904 2904
2905 netdev->features |= NETIF_F_IPV6_CSUM; 2905 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2906 netdev->features |= NETIF_F_TSO; 2906 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2907 netdev->features |= NETIF_F_TSO6; 2907 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
2908 2908
2909 netdev->features |= NETIF_F_GRO; 2909 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2910 pr_warn("bna: GRO enabled, using kernel stack GRO\n"); 2910 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2911 NETIF_F_TSO | NETIF_F_TSO6;
2911 2912
2912 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 2913 netdev->features |= netdev->hw_features |
2914 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2913 2915
2914 if (using_dac) 2916 if (using_dac)
2915 netdev->features |= NETIF_F_HIGHDMA; 2917 netdev->features |= NETIF_F_HIGHDMA;
2916 2918
2917 netdev->features |=
2918 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2919 NETIF_F_HW_VLAN_FILTER;
2920
2921 netdev->vlan_features = netdev->features;
2922 netdev->mem_start = bnad->mmio_start; 2919 netdev->mem_start = bnad->mmio_start;
2923 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; 2920 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2924 2921
@@ -2969,7 +2966,6 @@ bnad_init(struct bnad *bnad,
2969 2966
2970 bnad->txq_depth = BNAD_TXQ_DEPTH; 2967 bnad->txq_depth = BNAD_TXQ_DEPTH;
2971 bnad->rxq_depth = BNAD_RXQ_DEPTH; 2968 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2972 bnad->rx_csum = true;
2973 2969
2974 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; 2970 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2975 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; 2971 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index a89117fa4970..ccdabad0a40c 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -237,8 +237,6 @@ struct bnad {
237 struct bna_rx_config rx_config[BNAD_MAX_RXS]; 237 struct bna_rx_config rx_config[BNAD_MAX_RXS];
238 struct bna_tx_config tx_config[BNAD_MAX_TXS]; 238 struct bna_tx_config tx_config[BNAD_MAX_TXS];
239 239
240 u32 rx_csum;
241
242 void __iomem *bar0; /* BAR0 address */ 240 void __iomem *bar0; /* BAR0 address */
243 241
244 struct bna bna; 242 struct bna bna;
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 142d6047da27..3330cd78da2c 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -237,10 +237,10 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
237 cmd->phy_address = 0; 237 cmd->phy_address = 0;
238 238
239 if (netif_carrier_ok(netdev)) { 239 if (netif_carrier_ok(netdev)) {
240 cmd->speed = SPEED_10000; 240 ethtool_cmd_speed_set(cmd, SPEED_10000);
241 cmd->duplex = DUPLEX_FULL; 241 cmd->duplex = DUPLEX_FULL;
242 } else { 242 } else {
243 cmd->speed = -1; 243 ethtool_cmd_speed_set(cmd, -1);
244 cmd->duplex = -1; 244 cmd->duplex = -1;
245 } 245 }
246 cmd->transceiver = XCVR_EXTERNAL; 246 cmd->transceiver = XCVR_EXTERNAL;
@@ -256,7 +256,8 @@ bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
256 /* 10G full duplex setting supported only */ 256 /* 10G full duplex setting supported only */
257 if (cmd->autoneg == AUTONEG_ENABLE) 257 if (cmd->autoneg == AUTONEG_ENABLE)
258 return -EOPNOTSUPP; else { 258 return -EOPNOTSUPP; else {
259 if ((cmd->speed == SPEED_10000) && (cmd->duplex == DUPLEX_FULL)) 259 if ((ethtool_cmd_speed(cmd) == SPEED_10000)
260 && (cmd->duplex == DUPLEX_FULL))
260 return 0; 261 return 0;
261 } 262 }
262 263
@@ -806,61 +807,6 @@ bnad_set_pauseparam(struct net_device *netdev,
806 return 0; 807 return 0;
807} 808}
808 809
809static u32
810bnad_get_rx_csum(struct net_device *netdev)
811{
812 u32 rx_csum;
813 struct bnad *bnad = netdev_priv(netdev);
814
815 rx_csum = bnad->rx_csum;
816 return rx_csum;
817}
818
819static int
820bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
821{
822 struct bnad *bnad = netdev_priv(netdev);
823
824 mutex_lock(&bnad->conf_mutex);
825 bnad->rx_csum = rx_csum;
826 mutex_unlock(&bnad->conf_mutex);
827 return 0;
828}
829
830static int
831bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
832{
833 struct bnad *bnad = netdev_priv(netdev);
834
835 mutex_lock(&bnad->conf_mutex);
836 if (tx_csum) {
837 netdev->features |= NETIF_F_IP_CSUM;
838 netdev->features |= NETIF_F_IPV6_CSUM;
839 } else {
840 netdev->features &= ~NETIF_F_IP_CSUM;
841 netdev->features &= ~NETIF_F_IPV6_CSUM;
842 }
843 mutex_unlock(&bnad->conf_mutex);
844 return 0;
845}
846
847static int
848bnad_set_tso(struct net_device *netdev, u32 tso)
849{
850 struct bnad *bnad = netdev_priv(netdev);
851
852 mutex_lock(&bnad->conf_mutex);
853 if (tso) {
854 netdev->features |= NETIF_F_TSO;
855 netdev->features |= NETIF_F_TSO6;
856 } else {
857 netdev->features &= ~NETIF_F_TSO;
858 netdev->features &= ~NETIF_F_TSO6;
859 }
860 mutex_unlock(&bnad->conf_mutex);
861 return 0;
862}
863
864static void 810static void
865bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) 811bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
866{ 812{
@@ -1256,14 +1202,6 @@ static struct ethtool_ops bnad_ethtool_ops = {
1256 .set_ringparam = bnad_set_ringparam, 1202 .set_ringparam = bnad_set_ringparam,
1257 .get_pauseparam = bnad_get_pauseparam, 1203 .get_pauseparam = bnad_get_pauseparam,
1258 .set_pauseparam = bnad_set_pauseparam, 1204 .set_pauseparam = bnad_set_pauseparam,
1259 .get_rx_csum = bnad_get_rx_csum,
1260 .set_rx_csum = bnad_set_rx_csum,
1261 .get_tx_csum = ethtool_op_get_tx_csum,
1262 .set_tx_csum = bnad_set_tx_csum,
1263 .get_sg = ethtool_op_get_sg,
1264 .set_sg = ethtool_op_set_sg,
1265 .get_tso = ethtool_op_get_tso,
1266 .set_tso = bnad_set_tso,
1267 .get_strings = bnad_get_strings, 1205 .get_strings = bnad_get_strings,
1268 .get_ethtool_stats = bnad_get_ethtool_stats, 1206 .get_ethtool_stats = bnad_get_ethtool_stats,
1269 .get_sset_count = bnad_get_sset_count 1207 .get_sset_count = bnad_get_sset_count
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d8383a9af9ad..57d3293c65bd 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3174,7 +3174,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3174 } 3174 }
3175 3175
3176 skb_checksum_none_assert(skb); 3176 skb_checksum_none_assert(skb);
3177 if (bp->rx_csum && 3177 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3178 (status & (L2_FHDR_STATUS_TCP_SEGMENT | 3178 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3179 L2_FHDR_STATUS_UDP_DATAGRAM))) { 3179 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3180 3180
@@ -6696,17 +6696,16 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6696 6696
6697 if (bp->autoneg & AUTONEG_SPEED) { 6697 if (bp->autoneg & AUTONEG_SPEED) {
6698 cmd->autoneg = AUTONEG_ENABLE; 6698 cmd->autoneg = AUTONEG_ENABLE;
6699 } 6699 } else {
6700 else {
6701 cmd->autoneg = AUTONEG_DISABLE; 6700 cmd->autoneg = AUTONEG_DISABLE;
6702 } 6701 }
6703 6702
6704 if (netif_carrier_ok(dev)) { 6703 if (netif_carrier_ok(dev)) {
6705 cmd->speed = bp->line_speed; 6704 ethtool_cmd_speed_set(cmd, bp->line_speed);
6706 cmd->duplex = bp->duplex; 6705 cmd->duplex = bp->duplex;
6707 } 6706 }
6708 else { 6707 else {
6709 cmd->speed = -1; 6708 ethtool_cmd_speed_set(cmd, -1);
6710 cmd->duplex = -1; 6709 cmd->duplex = -1;
6711 } 6710 }
6712 spin_unlock_bh(&bp->phy_lock); 6711 spin_unlock_bh(&bp->phy_lock);
@@ -6758,21 +6757,21 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6758 advertising |= ADVERTISED_Autoneg; 6757 advertising |= ADVERTISED_Autoneg;
6759 } 6758 }
6760 else { 6759 else {
6760 u32 speed = ethtool_cmd_speed(cmd);
6761 if (cmd->port == PORT_FIBRE) { 6761 if (cmd->port == PORT_FIBRE) {
6762 if ((cmd->speed != SPEED_1000 && 6762 if ((speed != SPEED_1000 &&
6763 cmd->speed != SPEED_2500) || 6763 speed != SPEED_2500) ||
6764 (cmd->duplex != DUPLEX_FULL)) 6764 (cmd->duplex != DUPLEX_FULL))
6765 goto err_out_unlock; 6765 goto err_out_unlock;
6766 6766
6767 if (cmd->speed == SPEED_2500 && 6767 if (speed == SPEED_2500 &&
6768 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 6768 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6769 goto err_out_unlock; 6769 goto err_out_unlock;
6770 } 6770 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6771 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6772 goto err_out_unlock; 6771 goto err_out_unlock;
6773 6772
6774 autoneg &= ~AUTONEG_SPEED; 6773 autoneg &= ~AUTONEG_SPEED;
6775 req_line_speed = cmd->speed; 6774 req_line_speed = speed;
6776 req_duplex = cmd->duplex; 6775 req_duplex = cmd->duplex;
6777 advertising = 0; 6776 advertising = 0;
6778 } 6777 }
@@ -7189,38 +7188,6 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7189 return 0; 7188 return 0;
7190} 7189}
7191 7190
7192static u32
7193bnx2_get_rx_csum(struct net_device *dev)
7194{
7195 struct bnx2 *bp = netdev_priv(dev);
7196
7197 return bp->rx_csum;
7198}
7199
7200static int
7201bnx2_set_rx_csum(struct net_device *dev, u32 data)
7202{
7203 struct bnx2 *bp = netdev_priv(dev);
7204
7205 bp->rx_csum = data;
7206 return 0;
7207}
7208
7209static int
7210bnx2_set_tso(struct net_device *dev, u32 data)
7211{
7212 struct bnx2 *bp = netdev_priv(dev);
7213
7214 if (data) {
7215 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7216 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7217 dev->features |= NETIF_F_TSO6;
7218 } else
7219 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7220 NETIF_F_TSO_ECN);
7221 return 0;
7222}
7223
7224static struct { 7191static struct {
7225 char string[ETH_GSTRING_LEN]; 7192 char string[ETH_GSTRING_LEN];
7226} bnx2_stats_str_arr[] = { 7193} bnx2_stats_str_arr[] = {
@@ -7495,82 +7462,74 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7495} 7462}
7496 7463
7497static int 7464static int
7498bnx2_phys_id(struct net_device *dev, u32 data) 7465bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7499{ 7466{
7500 struct bnx2 *bp = netdev_priv(dev); 7467 struct bnx2 *bp = netdev_priv(dev);
7501 int i;
7502 u32 save;
7503 7468
7504 bnx2_set_power_state(bp, PCI_D0); 7469 switch (state) {
7470 case ETHTOOL_ID_ACTIVE:
7471 bnx2_set_power_state(bp, PCI_D0);
7472
7473 bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7474 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7475 return 1; /* cycle on/off once per second */
7476
7477 case ETHTOOL_ID_ON:
7478 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7479 BNX2_EMAC_LED_1000MB_OVERRIDE |
7480 BNX2_EMAC_LED_100MB_OVERRIDE |
7481 BNX2_EMAC_LED_10MB_OVERRIDE |
7482 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7483 BNX2_EMAC_LED_TRAFFIC);
7484 break;
7505 7485
7506 if (data == 0) 7486 case ETHTOOL_ID_OFF:
7507 data = 2; 7487 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7488 break;
7508 7489
7509 save = REG_RD(bp, BNX2_MISC_CFG); 7490 case ETHTOOL_ID_INACTIVE:
7510 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); 7491 REG_WR(bp, BNX2_EMAC_LED, 0);
7492 REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7511 7493
7512 for (i = 0; i < (data * 2); i++) { 7494 if (!netif_running(dev))
7513 if ((i % 2) == 0) { 7495 bnx2_set_power_state(bp, PCI_D3hot);
7514 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); 7496 break;
7515 }
7516 else {
7517 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7518 BNX2_EMAC_LED_1000MB_OVERRIDE |
7519 BNX2_EMAC_LED_100MB_OVERRIDE |
7520 BNX2_EMAC_LED_10MB_OVERRIDE |
7521 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7522 BNX2_EMAC_LED_TRAFFIC);
7523 }
7524 msleep_interruptible(500);
7525 if (signal_pending(current))
7526 break;
7527 } 7497 }
7528 REG_WR(bp, BNX2_EMAC_LED, 0);
7529 REG_WR(bp, BNX2_MISC_CFG, save);
7530
7531 if (!netif_running(dev))
7532 bnx2_set_power_state(bp, PCI_D3hot);
7533 7498
7534 return 0; 7499 return 0;
7535} 7500}
7536 7501
7537static int 7502static u32
7538bnx2_set_tx_csum(struct net_device *dev, u32 data) 7503bnx2_fix_features(struct net_device *dev, u32 features)
7539{ 7504{
7540 struct bnx2 *bp = netdev_priv(dev); 7505 struct bnx2 *bp = netdev_priv(dev);
7541 7506
7542 if (CHIP_NUM(bp) == CHIP_NUM_5709) 7507 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7543 return ethtool_op_set_tx_ipv6_csum(dev, data); 7508 features |= NETIF_F_HW_VLAN_RX;
7544 else 7509
7545 return ethtool_op_set_tx_csum(dev, data); 7510 return features;
7546} 7511}
7547 7512
7548static int 7513static int
7549bnx2_set_flags(struct net_device *dev, u32 data) 7514bnx2_set_features(struct net_device *dev, u32 features)
7550{ 7515{
7551 struct bnx2 *bp = netdev_priv(dev); 7516 struct bnx2 *bp = netdev_priv(dev);
7552 int rc;
7553
7554 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) &&
7555 !(data & ETH_FLAG_RXVLAN))
7556 return -EINVAL;
7557 7517
7558 /* TSO with VLAN tag won't work with current firmware */ 7518 /* TSO with VLAN tag won't work with current firmware */
7559 if (!(data & ETH_FLAG_TXVLAN)) 7519 if (features & NETIF_F_HW_VLAN_TX)
7560 return -EINVAL; 7520 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7561 7521 else
7562 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | 7522 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7563 ETH_FLAG_TXVLAN);
7564 if (rc)
7565 return rc;
7566 7523
7567 if ((!!(data & ETH_FLAG_RXVLAN) != 7524 if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7568 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && 7525 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7569 netif_running(dev)) { 7526 netif_running(dev)) {
7570 bnx2_netif_stop(bp, false); 7527 bnx2_netif_stop(bp, false);
7528 dev->features = features;
7571 bnx2_set_rx_mode(dev); 7529 bnx2_set_rx_mode(dev);
7572 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); 7530 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7573 bnx2_netif_start(bp, false); 7531 bnx2_netif_start(bp, false);
7532 return 1;
7574 } 7533 }
7575 7534
7576 return 0; 7535 return 0;
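The bnx2_set_phys_id() conversion above drops the driver's own msleep() blink loop in favour of the state-driven callback: ETHTOOL_ID_ACTIVE saves the LED configuration and, per the comment in the hunk, returns 1 so the core cycles the LED roughly once per second; ON/OFF program the override register and INACTIVE restores the saved state. A skeletal C sketch of that state machine with the register writes stubbed out:

#include <stdio.h>

enum id_state { ID_ACTIVE, ID_ON, ID_OFF, ID_INACTIVE };

/* Stub for an LED override register write. */
static void led_write(const char *val)
{
        printf("LED register <- %s\n", val);
}

/* No sleeping loop in the driver: the caller requests the ON/OFF
 * transitions and finally INACTIVE. */
static int set_phys_id(enum id_state state)
{
        switch (state) {
        case ID_ACTIVE:
                led_write("save config, manual LED mode");
                return 1;       /* one on/off cycle per second */
        case ID_ON:
                led_write("all overrides + traffic");
                break;
        case ID_OFF:
                led_write("override only");
                break;
        case ID_INACTIVE:
                led_write("restore saved config");
                break;
        }
        return 0;
}

int main(void)
{
        set_phys_id(ID_ACTIVE);
        set_phys_id(ID_ON);
        set_phys_id(ID_OFF);
        set_phys_id(ID_INACTIVE);
        return 0;
}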
@@ -7595,18 +7554,11 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
7595 .set_ringparam = bnx2_set_ringparam, 7554 .set_ringparam = bnx2_set_ringparam,
7596 .get_pauseparam = bnx2_get_pauseparam, 7555 .get_pauseparam = bnx2_get_pauseparam,
7597 .set_pauseparam = bnx2_set_pauseparam, 7556 .set_pauseparam = bnx2_set_pauseparam,
7598 .get_rx_csum = bnx2_get_rx_csum,
7599 .set_rx_csum = bnx2_set_rx_csum,
7600 .set_tx_csum = bnx2_set_tx_csum,
7601 .set_sg = ethtool_op_set_sg,
7602 .set_tso = bnx2_set_tso,
7603 .self_test = bnx2_self_test, 7557 .self_test = bnx2_self_test,
7604 .get_strings = bnx2_get_strings, 7558 .get_strings = bnx2_get_strings,
7605 .phys_id = bnx2_phys_id, 7559 .set_phys_id = bnx2_set_phys_id,
7606 .get_ethtool_stats = bnx2_get_ethtool_stats, 7560 .get_ethtool_stats = bnx2_get_ethtool_stats,
7607 .get_sset_count = bnx2_get_sset_count, 7561 .get_sset_count = bnx2_get_sset_count,
7608 .set_flags = bnx2_set_flags,
7609 .get_flags = ethtool_op_get_flags,
7610}; 7562};
7611 7563
7612/* Called with rtnl_lock */ 7564/* Called with rtnl_lock */
@@ -8118,8 +8070,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8118 bp->tx_ring_size = MAX_TX_DESC_CNT; 8070 bp->tx_ring_size = MAX_TX_DESC_CNT;
8119 bnx2_set_rx_ring_size(bp, 255); 8071 bnx2_set_rx_ring_size(bp, 255);
8120 8072
8121 bp->rx_csum = 1;
8122
8123 bp->tx_quick_cons_trip_int = 2; 8073 bp->tx_quick_cons_trip_int = 2;
8124 bp->tx_quick_cons_trip = 20; 8074 bp->tx_quick_cons_trip = 20;
8125 bp->tx_ticks_int = 18; 8075 bp->tx_ticks_int = 18;
@@ -8311,17 +8261,14 @@ static const struct net_device_ops bnx2_netdev_ops = {
8311 .ndo_validate_addr = eth_validate_addr, 8261 .ndo_validate_addr = eth_validate_addr,
8312 .ndo_set_mac_address = bnx2_change_mac_addr, 8262 .ndo_set_mac_address = bnx2_change_mac_addr,
8313 .ndo_change_mtu = bnx2_change_mtu, 8263 .ndo_change_mtu = bnx2_change_mtu,
8264 .ndo_fix_features = bnx2_fix_features,
8265 .ndo_set_features = bnx2_set_features,
8314 .ndo_tx_timeout = bnx2_tx_timeout, 8266 .ndo_tx_timeout = bnx2_tx_timeout,
8315#ifdef CONFIG_NET_POLL_CONTROLLER 8267#ifdef CONFIG_NET_POLL_CONTROLLER
8316 .ndo_poll_controller = poll_bnx2, 8268 .ndo_poll_controller = poll_bnx2,
8317#endif 8269#endif
8318}; 8270};
8319 8271
8320static inline void vlan_features_add(struct net_device *dev, u32 flags)
8321{
8322 dev->vlan_features |= flags;
8323}
8324
8325static int __devinit 8272static int __devinit
8326bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8273bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8327{ 8274{
@@ -8361,20 +8308,17 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8361 memcpy(dev->dev_addr, bp->mac_addr, 6); 8308 memcpy(dev->dev_addr, bp->mac_addr, 6);
8362 memcpy(dev->perm_addr, bp->mac_addr, 6); 8309 memcpy(dev->perm_addr, bp->mac_addr, 6);
8363 8310
8364 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO | 8311 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8365 NETIF_F_RXHASH; 8312 NETIF_F_TSO | NETIF_F_TSO_ECN |
8366 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG); 8313 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8367 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8314
8368 dev->features |= NETIF_F_IPV6_CSUM; 8315 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8369 vlan_features_add(dev, NETIF_F_IPV6_CSUM); 8316 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8370 } 8317
8371 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8318 dev->vlan_features = dev->hw_features;
8372 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; 8319 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8373 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN); 8320 dev->features |= dev->hw_features;
8374 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8321
8375 dev->features |= NETIF_F_TSO6;
8376 vlan_features_add(dev, NETIF_F_TSO6);
8377 }
8378 if ((rc = register_netdev(dev))) { 8322 if ((rc = register_netdev(dev))) {
8379 dev_err(&pdev->dev, "Cannot register net device\n"); 8323 dev_err(&pdev->dev, "Cannot register net device\n");
8380 goto error; 8324 goto error;
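Taken together, the bnx2.c hunks above are the standard hw_features conversion. A minimal sketch of the same pattern for a hypothetical driver "foo" (only fields and hooks that appear in the diff are assumed; the u32 feature API matches this kernel generation):

#include <linux/netdevice.h>

/* Hypothetical driver "foo": hw_features declares what the user may
 * toggle with "ethtool -K", dev->features is what is currently active.
 */
static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	/* enforce a hardware dependency, e.g. RX VLAN stripping that
	 * cannot be switched off on some chip revisions
	 */
	features |= NETIF_F_HW_VLAN_RX;
	return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* reprogram the NIC's receive checksum offload here */
	}
	return 0;	/* let the core update dev->features */
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_fix_features	= foo_fix_features,
	.ndo_set_features	= foo_set_features,
};

static void foo_init_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features |= dev->hw_features;
	dev->netdev_ops = &foo_netdev_ops;
}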
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 68020451dc4f..bf371f6fe154 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6754,8 +6754,6 @@ struct bnx2 {
6754 u32 rx_max_ring_idx; 6754 u32 rx_max_ring_idx;
6755 u32 rx_max_pg_ring_idx; 6755 u32 rx_max_pg_ring_idx;
6756 6756
6757 u32 rx_csum;
6758
6759 /* TX constants */ 6757 /* TX constants */
6760 int tx_ring_size; 6758 int tx_ring_size;
6761 u32 tx_wake_thresh; 6759 u32 tx_wake_thresh;
@@ -6922,6 +6920,7 @@ struct bnx2 {
6922 u8 num_tx_rings; 6920 u8 num_tx_rings;
6923 u8 num_rx_rings; 6921 u8 num_rx_rings;
6924 6922
6923 u32 leds_save;
6925 u32 idle_chk_status_idx; 6924 u32 idle_chk_status_idx;
6926 6925
6927#ifdef BCM_CNIC 6926#ifdef BCM_CNIC
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index e0fca701d2f3..16a76f074df5 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
1/* bnx2x.h: Broadcom Everest network driver. 1/* bnx2x.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -22,8 +22,8 @@
22 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
23/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
24 24
25#define DRV_MODULE_VERSION "1.62.11-0" 25#define DRV_MODULE_VERSION "1.62.12-0"
26#define DRV_MODULE_RELDATE "2011/01/31" 26#define DRV_MODULE_RELDATE "2011/03/20"
27#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
28 28
29#define BNX2X_MULTI_QUEUE 29#define BNX2X_MULTI_QUEUE
@@ -473,7 +473,8 @@ struct bnx2x_fastpath {
473#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 473#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
474#define MAX_RX_BD (NUM_RX_BD - 1) 474#define MAX_RX_BD (NUM_RX_BD - 1)
475#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 475#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
476#define MIN_RX_AVAIL 128 476#define MIN_RX_SIZE_TPA 72
477#define MIN_RX_SIZE_NONTPA 10
477#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL 478#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
478#define INIT_RX_RING_SIZE MAX_RX_AVAIL 479#define INIT_RX_RING_SIZE MAX_RX_AVAIL
479#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 480#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
@@ -893,6 +894,22 @@ typedef enum {
893 (&bp->def_status_blk->sp_sb.\ 894 (&bp->def_status_blk->sp_sb.\
894 index_values[HC_SP_INDEX_EQ_CONS]) 895 index_values[HC_SP_INDEX_EQ_CONS])
895 896
 897/* Data that will be used to create a link report message.  We keep
 898 * the data from the last link report in order to prevent reporting
 899 * the same link parameters twice.
900 */
901struct bnx2x_link_report_data {
902 u16 line_speed; /* Effective line speed */
903 unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
904};
905
906enum {
907 BNX2X_LINK_REPORT_FD, /* Full DUPLEX */
908 BNX2X_LINK_REPORT_LINK_DOWN,
909 BNX2X_LINK_REPORT_RX_FC_ON,
910 BNX2X_LINK_REPORT_TX_FC_ON,
911};
912
896struct bnx2x { 913struct bnx2x {
897 /* Fields used in the tx and intr/napi performance paths 914 /* Fields used in the tx and intr/napi performance paths
898 * are grouped together in the beginning of the structure 915 * are grouped together in the beginning of the structure
@@ -918,7 +935,6 @@ struct bnx2x {
918 935
919 int tx_ring_size; 936 int tx_ring_size;
920 937
921 u32 rx_csum;
922/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ 938/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
923#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) 939#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
924#define ETH_MIN_PACKET_SIZE 60 940#define ETH_MIN_PACKET_SIZE 60
@@ -1026,6 +1042,9 @@ struct bnx2x {
1026 1042
1027 struct link_params link_params; 1043 struct link_params link_params;
1028 struct link_vars link_vars; 1044 struct link_vars link_vars;
1045 u32 link_cnt;
1046 struct bnx2x_link_report_data last_reported_link;
1047
1029 struct mdio_if_info mdio; 1048 struct mdio_if_info mdio;
1030 1049
1031 struct bnx2x_common common; 1050 struct bnx2x_common common;
@@ -1442,6 +1461,8 @@ struct bnx2x_func_init_params {
1442#define WAIT_RAMROD_POLL 0x01 1461#define WAIT_RAMROD_POLL 0x01
1443#define WAIT_RAMROD_COMMON 0x02 1462#define WAIT_RAMROD_COMMON 0x02
1444 1463
1464void bnx2x_read_mf_cfg(struct bnx2x *bp);
1465
1445/* dmae */ 1466/* dmae */
1446void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 1467void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
1447void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 1468void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 16581df5ee4e..ca2bbc0e5d48 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
1/* bnx2x_cmn.c: Broadcom Everest network driver. 1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -27,6 +27,49 @@
27 27
28static int bnx2x_setup_irqs(struct bnx2x *bp); 28static int bnx2x_setup_irqs(struct bnx2x *bp);
29 29
30/**
31 * bnx2x_bz_fp - zero content of the fastpath structure.
32 *
33 * @bp: driver handle
34 * @index: fastpath index to be zeroed
35 *
36 * Makes sure the contents of the bp->fp[index].napi is kept
37 * intact.
38 */
39static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
40{
41 struct bnx2x_fastpath *fp = &bp->fp[index];
42 struct napi_struct orig_napi = fp->napi;
43 /* bzero bnx2x_fastpath contents */
44 memset(fp, 0, sizeof(*fp));
45
46 /* Restore the NAPI object as it has been already initialized */
47 fp->napi = orig_napi;
48}
49
50/**
51 * bnx2x_move_fp - move content of the fastpath structure.
52 *
53 * @bp: driver handle
54 * @from: source FP index
55 * @to: destination FP index
56 *
 57 * Makes sure the contents of bp->fp[to].napi are kept
 58 * intact.
59 */
60static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
61{
62 struct bnx2x_fastpath *from_fp = &bp->fp[from];
63 struct bnx2x_fastpath *to_fp = &bp->fp[to];
64 struct napi_struct orig_napi = to_fp->napi;
65 /* Move bnx2x_fastpath contents */
66 memcpy(to_fp, from_fp, sizeof(*to_fp));
67 to_fp->index = to;
68
69 /* Restore the NAPI object as it has been already initialized */
70 to_fp->napi = orig_napi;
71}
72
30/* free skb in the packet ring at pos idx 73/* free skb in the packet ring at pos idx
31 * return idx of last bd freed 74 * return idx of last bd freed
32 */ 75 */
@@ -265,13 +308,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
265 */ 308 */
266#define TPA_TSTAMP_OPT_LEN 12 309#define TPA_TSTAMP_OPT_LEN 12
267/** 310/**
268 * Calculate the approximate value of the MSS for this 311 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
269 * aggregation using the first packet of it.
270 * 312 *
271 * @param bp 313 * @bp: driver handle
272 * @param parsing_flags Parsing flags from the START CQE 314 * @parsing_flags: parsing flags from the START CQE
273 * @param len_on_bd Total length of the first packet for the 315 * @len_on_bd: total length of the first packet for the
274 * aggregation. 316 * aggregation.
317 *
318 * Approximate value of the MSS for this aggregation calculated using
319 * the first packet of it.
275 */ 320 */
276static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, 321static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
277 u16 len_on_bd) 322 u16 len_on_bd)
@@ -640,7 +685,7 @@ reuse_rx:
640 685
641 skb_checksum_none_assert(skb); 686 skb_checksum_none_assert(skb);
642 687
643 if (bp->rx_csum) { 688 if (bp->dev->features & NETIF_F_RXCSUM) {
644 if (likely(BNX2X_RX_CSUM_OK(cqe))) 689 if (likely(BNX2X_RX_CSUM_OK(cqe)))
645 skb->ip_summed = CHECKSUM_UNNECESSARY; 690 skb->ip_summed = CHECKSUM_UNNECESSARY;
646 else 691 else
@@ -758,35 +803,119 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
758 return line_speed; 803 return line_speed;
759} 804}
760 805
806/**
807 * bnx2x_fill_report_data - fill link report data to report
808 *
809 * @bp: driver handle
810 * @data: link state to update
811 *
 812 * It uses non-atomic bit operations because it is called under the mutex.
813 */
814static inline void bnx2x_fill_report_data(struct bnx2x *bp,
815 struct bnx2x_link_report_data *data)
816{
817 u16 line_speed = bnx2x_get_mf_speed(bp);
818
819 memset(data, 0, sizeof(*data));
820
 821 /* Fill the report data: effective line speed */
822 data->line_speed = line_speed;
823
824 /* Link is down */
825 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
826 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
827 &data->link_report_flags);
828
829 /* Full DUPLEX */
830 if (bp->link_vars.duplex == DUPLEX_FULL)
831 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
832
833 /* Rx Flow Control is ON */
834 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
835 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
836
837 /* Tx Flow Control is ON */
838 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
839 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
840}
841
842/**
843 * bnx2x_link_report - report link status to OS.
844 *
845 * @bp: driver handle
846 *
 847 * Calls __bnx2x_link_report() under the same locking scheme
 848 * as the link/PHY state managing code to ensure consistent link
 849 * reporting.
850 */
851
761void bnx2x_link_report(struct bnx2x *bp) 852void bnx2x_link_report(struct bnx2x *bp)
762{ 853{
763 if (bp->flags & MF_FUNC_DIS) { 854 bnx2x_acquire_phy_lock(bp);
764 netif_carrier_off(bp->dev); 855 __bnx2x_link_report(bp);
765 netdev_err(bp->dev, "NIC Link is Down\n"); 856 bnx2x_release_phy_lock(bp);
766 return; 857}
767 }
768 858
769 if (bp->link_vars.link_up) { 859/**
770 u16 line_speed; 860 * __bnx2x_link_report - report link status to OS.
861 *
862 * @bp: driver handle
863 *
 864 * Non-atomic implementation.
865 * Should be called under the phy_lock.
866 */
867void __bnx2x_link_report(struct bnx2x *bp)
868{
869 struct bnx2x_link_report_data cur_data;
771 870
772 if (bp->state == BNX2X_STATE_OPEN) 871 /* reread mf_cfg */
773 netif_carrier_on(bp->dev); 872 if (!CHIP_IS_E1(bp))
774 netdev_info(bp->dev, "NIC Link is Up, "); 873 bnx2x_read_mf_cfg(bp);
874
875 /* Read the current link report info */
876 bnx2x_fill_report_data(bp, &cur_data);
877
878 /* Don't report link down or exactly the same link status twice */
879 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
880 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
881 &bp->last_reported_link.link_report_flags) &&
882 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
883 &cur_data.link_report_flags)))
884 return;
775 885
776 line_speed = bnx2x_get_mf_speed(bp); 886 bp->link_cnt++;
777 887
 778 pr_cont("%d Mbps ", line_speed); 888 /* We are going to report new link parameters now -
889 * remember the current data for the next time.
890 */
891 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
892
893 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
894 &cur_data.link_report_flags)) {
895 netif_carrier_off(bp->dev);
896 netdev_err(bp->dev, "NIC Link is Down\n");
897 return;
898 } else {
899 netif_carrier_on(bp->dev);
900 netdev_info(bp->dev, "NIC Link is Up, ");
901 pr_cont("%d Mbps ", cur_data.line_speed);
779 902
780 if (bp->link_vars.duplex == DUPLEX_FULL) 903 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
904 &cur_data.link_report_flags))
781 pr_cont("full duplex"); 905 pr_cont("full duplex");
782 else 906 else
783 pr_cont("half duplex"); 907 pr_cont("half duplex");
784 908
785 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) { 909 /* Handle the FC at the end so that only these flags would be
786 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) { 910 * possibly set. This way we may easily check if there is no FC
911 * enabled.
912 */
913 if (cur_data.link_report_flags) {
914 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
915 &cur_data.link_report_flags)) {
787 pr_cont(", receive "); 916 pr_cont(", receive ");
788 if (bp->link_vars.flow_ctrl & 917 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
789 BNX2X_FLOW_CTRL_TX) 918 &cur_data.link_report_flags))
790 pr_cont("& transmit "); 919 pr_cont("& transmit ");
791 } else { 920 } else {
792 pr_cont(", transmit "); 921 pr_cont(", transmit ");
@@ -794,62 +923,9 @@ void bnx2x_link_report(struct bnx2x *bp)
794 pr_cont("flow control ON"); 923 pr_cont("flow control ON");
795 } 924 }
796 pr_cont("\n"); 925 pr_cont("\n");
797
798 } else { /* link_down */
799 netif_carrier_off(bp->dev);
800 netdev_err(bp->dev, "NIC Link is Down\n");
801 } 926 }
802} 927}
803 928
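The bnx2x_link_report()/__bnx2x_link_report() split above follows the usual locked/lockless-variant convention: callers that already hold the PHY lock use the double-underscore version directly. A hedged sketch of such a caller (foo_link_event() is illustrative, not part of the patch):

static void foo_link_event(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);

	/* ... refresh bp->link_vars from the hardware here ... */

	/* The lock is already held, so call the __ variant directly;
	 * bnx2x_link_report() would try to take the PHY lock again.
	 */
	__bnx2x_link_report(bp);

	bnx2x_release_phy_lock(bp);
}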
804/* Returns the number of actually allocated BDs */
805static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
806 int rx_ring_size)
807{
808 struct bnx2x *bp = fp->bp;
809 u16 ring_prod, cqe_ring_prod;
810 int i;
811
812 fp->rx_comp_cons = 0;
813 cqe_ring_prod = ring_prod = 0;
814 for (i = 0; i < rx_ring_size; i++) {
815 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
816 BNX2X_ERR("was only able to allocate "
817 "%d rx skbs on queue[%d]\n", i, fp->index);
818 fp->eth_q_stats.rx_skb_alloc_failed++;
819 break;
820 }
821 ring_prod = NEXT_RX_IDX(ring_prod);
822 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
823 WARN_ON(ring_prod <= i);
824 }
825
826 fp->rx_bd_prod = ring_prod;
827 /* Limit the CQE producer by the CQE ring size */
828 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
829 cqe_ring_prod);
830 fp->rx_pkt = fp->rx_calls = 0;
831
832 return i;
833}
834
835static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
836{
837 struct bnx2x *bp = fp->bp;
838 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
839 MAX_RX_AVAIL/bp->num_queues;
840
841 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
842
843 bnx2x_alloc_rx_bds(fp, rx_ring_size);
844
845 /* Warning!
846 * this will generate an interrupt (to the TSTORM)
847 * must only be done after chip is initialized
848 */
849 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
850 fp->rx_sge_prod);
851}
852
853void bnx2x_init_rx_rings(struct bnx2x *bp) 929void bnx2x_init_rx_rings(struct bnx2x *bp)
854{ 930{
855 int func = BP_FUNC(bp); 931 int func = BP_FUNC(bp);
@@ -858,6 +934,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
858 u16 ring_prod; 934 u16 ring_prod;
859 int i, j; 935 int i, j;
860 936
937 /* Allocate TPA resources */
861 for_each_rx_queue(bp, j) { 938 for_each_rx_queue(bp, j) {
862 struct bnx2x_fastpath *fp = &bp->fp[j]; 939 struct bnx2x_fastpath *fp = &bp->fp[j];
863 940
@@ -865,6 +942,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
865 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); 942 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
866 943
867 if (!fp->disable_tpa) { 944 if (!fp->disable_tpa) {
945 /* Fill the per-aggregation pool */
868 for (i = 0; i < max_agg_queues; i++) { 946 for (i = 0; i < max_agg_queues; i++) {
869 fp->tpa_pool[i].skb = 947 fp->tpa_pool[i].skb =
870 netdev_alloc_skb(bp->dev, fp->rx_buf_size); 948 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
@@ -919,13 +997,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
919 997
920 fp->rx_bd_cons = 0; 998 fp->rx_bd_cons = 0;
921 999
922 bnx2x_set_next_page_rx_bd(fp); 1000 /* Activate BD ring */
923 1001 /* Warning!
924 /* CQ ring */ 1002 * this will generate an interrupt (to the TSTORM)
925 bnx2x_set_next_page_rx_cq(fp); 1003 * must only be done after chip is initialized
926 1004 */
927 /* Allocate BDs and initialize BD ring */ 1005 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
928 bnx2x_alloc_rx_bd_ring(fp); 1006 fp->rx_sge_prod);
929 1007
930 if (j != 0) 1008 if (j != 0)
931 continue; 1009 continue;
@@ -959,27 +1037,40 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
959 } 1037 }
960} 1038}
961 1039
1040static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1041{
1042 struct bnx2x *bp = fp->bp;
1043 int i;
1044
1045 /* ring wasn't allocated */
1046 if (fp->rx_buf_ring == NULL)
1047 return;
1048
1049 for (i = 0; i < NUM_RX_BD; i++) {
1050 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1051 struct sk_buff *skb = rx_buf->skb;
1052
1053 if (skb == NULL)
1054 continue;
1055
1056 dma_unmap_single(&bp->pdev->dev,
1057 dma_unmap_addr(rx_buf, mapping),
1058 fp->rx_buf_size, DMA_FROM_DEVICE);
1059
1060 rx_buf->skb = NULL;
1061 dev_kfree_skb(skb);
1062 }
1063}
1064
962static void bnx2x_free_rx_skbs(struct bnx2x *bp) 1065static void bnx2x_free_rx_skbs(struct bnx2x *bp)
963{ 1066{
964 int i, j; 1067 int j;
965 1068
966 for_each_rx_queue(bp, j) { 1069 for_each_rx_queue(bp, j) {
967 struct bnx2x_fastpath *fp = &bp->fp[j]; 1070 struct bnx2x_fastpath *fp = &bp->fp[j];
968 1071
969 for (i = 0; i < NUM_RX_BD; i++) { 1072 bnx2x_free_rx_bds(fp);
970 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
971 struct sk_buff *skb = rx_buf->skb;
972
973 if (skb == NULL)
974 continue;
975
976 dma_unmap_single(&bp->pdev->dev,
977 dma_unmap_addr(rx_buf, mapping),
978 fp->rx_buf_size, DMA_FROM_DEVICE);
979 1073
980 rx_buf->skb = NULL;
981 dev_kfree_skb(skb);
982 }
983 if (!fp->disable_tpa) 1074 if (!fp->disable_tpa)
984 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 1075 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
985 ETH_MAX_AGGREGATION_QUEUES_E1 : 1076 ETH_MAX_AGGREGATION_QUEUES_E1 :
@@ -1345,29 +1436,47 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1345 1436
1346 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 1437 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1347 1438
1439 /* Set the initial link reported state to link down */
1440 bnx2x_acquire_phy_lock(bp);
1441 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1442 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1443 &bp->last_reported_link.link_report_flags);
1444 bnx2x_release_phy_lock(bp);
1445
1348 /* must be called before memory allocation and HW init */ 1446 /* must be called before memory allocation and HW init */
1349 bnx2x_ilt_set_info(bp); 1447 bnx2x_ilt_set_info(bp);
1350 1448
1449 /* zero fastpath structures preserving invariants like napi which are
1450 * allocated only once
1451 */
1452 for_each_queue(bp, i)
1453 bnx2x_bz_fp(bp, i);
1454
1351 /* Set the receive queues buffer size */ 1455 /* Set the receive queues buffer size */
1352 bnx2x_set_rx_buf_size(bp); 1456 bnx2x_set_rx_buf_size(bp);
1353 1457
1458 for_each_queue(bp, i)
1459 bnx2x_fp(bp, i, disable_tpa) =
1460 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1461
1462#ifdef BCM_CNIC
1463 /* We don't want TPA on FCoE L2 ring */
1464 bnx2x_fcoe(bp, disable_tpa) = 1;
1465#endif
1466
1354 if (bnx2x_alloc_mem(bp)) 1467 if (bnx2x_alloc_mem(bp))
1355 return -ENOMEM; 1468 return -ENOMEM;
1356 1469
1470 /* As long as bnx2x_alloc_mem() may possibly update
1471 * bp->num_queues, bnx2x_set_real_num_queues() should always
1472 * come after it.
1473 */
1357 rc = bnx2x_set_real_num_queues(bp); 1474 rc = bnx2x_set_real_num_queues(bp);
1358 if (rc) { 1475 if (rc) {
1359 BNX2X_ERR("Unable to set real_num_queues\n"); 1476 BNX2X_ERR("Unable to set real_num_queues\n");
1360 goto load_error0; 1477 goto load_error0;
1361 } 1478 }
1362 1479
1363 for_each_queue(bp, i)
1364 bnx2x_fp(bp, i, disable_tpa) =
1365 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1366
1367#ifdef BCM_CNIC
1368 /* We don't want TPA on FCoE L2 ring */
1369 bnx2x_fcoe(bp, disable_tpa) = 1;
1370#endif
1371 bnx2x_napi_enable(bp); 1480 bnx2x_napi_enable(bp);
1372 1481
1373 /* Send LOAD_REQUEST command to MCP 1482 /* Send LOAD_REQUEST command to MCP
@@ -1976,12 +2085,11 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1976} 2085}
1977 2086
1978/** 2087/**
1979 * Update PBD in GSO case. 2088 * bnx2x_set_pbd_gso - update PBD in GSO case.
1980 * 2089 *
1981 * @param skb 2090 * @skb: packet skb
1982 * @param tx_start_bd 2091 * @pbd: parse BD
1983 * @param pbd 2092 * @xmit_type: xmit flags
1984 * @param xmit_type
1985 */ 2093 */
1986static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, 2094static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1987 struct eth_tx_parse_bd_e1x *pbd, 2095 struct eth_tx_parse_bd_e1x *pbd,
@@ -2008,13 +2116,14 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2008} 2116}
2009 2117
2010/** 2118/**
2119 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2011 * 2120 *
2012 * @param skb 2121 * @bp: driver handle
2013 * @param tx_start_bd 2122 * @skb: packet skb
2014 * @param pbd_e2 2123 * @parsing_data: data to be updated
2015 * @param xmit_type 2124 * @xmit_type: xmit flags
2016 * 2125 *
2017 * @return header len 2126 * 57712 related
2018 */ 2127 */
2019static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 2128static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2020 u32 *parsing_data, u32 xmit_type) 2129 u32 *parsing_data, u32 xmit_type)
@@ -2039,13 +2148,12 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2039} 2148}
2040 2149
2041/** 2150/**
2151 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2042 * 2152 *
2043 * @param skb 2153 * @bp: driver handle
2044 * @param tx_start_bd 2154 * @skb: packet skb
2045 * @param pbd 2155 * @pbd: parse BD to be updated
2046 * @param xmit_type 2156 * @xmit_type: xmit flags
2047 *
2048 * @return Header length
2049 */ 2157 */
2050static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, 2158static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2051 struct eth_tx_parse_bd_e1x *pbd, 2159 struct eth_tx_parse_bd_e1x *pbd,
@@ -2393,6 +2501,232 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2393 return 0; 2501 return 0;
2394} 2502}
2395 2503
2504static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2505{
2506 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2507 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2508
2509 /* Common */
2510#ifdef BCM_CNIC
2511 if (IS_FCOE_IDX(fp_index)) {
2512 memset(sb, 0, sizeof(union host_hc_status_block));
2513 fp->status_blk_mapping = 0;
2514
2515 } else {
2516#endif
2517 /* status blocks */
2518 if (CHIP_IS_E2(bp))
2519 BNX2X_PCI_FREE(sb->e2_sb,
2520 bnx2x_fp(bp, fp_index,
2521 status_blk_mapping),
2522 sizeof(struct host_hc_status_block_e2));
2523 else
2524 BNX2X_PCI_FREE(sb->e1x_sb,
2525 bnx2x_fp(bp, fp_index,
2526 status_blk_mapping),
2527 sizeof(struct host_hc_status_block_e1x));
2528#ifdef BCM_CNIC
2529 }
2530#endif
2531 /* Rx */
2532 if (!skip_rx_queue(bp, fp_index)) {
2533 bnx2x_free_rx_bds(fp);
2534
2535 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2536 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2537 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2538 bnx2x_fp(bp, fp_index, rx_desc_mapping),
2539 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2540
2541 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2542 bnx2x_fp(bp, fp_index, rx_comp_mapping),
2543 sizeof(struct eth_fast_path_rx_cqe) *
2544 NUM_RCQ_BD);
2545
2546 /* SGE ring */
2547 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2548 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2549 bnx2x_fp(bp, fp_index, rx_sge_mapping),
2550 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2551 }
2552
2553 /* Tx */
2554 if (!skip_tx_queue(bp, fp_index)) {
2555 /* fastpath tx rings: tx_buf tx_desc */
2556 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2557 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2558 bnx2x_fp(bp, fp_index, tx_desc_mapping),
2559 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2560 }
2561 /* end of fastpath */
2562}
2563
2564void bnx2x_free_fp_mem(struct bnx2x *bp)
2565{
2566 int i;
2567 for_each_queue(bp, i)
2568 bnx2x_free_fp_mem_at(bp, i);
2569}
2570
2571static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2572{
2573 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2574 if (CHIP_IS_E2(bp)) {
2575 bnx2x_fp(bp, index, sb_index_values) =
2576 (__le16 *)status_blk.e2_sb->sb.index_values;
2577 bnx2x_fp(bp, index, sb_running_index) =
2578 (__le16 *)status_blk.e2_sb->sb.running_index;
2579 } else {
2580 bnx2x_fp(bp, index, sb_index_values) =
2581 (__le16 *)status_blk.e1x_sb->sb.index_values;
2582 bnx2x_fp(bp, index, sb_running_index) =
2583 (__le16 *)status_blk.e1x_sb->sb.running_index;
2584 }
2585}
2586
2587static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2588{
2589 union host_hc_status_block *sb;
2590 struct bnx2x_fastpath *fp = &bp->fp[index];
2591 int ring_size = 0;
2592
2593 /* if rx_ring_size specified - use it */
2594 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2595 MAX_RX_AVAIL/bp->num_queues;
2596
2597 /* allocate at least number of buffers required by FW */
2598 rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2599 MIN_RX_SIZE_TPA,
2600 rx_ring_size);
2601
2602 bnx2x_fp(bp, index, bp) = bp;
2603 bnx2x_fp(bp, index, index) = index;
2604
2605 /* Common */
2606 sb = &bnx2x_fp(bp, index, status_blk);
2607#ifdef BCM_CNIC
2608 if (!IS_FCOE_IDX(index)) {
2609#endif
2610 /* status blocks */
2611 if (CHIP_IS_E2(bp))
2612 BNX2X_PCI_ALLOC(sb->e2_sb,
2613 &bnx2x_fp(bp, index, status_blk_mapping),
2614 sizeof(struct host_hc_status_block_e2));
2615 else
2616 BNX2X_PCI_ALLOC(sb->e1x_sb,
2617 &bnx2x_fp(bp, index, status_blk_mapping),
2618 sizeof(struct host_hc_status_block_e1x));
2619#ifdef BCM_CNIC
2620 }
2621#endif
2622 set_sb_shortcuts(bp, index);
2623
2624 /* Tx */
2625 if (!skip_tx_queue(bp, index)) {
2626 /* fastpath tx rings: tx_buf tx_desc */
2627 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2628 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2629 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2630 &bnx2x_fp(bp, index, tx_desc_mapping),
2631 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2632 }
2633
2634 /* Rx */
2635 if (!skip_rx_queue(bp, index)) {
2636 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2637 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2638 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2639 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2640 &bnx2x_fp(bp, index, rx_desc_mapping),
2641 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2642
2643 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2644 &bnx2x_fp(bp, index, rx_comp_mapping),
2645 sizeof(struct eth_fast_path_rx_cqe) *
2646 NUM_RCQ_BD);
2647
2648 /* SGE ring */
2649 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2650 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2651 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2652 &bnx2x_fp(bp, index, rx_sge_mapping),
2653 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2654 /* RX BD ring */
2655 bnx2x_set_next_page_rx_bd(fp);
2656
2657 /* CQ ring */
2658 bnx2x_set_next_page_rx_cq(fp);
2659
2660 /* BDs */
2661 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2662 if (ring_size < rx_ring_size)
2663 goto alloc_mem_err;
2664 }
2665
2666 return 0;
2667
2668/* handles low memory cases */
2669alloc_mem_err:
2670 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2671 index, ring_size);
 2672 /* FW will drop all packets if the queue is not big enough;
 2673 * in these cases we disable the queue.
 2674 * Min size is different for TPA and non-TPA queues.
2675 */
2676 if (ring_size < (fp->disable_tpa ?
2677 MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) {
2678 /* release memory allocated for this queue */
2679 bnx2x_free_fp_mem_at(bp, index);
2680 return -ENOMEM;
2681 }
2682 return 0;
2683}
2684
2685int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2686{
2687 int i;
2688
2689 /**
2690 * 1. Allocate FP for leading - fatal if error
2691 * 2. {CNIC} Allocate FCoE FP - fatal if error
2692 * 3. Allocate RSS - fix number of queues if error
2693 */
2694
2695 /* leading */
2696 if (bnx2x_alloc_fp_mem_at(bp, 0))
2697 return -ENOMEM;
2698#ifdef BCM_CNIC
2699 /* FCoE */
2700 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2701 return -ENOMEM;
2702#endif
2703 /* RSS */
2704 for_each_nondefault_eth_queue(bp, i)
2705 if (bnx2x_alloc_fp_mem_at(bp, i))
2706 break;
2707
2708 /* handle memory failures */
2709 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2710 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2711
2712 WARN_ON(delta < 0);
2713#ifdef BCM_CNIC
2714 /**
2715 * move non eth FPs next to last eth FP
2716 * must be done in that order
2717 * FCOE_IDX < FWD_IDX < OOO_IDX
2718 */
2719
2720 /* move FCoE fp */
2721 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
2722#endif
2723 bp->num_queues -= delta;
2724 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2725 bp->num_queues + delta, bp->num_queues);
2726 }
2727
2728 return 0;
2729}
2396 2730
2397static int bnx2x_setup_irqs(struct bnx2x *bp) 2731static int bnx2x_setup_irqs(struct bnx2x *bp)
2398{ 2732{
@@ -2457,11 +2791,21 @@ alloc_err:
2457 2791
2458} 2792}
2459 2793
2794static int bnx2x_reload_if_running(struct net_device *dev)
2795{
2796 struct bnx2x *bp = netdev_priv(dev);
2797
2798 if (unlikely(!netif_running(dev)))
2799 return 0;
2800
2801 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2802 return bnx2x_nic_load(bp, LOAD_NORMAL);
2803}
2804
2460/* called with rtnl_lock */ 2805/* called with rtnl_lock */
2461int bnx2x_change_mtu(struct net_device *dev, int new_mtu) 2806int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2462{ 2807{
2463 struct bnx2x *bp = netdev_priv(dev); 2808 struct bnx2x *bp = netdev_priv(dev);
2464 int rc = 0;
2465 2809
2466 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 2810 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2467 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 2811 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
@@ -2478,12 +2822,55 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2478 */ 2822 */
2479 dev->mtu = new_mtu; 2823 dev->mtu = new_mtu;
2480 2824
2481 if (netif_running(dev)) { 2825 return bnx2x_reload_if_running(dev);
2482 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 2826}
2483 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 2827
2828u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2829{
2830 struct bnx2x *bp = netdev_priv(dev);
2831
2832 /* TPA requires Rx CSUM offloading */
2833 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2834 features &= ~NETIF_F_LRO;
2835
2836 return features;
2837}
2838
2839int bnx2x_set_features(struct net_device *dev, u32 features)
2840{
2841 struct bnx2x *bp = netdev_priv(dev);
2842 u32 flags = bp->flags;
2843 bool bnx2x_reload = false;
2844
2845 if (features & NETIF_F_LRO)
2846 flags |= TPA_ENABLE_FLAG;
2847 else
2848 flags &= ~TPA_ENABLE_FLAG;
2849
2850 if (features & NETIF_F_LOOPBACK) {
2851 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2852 bp->link_params.loopback_mode = LOOPBACK_BMAC;
2853 bnx2x_reload = true;
2854 }
2855 } else {
2856 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2857 bp->link_params.loopback_mode = LOOPBACK_NONE;
2858 bnx2x_reload = true;
2859 }
2484 } 2860 }
2485 2861
2486 return rc; 2862 if (flags ^ bp->flags) {
2863 bp->flags = flags;
2864 bnx2x_reload = true;
2865 }
2866
2867 if (bnx2x_reload) {
2868 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2869 return bnx2x_reload_if_running(dev);
2870 /* else: bnx2x_nic_load() will be called at end of recovery */
2871 }
2872
2873 return 0;
2487} 2874}
2488 2875
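Because bnx2x_fix_features() above strips NETIF_F_LRO whenever RXCSUM is off or bp->disable_tpa is set, a driver-internal change to that state should be pushed back through the core rather than by editing dev->features by hand. A minimal sketch, assuming the caller already holds rtnl_lock (foo_tpa_state_changed() is illustrative):

static void foo_tpa_state_changed(struct bnx2x *bp)
{
	/* re-run ndo_fix_features()/ndo_set_features() so the LRO bit
	 * tracks the new TPA state; netdev_update_features() must be
	 * called under rtnl_lock
	 */
	netdev_update_features(bp->dev);
}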
2489void bnx2x_tx_timeout(struct net_device *dev) 2876void bnx2x_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index ef37b98d6146..fab161e8030d 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
1/* bnx2x_cmn.h: Broadcom Everest network driver. 1/* bnx2x_cmn.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -25,260 +25,277 @@
25 25
26extern int num_queues; 26extern int num_queues;
27 27
28/************************ Macros ********************************/
29#define BNX2X_PCI_FREE(x, y, size) \
30 do { \
31 if (x) { \
32 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
33 x = NULL; \
34 y = 0; \
35 } \
36 } while (0)
37
38#define BNX2X_FREE(x) \
39 do { \
40 if (x) { \
41 kfree((void *)x); \
42 x = NULL; \
43 } \
44 } while (0)
45
46#define BNX2X_PCI_ALLOC(x, y, size) \
47 do { \
48 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
49 if (x == NULL) \
50 goto alloc_mem_err; \
51 memset((void *)x, 0, size); \
52 } while (0)
53
54#define BNX2X_ALLOC(x, size) \
55 do { \
56 x = kzalloc(size, GFP_KERNEL); \
57 if (x == NULL) \
58 goto alloc_mem_err; \
59 } while (0)
60
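Note that the allocation macros above are not self-contained: they dereference a local "bp" and jump to an "alloc_mem_err" label, so any calling function must provide both. A sketch of the expected usage (foo_alloc_tx_rings() is illustrative; the fields are the ones used by bnx2x_alloc_fp_mem_at() later in this patch):

static int foo_alloc_tx_rings(struct bnx2x *bp, int index)
{
	BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
		    sizeof(struct sw_tx_bd) * NUM_TX_BD);
	BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
			&bnx2x_fp(bp, index, tx_desc_mapping),
			sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	return 0;

alloc_mem_err:
	/* partial allocations would be released here; the real driver
	 * does this via bnx2x_free_fp_mem_at()
	 */
	return -ENOMEM;
}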
28/*********************** Interfaces **************************** 61/*********************** Interfaces ****************************
29 * Functions that need to be implemented by each driver version 62 * Functions that need to be implemented by each driver version
30 */ 63 */
31 64
32/** 65/**
33 * Initialize link parameters structure variables. 66 * bnx2x_initial_phy_init - initialize link parameters structure variables.
34 *
35 * @param bp
36 * @param load_mode
37 * 67 *
38 * @return u8 68 * @bp: driver handle
69 * @load_mode: current mode
39 */ 70 */
40u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); 71u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
41 72
42/** 73/**
43 * Configure hw according to link parameters structure. 74 * bnx2x_link_set - configure hw according to link parameters structure.
44 * 75 *
45 * @param bp 76 * @bp: driver handle
46 */ 77 */
47void bnx2x_link_set(struct bnx2x *bp); 78void bnx2x_link_set(struct bnx2x *bp);
48 79
49/** 80/**
50 * Query link status 81 * bnx2x_link_test - query link status.
51 * 82 *
52 * @param bp 83 * @bp: driver handle
53 * @param is_serdes 84 * @is_serdes: bool
54 * 85 *
55 * @return 0 - link is UP 86 * Returns 0 if link is UP.
56 */ 87 */
57u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); 88u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
58 89
59/** 90/**
60 * Handles link status change 91 * bnx2x__link_status_update - handles link status change.
61 * 92 *
62 * @param bp 93 * @bp: driver handle
63 */ 94 */
64void bnx2x__link_status_update(struct bnx2x *bp); 95void bnx2x__link_status_update(struct bnx2x *bp);
65 96
66/** 97/**
67 * Report link status to upper layer 98 * bnx2x_link_report - report link status to upper layer.
68 *
69 * @param bp
70 * 99 *
71 * @return int 100 * @bp: driver handle
72 */ 101 */
73void bnx2x_link_report(struct bnx2x *bp); 102void bnx2x_link_report(struct bnx2x *bp);
74 103
 104/* Non-atomic version of bnx2x_link_report() */
105void __bnx2x_link_report(struct bnx2x *bp);
106
75/** 107/**
76 * calculates MF speed according to current linespeed and MF 108 * bnx2x_get_mf_speed - calculate MF speed.
77 * configuration
78 * 109 *
79 * @param bp 110 * @bp: driver handle
80 * 111 *
81 * @return u16 112 * Takes into account current linespeed and MF configuration.
82 */ 113 */
83u16 bnx2x_get_mf_speed(struct bnx2x *bp); 114u16 bnx2x_get_mf_speed(struct bnx2x *bp);
84 115
85/** 116/**
86 * MSI-X slowpath interrupt handler 117 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
87 *
88 * @param irq
89 * @param dev_instance
90 * 118 *
91 * @return irqreturn_t 119 * @irq: irq number
120 * @dev_instance: private instance
92 */ 121 */
93irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); 122irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
94 123
95/** 124/**
96 * non MSI-X interrupt handler 125 * bnx2x_interrupt - non MSI-X interrupt handler
97 * 126 *
98 * @param irq 127 * @irq: irq number
99 * @param dev_instance 128 * @dev_instance: private instance
100 *
101 * @return irqreturn_t
102 */ 129 */
103irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); 130irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
104#ifdef BCM_CNIC 131#ifdef BCM_CNIC
105 132
106/** 133/**
107 * Send command to cnic driver 134 * bnx2x_cnic_notify - send command to cnic driver
108 * 135 *
109 * @param bp 136 * @bp: driver handle
110 * @param cmd 137 * @cmd: command
111 */ 138 */
112int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); 139int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
113 140
114/** 141/**
115 * Provides cnic information for proper interrupt handling 142 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
116 * 143 *
117 * @param bp 144 * @bp: driver handle
118 */ 145 */
119void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); 146void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
120#endif 147#endif
121 148
122/** 149/**
123 * Enable HW interrupts. 150 * bnx2x_int_enable - enable HW interrupts.
124 * 151 *
125 * @param bp 152 * @bp: driver handle
126 */ 153 */
127void bnx2x_int_enable(struct bnx2x *bp); 154void bnx2x_int_enable(struct bnx2x *bp);
128 155
129/** 156/**
130 * Disable interrupts. This function ensures that there are no 157 * bnx2x_int_disable_sync - disable interrupts.
131 * ISRs or SP DPCs (sp_task) are running after it returns.
132 * 158 *
133 * @param bp 159 * @bp: driver handle
134 * @param disable_hw if true, disable HW interrupts. 160 * @disable_hw: true, disable HW interrupts.
161 *
 162 * This function ensures that there are no
 163 * ISRs or SP DPCs (sp_task) running after it returns.
135 */ 164 */
136void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 165void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
137 166
138/** 167/**
139 * Loads device firmware 168 * bnx2x_init_firmware - loads device firmware
140 * 169 *
141 * @param bp 170 * @bp: driver handle
142 *
143 * @return int
144 */ 171 */
145int bnx2x_init_firmware(struct bnx2x *bp); 172int bnx2x_init_firmware(struct bnx2x *bp);
146 173
147/** 174/**
148 * Init HW blocks according to current initialization stage: 175 * bnx2x_init_hw - init HW blocks according to current initialization stage.
149 * COMMON, PORT or FUNCTION.
150 *
151 * @param bp
152 * @param load_code: COMMON, PORT or FUNCTION
153 * 176 *
154 * @return int 177 * @bp: driver handle
178 * @load_code: COMMON, PORT or FUNCTION
155 */ 179 */
156int bnx2x_init_hw(struct bnx2x *bp, u32 load_code); 180int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
157 181
158/** 182/**
159 * Init driver internals: 183 * bnx2x_nic_init - init driver internals.
184 *
185 * @bp: driver handle
186 * @load_code: COMMON, PORT or FUNCTION
187 *
188 * Initializes:
160 * - rings 189 * - rings
161 * - status blocks 190 * - status blocks
162 * - etc. 191 * - etc.
163 *
164 * @param bp
165 * @param load_code COMMON, PORT or FUNCTION
166 */ 192 */
167void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); 193void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
168 194
169/** 195/**
170 * Allocate driver's memory. 196 * bnx2x_alloc_mem - allocate driver's memory.
171 *
172 * @param bp
173 * 197 *
174 * @return int 198 * @bp: driver handle
175 */ 199 */
176int bnx2x_alloc_mem(struct bnx2x *bp); 200int bnx2x_alloc_mem(struct bnx2x *bp);
177 201
178/** 202/**
179 * Release driver's memory. 203 * bnx2x_free_mem - release driver's memory.
180 * 204 *
181 * @param bp 205 * @bp: driver handle
182 */ 206 */
183void bnx2x_free_mem(struct bnx2x *bp); 207void bnx2x_free_mem(struct bnx2x *bp);
184 208
185/** 209/**
186 * Setup eth Client. 210 * bnx2x_setup_client - setup eth client.
187 * 211 *
188 * @param bp 212 * @bp: driver handle
189 * @param fp 213 * @fp: pointer to fastpath structure
190 * @param is_leading 214 * @is_leading: boolean
191 *
192 * @return int
193 */ 215 */
194int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, 216int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
195 int is_leading); 217 int is_leading);
196 218
197/** 219/**
198 * Set number of queues according to mode 220 * bnx2x_set_num_queues - set number of queues according to mode.
199 *
200 * @param bp
201 * 221 *
222 * @bp: driver handle
202 */ 223 */
203void bnx2x_set_num_queues(struct bnx2x *bp); 224void bnx2x_set_num_queues(struct bnx2x *bp);
204 225
205/** 226/**
206 * Cleanup chip internals: 227 * bnx2x_chip_cleanup - cleanup chip internals.
228 *
229 * @bp: driver handle
230 * @unload_mode: COMMON, PORT, FUNCTION
231 *
207 * - Cleanup MAC configuration. 232 * - Cleanup MAC configuration.
208 * - Close clients. 233 * - Closes clients.
209 * - etc. 234 * - etc.
210 *
211 * @param bp
212 * @param unload_mode
213 */ 235 */
214void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); 236void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
215 237
216/** 238/**
217 * Acquire HW lock. 239 * bnx2x_acquire_hw_lock - acquire HW lock.
218 * 240 *
219 * @param bp 241 * @bp: driver handle
220 * @param resource Resource bit which was locked 242 * @resource: resource bit which was locked
221 *
222 * @return int
223 */ 243 */
224int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); 244int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
225 245
226/** 246/**
227 * Release HW lock. 247 * bnx2x_release_hw_lock - release HW lock.
228 *
229 * @param bp driver handle
230 * @param resource Resource bit which was locked
231 * 248 *
232 * @return int 249 * @bp: driver handle
250 * @resource: resource bit which was locked
233 */ 251 */
234int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); 252int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
235 253
236/** 254/**
237 * Configure eth MAC address in the HW according to the value in 255 * bnx2x_set_eth_mac - configure eth MAC address in the HW
238 * netdev->dev_addr.
239 * 256 *
240 * @param bp driver handle 257 * @bp: driver handle
241 * @param set 258 * @set: set or clear
259 *
260 * Configures according to the value in netdev->dev_addr.
242 */ 261 */
243void bnx2x_set_eth_mac(struct bnx2x *bp, int set); 262void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
244 263
245#ifdef BCM_CNIC 264#ifdef BCM_CNIC
246/** 265/**
247 * Set/Clear FIP MAC(s) at the next enties in the CAM after the ETH 266 * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
248 * MAC(s). This function will wait until the ramdord completion
249 * returns.
250 * 267 *
251 * @param bp driver handle 268 * @bp: driver handle
252 * @param set set or clear the CAM entry 269 * @set: set or clear the CAM entry
253 * 270 *
 254 * @return 0 if cussess, -ENODEV if ramrod doesn't return. 271 * Uses the next entries in the CAM after the ETH MAC(s).
 272 * This function will wait until the ramrod completion returns.
 273 * Returns 0 on success, -ENODEV if the ramrod doesn't return.
255 */ 274 */
256int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set); 275int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
257 276
258/** 277/**
259 * Set/Clear ALL_ENODE mcast MAC. 278 * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
260 *
261 * @param bp
262 * @param set
263 * 279 *
264 * @return int 280 * @bp: driver handle
281 * @set: set or clear
265 */ 282 */
266int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set); 283int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
267#endif 284#endif
268 285
269/** 286/**
270 * Set MAC filtering configurations. 287 * bnx2x_set_rx_mode - set MAC filtering configurations.
271 * 288 *
272 * @remarks called with netif_tx_lock from dev_mcast.c 289 * @dev: netdevice
273 * 290 *
274 * @param dev net_device 291 * called with netif_tx_lock from dev_mcast.c
275 */ 292 */
276void bnx2x_set_rx_mode(struct net_device *dev); 293void bnx2x_set_rx_mode(struct net_device *dev);
277 294
278/** 295/**
279 * Configure MAC filtering rules in a FW. 296 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
280 * 297 *
281 * @param bp driver handle 298 * @bp: driver handle
282 */ 299 */
283void bnx2x_set_storm_rx_mode(struct bnx2x *bp); 300void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
284 301
@@ -290,63 +307,59 @@ bool bnx2x_reset_is_done(struct bnx2x *bp);
290void bnx2x_disable_close_the_gate(struct bnx2x *bp); 307void bnx2x_disable_close_the_gate(struct bnx2x *bp);
291 308
292/** 309/**
293 * Perform statistics handling according to event 310 * bnx2x_stats_handle - perform statistics handling according to event.
294 * 311 *
295 * @param bp driver handle 312 * @bp: driver handle
296 * @param event bnx2x_stats_event 313 * @event: bnx2x_stats_event
297 */ 314 */
298void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 315void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
299 316
300/** 317/**
301 * Handle ramrods completion 318 * bnx2x_sp_event - handle ramrods completion.
302 * 319 *
303 * @param fp fastpath handle for the event 320 * @fp: fastpath handle for the event
304 * @param rr_cqe eth_rx_cqe 321 * @rr_cqe: eth_rx_cqe
305 */ 322 */
306void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); 323void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
307 324
308/** 325/**
309 * Init/halt function before/after sending 326 * bnx2x_func_start - init function
310 * CLIENT_SETUP/CFC_DEL for the first/last client.
311 * 327 *
312 * @param bp 328 * @bp: driver handle
313 * 329 *
314 * @return int 330 * Must be called before sending CLIENT_SETUP for the first client.
315 */ 331 */
316int bnx2x_func_start(struct bnx2x *bp); 332int bnx2x_func_start(struct bnx2x *bp);
317 333
318/** 334/**
319 * Prepare ILT configurations according to current driver 335 * bnx2x_ilt_set_info - prepare ILT configurations.
320 * parameters.
321 * 336 *
322 * @param bp 337 * @bp: driver handle
323 */ 338 */
324void bnx2x_ilt_set_info(struct bnx2x *bp); 339void bnx2x_ilt_set_info(struct bnx2x *bp);
325 340
326/** 341/**
327 * Inintialize dcbx protocol 342 * bnx2x_dcbx_init - initialize dcbx protocol.
328 * 343 *
329 * @param bp 344 * @bp: driver handle
330 */ 345 */
331void bnx2x_dcbx_init(struct bnx2x *bp); 346void bnx2x_dcbx_init(struct bnx2x *bp);
332 347
333/** 348/**
334 * Set power state to the requested value. Currently only D0 and 349 * bnx2x_set_power_state - set power state to the requested value.
335 * D3hot are supported.
336 * 350 *
337 * @param bp 351 * @bp: driver handle
338 * @param state D0 or D3hot 352 * @state: required state D0 or D3hot
339 * 353 *
340 * @return int 354 * Currently only D0 and D3hot are supported.
341 */ 355 */
342int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); 356int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
343 357
344/** 358/**
345 * Updates MAX part of MF configuration in HW 359 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
346 * (if required)
347 * 360 *
348 * @param bp 361 * @bp: driver handle
349 * @param value 362 * @value: new value
350 */ 363 */
351void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); 364void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
352 365
@@ -377,83 +390,72 @@ int bnx2x_resume(struct pci_dev *pdev);
377/* Release IRQ vectors */ 390/* Release IRQ vectors */
378void bnx2x_free_irq(struct bnx2x *bp); 391void bnx2x_free_irq(struct bnx2x *bp);
379 392
393void bnx2x_free_fp_mem(struct bnx2x *bp);
394int bnx2x_alloc_fp_mem(struct bnx2x *bp);
395
380void bnx2x_init_rx_rings(struct bnx2x *bp); 396void bnx2x_init_rx_rings(struct bnx2x *bp);
381void bnx2x_free_skbs(struct bnx2x *bp); 397void bnx2x_free_skbs(struct bnx2x *bp);
382void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); 398void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
383void bnx2x_netif_start(struct bnx2x *bp); 399void bnx2x_netif_start(struct bnx2x *bp);
384 400
385/** 401/**
386 * Fill msix_table, request vectors, update num_queues according 402 * bnx2x_enable_msix - set msix configuration.
387 * to number of available vectors
388 * 403 *
389 * @param bp 404 * @bp: driver handle
390 * 405 *
391 * @return int 406 * fills msix_table, requests vectors, updates num_queues
407 * according to number of available vectors.
392 */ 408 */
393int bnx2x_enable_msix(struct bnx2x *bp); 409int bnx2x_enable_msix(struct bnx2x *bp);
394 410
395/** 411/**
396 * Request msi mode from OS, updated internals accordingly 412 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
397 *
398 * @param bp
399 * 413 *
400 * @return int 414 * @bp: driver handle
401 */ 415 */
402int bnx2x_enable_msi(struct bnx2x *bp); 416int bnx2x_enable_msi(struct bnx2x *bp);
403 417
404/** 418/**
405 * NAPI callback 419 * bnx2x_poll - NAPI callback
406 * 420 *
407 * @param napi 421 * @napi: napi structure
 408 * @param budget 422 * @budget: NAPI budget
409 * 423 *
410 * @return int
411 */ 424 */
412int bnx2x_poll(struct napi_struct *napi, int budget); 425int bnx2x_poll(struct napi_struct *napi, int budget);
413 426
414/** 427/**
415 * Allocate/release memories outsize main driver structure 428 * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure
416 * 429 *
417 * @param bp 430 * @bp: driver handle
418 *
419 * @return int
420 */ 431 */
421int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); 432int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
422void bnx2x_free_mem_bp(struct bnx2x *bp);
423 433
424/** 434/**
 425 * Change mtu netdev callback 435 * bnx2x_free_mem_bp - release memories outside the main driver structure
426 * 436 *
427 * @param dev 437 * @bp: driver handle
428 * @param new_mtu
429 *
430 * @return int
431 */ 438 */
432int bnx2x_change_mtu(struct net_device *dev, int new_mtu); 439void bnx2x_free_mem_bp(struct bnx2x *bp);
433 440
434/** 441/**
435 * tx timeout netdev callback 442 * bnx2x_change_mtu - change mtu netdev callback
436 * 443 *
437 * @param dev 444 * @dev: net device
438 * @param new_mtu 445 * @new_mtu: requested mtu
439 * 446 *
440 * @return int
441 */ 447 */
442void bnx2x_tx_timeout(struct net_device *dev); 448int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
449
450u32 bnx2x_fix_features(struct net_device *dev, u32 features);
451int bnx2x_set_features(struct net_device *dev, u32 features);
443 452
444#ifdef BCM_VLAN
445/** 453/**
446 * vlan rx register netdev callback 454 * bnx2x_tx_timeout - tx timeout netdev callback
447 * 455 *
448 * @param dev 456 * @dev: net device
449 * @param new_mtu
450 *
451 * @return int
452 */ 457 */
453void bnx2x_vlan_rx_register(struct net_device *dev, 458void bnx2x_tx_timeout(struct net_device *dev);
454 struct vlan_group *vlgrp);
455
456#endif
457 459
458static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 460static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
459{ 461{
@@ -705,7 +707,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
705/** 707/**
706 * disables tx from stack point of view 708 * disables tx from stack point of view
707 * 709 *
708 * @param bp 710 * @bp: driver handle
709 */ 711 */
710static inline void bnx2x_tx_disable(struct bnx2x *bp) 712static inline void bnx2x_tx_disable(struct bnx2x *bp)
711{ 713{
@@ -880,6 +882,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
880{ 882{
881 int i; 883 int i;
882 884
885 if (fp->disable_tpa)
886 return;
887
883 for (i = 0; i < last; i++) 888 for (i = 0; i < last; i++)
884 bnx2x_free_rx_sge(bp, fp, i); 889 bnx2x_free_rx_sge(bp, fp, i);
885} 890}
@@ -908,36 +913,39 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
908 } 913 }
909} 914}
910 915
911 916static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
912static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
913{ 917{
914 int i, j; 918 int i;
915 919
916 for_each_tx_queue(bp, j) { 920 for (i = 1; i <= NUM_TX_RINGS; i++) {
917 struct bnx2x_fastpath *fp = &bp->fp[j]; 921 struct eth_tx_next_bd *tx_next_bd =
922 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
918 923
919 for (i = 1; i <= NUM_TX_RINGS; i++) { 924 tx_next_bd->addr_hi =
920 struct eth_tx_next_bd *tx_next_bd = 925 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
921 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; 926 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
927 tx_next_bd->addr_lo =
928 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
929 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
930 }
922 931
923 tx_next_bd->addr_hi = 932 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
924 cpu_to_le32(U64_HI(fp->tx_desc_mapping + 933 fp->tx_db.data.zero_fill1 = 0;
925 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 934 fp->tx_db.data.prod = 0;
926 tx_next_bd->addr_lo =
927 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
928 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
929 }
930 935
931 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 936 fp->tx_pkt_prod = 0;
932 fp->tx_db.data.zero_fill1 = 0; 937 fp->tx_pkt_cons = 0;
933 fp->tx_db.data.prod = 0; 938 fp->tx_bd_prod = 0;
939 fp->tx_bd_cons = 0;
940 fp->tx_pkt = 0;
941}
934 942
935 fp->tx_pkt_prod = 0; 943static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
936 fp->tx_pkt_cons = 0; 944{
937 fp->tx_bd_prod = 0; 945 int i;
938 fp->tx_bd_cons = 0; 946
939 fp->tx_pkt = 0; 947 for_each_tx_queue(bp, i)
940 } 948 bnx2x_init_tx_ring_one(&bp->fp[i]);
941} 949}
942 950
943static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) 951static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
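bnx2x_init_tx_ring_one above re-links the last descriptor of every BD page so that it points at the start of the following page, wrapping back to the first page, which lets the hardware walk the multi-page ring as a single circular list. A stand-alone sketch of the same chaining idea, using invented structure and constant names rather than the driver's own:

#include <stdint.h>

#define RING_PAGES	4	/* pages making up one ring (assumed)       */
#define DESC_PER_PAGE	256	/* descriptors per page; the last one links */

struct desc {
	uint64_t next_dma;	/* DMA address of the next page, or unused */
	uint64_t payload;
};

/* Chain RING_PAGES descriptor pages into one circular ring: the last
 * entry of page i carries the DMA address of page (i + 1) % RING_PAGES.
 */
static void chain_ring_pages(struct desc *page[RING_PAGES],
			     const uint64_t page_dma[RING_PAGES])
{
	int i;

	for (i = 0; i < RING_PAGES; i++)
		page[i][DESC_PER_PAGE - 1].next_dma =
			page_dma[(i + 1) % RING_PAGES];
}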
@@ -992,6 +1000,44 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
992 } 1000 }
993} 1001}
994 1002
1003/* Returns the number of actually allocated BDs */
1004static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
1005 int rx_ring_size)
1006{
1007 struct bnx2x *bp = fp->bp;
1008 u16 ring_prod, cqe_ring_prod;
1009 int i;
1010
1011 fp->rx_comp_cons = 0;
1012 cqe_ring_prod = ring_prod = 0;
1013
1014 /* This routine is called only during fp init so
1015 * fp->eth_q_stats.rx_skb_alloc_failed = 0
1016 */
1017 for (i = 0; i < rx_ring_size; i++) {
1018 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
1019 fp->eth_q_stats.rx_skb_alloc_failed++;
1020 continue;
1021 }
1022 ring_prod = NEXT_RX_IDX(ring_prod);
1023 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
1024 WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
1025 }
1026
1027 if (fp->eth_q_stats.rx_skb_alloc_failed)
1028 BNX2X_ERR("was only able to allocate "
1029 "%d rx skbs on queue[%d]\n",
1030 (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
1031
1032 fp->rx_bd_prod = ring_prod;
1033 /* Limit the CQE producer by the CQE ring size */
1034 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
1035 cqe_ring_prod);
1036 fp->rx_pkt = fp->rx_calls = 0;
1037
1038 return i - fp->eth_q_stats.rx_skb_alloc_failed;
1039}
1040
995#ifdef BCM_CNIC 1041#ifdef BCM_CNIC
996static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) 1042static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
997{ 1043{
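bnx2x_alloc_rx_bds above fills the RX ring best-effort: an skb allocation failure is counted in eth_q_stats rather than aborting, and the return value tells the caller how many BDs were actually posted. A hedged sketch of how a caller might use that return value; it is not compilable outside the driver, and the threshold parameter and wrapper name are assumptions, not taken from the patch.

/* Illustrative caller: ask for rx_ring_size buffers and refuse to bring
 * the queue up if too few of them could actually be allocated.
 */
static int example_setup_rx(struct bnx2x_fastpath *fp, int rx_ring_size,
			    int min_usable)
{
	int posted = bnx2x_alloc_rx_bds(fp, rx_ring_size);

	if (posted < min_usable)
		return -ENOMEM;	/* not enough buffers to run this queue */

	return 0;
}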
@@ -1041,12 +1087,23 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
1041 struct cmng_struct_per_port *cmng, 1087 struct cmng_struct_per_port *cmng,
1042 u8 port) 1088 u8 port)
1043{ 1089{
1044 size_t size = sizeof(struct cmng_struct_per_port); 1090 size_t size =
1091 sizeof(struct rate_shaping_vars_per_port) +
1092 sizeof(struct fairness_vars_per_port) +
1093 sizeof(struct safc_struct_per_port) +
1094 sizeof(struct pfc_struct_per_port);
1045 1095
1046 u32 addr = BAR_XSTRORM_INTMEM + 1096 u32 addr = BAR_XSTRORM_INTMEM +
1047 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); 1097 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1048 1098
1049 __storm_memset_struct(bp, addr, size, (u32 *)cmng); 1099 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
1100
1101 addr += size + 4 /* SKIP DCB+LLFC */;
1102 size = sizeof(struct cmng_struct_per_port) -
1103 size /* written */ - 4 /*skipped*/;
1104
1105 __storm_memset_struct(bp, addr, size,
1106 (u32 *)(cmng->traffic_type_to_priority_cos));
1050} 1107}
1051 1108
1052/* HW Lock for shared dual port PHYs */ 1109/* HW Lock for shared dual port PHYs */
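The storm_memset_cmng change above no longer copies cmng_struct_per_port as one block: it writes the rate-shaping, fairness, SAFC and PFC portion first, skips 4 bytes of DCB/LLFC state it must not overwrite, and then writes the trailing traffic_type_to_priority_cos part. The same write-around-a-hole idea in a generic, stand-alone form; the structure and field names below are invented for illustration only.

#include <string.h>
#include <stdint.h>

struct shared_blob {
	uint32_t driver_part_a[8];	/* owned by the driver            */
	uint32_t fw_private;		/* owned by firmware, keep intact */
	uint32_t driver_part_b[4];	/* owned by the driver            */
};

/* Copy only the driver-owned regions into dst, leaving fw_private alone. */
static void write_around_hole(struct shared_blob *dst,
			      const struct shared_blob *src)
{
	memcpy(dst->driver_part_a, src->driver_part_a,
	       sizeof(dst->driver_part_a));
	memcpy(dst->driver_part_b, src->driver_part_b,
	       sizeof(dst->driver_part_b));
}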
@@ -1054,12 +1111,11 @@ void bnx2x_acquire_phy_lock(struct bnx2x *bp);
1054void bnx2x_release_phy_lock(struct bnx2x *bp); 1111void bnx2x_release_phy_lock(struct bnx2x *bp);
1055 1112
1056/** 1113/**
1057 * Extracts MAX BW part from MF configuration. 1114 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
1058 * 1115 *
1059 * @param bp 1116 * @bp: driver handle
1060 * @param mf_cfg 1117 * @mf_cfg: MF configuration
1061 * 1118 *
1062 * @return u16
1063 */ 1119 */
1064static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) 1120static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
1065{ 1121{
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index 9a24d79c71d9..0f8309233ff2 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
1/* bnx2x_dcb.c: Broadcom Everest network driver. 1/* bnx2x_dcb.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright 2009-2010 Broadcom Corporation 3 * Copyright 2009-2011 Broadcom Corporation
4 * 4 *
5 * Unless you and Broadcom execute a separate written software license 5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you 6 * agreement governing use of this software, this software is licensed to you
@@ -571,6 +571,28 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
571{ 571{
572 switch (state) { 572 switch (state) {
573 case BNX2X_DCBX_STATE_NEG_RECEIVED: 573 case BNX2X_DCBX_STATE_NEG_RECEIVED:
574#ifdef BCM_CNIC
575 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
576 struct cnic_ops *c_ops;
577 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
578 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
579 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
580 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
581
582 rcu_read_lock();
583 c_ops = rcu_dereference(bp->cnic_ops);
584 if (c_ops) {
585 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
586 rcu_read_unlock();
587 return;
588 }
589 rcu_read_unlock();
590 }
591
592 /* fall through if no CNIC initialized */
593 case BNX2X_DCBX_STATE_ISCSI_STOPPED:
594#endif
595
574 { 596 {
575 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); 597 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
576#ifdef BCM_DCBNL 598#ifdef BCM_DCBNL
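The added BNX2X_DCBX_STATE_NEG_RECEIVED branch reads bp->cnic_ops under rcu_read_lock() before notifying the CNIC layer, the usual way to call through an ops pointer that another context may publish or clear concurrently. A generic sketch of that pattern with placeholder names:

#include <linux/rcupdate.h>

struct my_ops {
	void (*notify)(void *ctx, int event);
};

struct my_dev {
	struct my_ops __rcu *ops;	/* set/cleared by another subsystem */
	void *ctx;
};

/* Invoke ops->notify() only if the ops pointer is currently published. */
static void my_notify(struct my_dev *dev, int event)
{
	struct my_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(dev->ops);
	if (ops)
		ops->notify(dev->ctx, event);
	rcu_read_unlock();
}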
@@ -1057,12 +1079,6 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
1057 } 1079 }
1058} 1080}
1059 1081
1060
1061/*******************************************************************************
1062 * Description: single priority group
1063 *
1064 * Return:
1065 ******************************************************************************/
1066static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp, 1082static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
1067 struct cos_help_data *cos_data, 1083 struct cos_help_data *cos_data,
1068 u32 pri_join_mask) 1084 u32 pri_join_mask)
@@ -1075,11 +1091,6 @@ static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
1075 cos_data->num_of_cos = 1; 1091 cos_data->num_of_cos = 1;
1076} 1092}
1077 1093
1078/*******************************************************************************
1079 * Description: updating the cos bw
1080 *
1081 * Return:
1082 ******************************************************************************/
1083static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp, 1094static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
1084 struct cos_entry_help_data *data, 1095 struct cos_entry_help_data *data,
1085 u8 pg_bw) 1096 u8 pg_bw)
@@ -1090,11 +1101,6 @@ static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
1090 data->cos_bw += pg_bw; 1101 data->cos_bw += pg_bw;
1091} 1102}
1092 1103
1093/*******************************************************************************
1094 * Description: single priority group
1095 *
1096 * Return:
1097 ******************************************************************************/
1098static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, 1104static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
1099 struct cos_help_data *cos_data, 1105 struct cos_help_data *cos_data,
1100 u32 *pg_pri_orginal_spread, 1106 u32 *pg_pri_orginal_spread,
@@ -1347,11 +1353,6 @@ static void bnx2x_dcbx_two_pg_to_cos_params(
1347 } 1353 }
1348} 1354}
1349 1355
1350/*******************************************************************************
1351 * Description: Still
1352 *
1353 * Return:
1354 ******************************************************************************/
1355static void bnx2x_dcbx_three_pg_to_cos_params( 1356static void bnx2x_dcbx_three_pg_to_cos_params(
1356 struct bnx2x *bp, 1357 struct bnx2x *bp,
1357 struct pg_help_data *pg_help_data, 1358 struct pg_help_data *pg_help_data,
@@ -1539,11 +1540,6 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
1539 } 1540 }
1540} 1541}
1541 1542
1542/*******************************************************************************
1543 * Description: Fill pfc_config struct that will be sent in DCBX start ramrod
1544 *
1545 * Return:
1546 ******************************************************************************/
1547static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) 1543static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
1548{ 1544{
1549 struct flow_control_configuration *pfc_fw_cfg = NULL; 1545 struct flow_control_configuration *pfc_fw_cfg = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index 71b8eda43bd0..bed369d67e02 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
1/* bnx2x_dcb.h: Broadcom Everest network driver. 1/* bnx2x_dcb.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright 2009-2010 Broadcom Corporation 3 * Copyright 2009-2011 Broadcom Corporation
4 * 4 *
5 * Unless you and Broadcom execute a separate written software license 5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you 6 * agreement governing use of this software, this software is licensed to you
@@ -61,9 +61,6 @@ struct bnx2x_dcbx_port_params {
61#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1 61#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
62#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE) 62#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
63 63
64/*******************************************************************************
65 * LLDP protocol configuration parameters.
66 ******************************************************************************/
67struct bnx2x_config_lldp_params { 64struct bnx2x_config_lldp_params {
68 u32 overwrite_settings; 65 u32 overwrite_settings;
69 u32 msg_tx_hold; 66 u32 msg_tx_hold;
@@ -83,9 +80,6 @@ struct bnx2x_admin_priority_app_table {
83 u32 app_id; 80 u32 app_id;
84}; 81};
85 82
86/*******************************************************************************
87 * DCBX protocol configuration parameters.
88 ******************************************************************************/
89struct bnx2x_config_dcbx_params { 83struct bnx2x_config_dcbx_params {
90 u32 overwrite_settings; 84 u32 overwrite_settings;
91 u32 admin_dcbx_version; 85 u32 admin_dcbx_version;
@@ -183,9 +177,13 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
183 177
184enum { 178enum {
185 BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, 179 BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
186 BNX2X_DCBX_STATE_TX_PAUSED = 0x2, 180#ifdef BCM_CNIC
187 BNX2X_DCBX_STATE_TX_RELEASED = 0x4 181 BNX2X_DCBX_STATE_ISCSI_STOPPED,
182#endif
183 BNX2X_DCBX_STATE_TX_PAUSED,
184 BNX2X_DCBX_STATE_TX_RELEASED
188}; 185};
186
189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); 187void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
190 188
191/* DCB netlink */ 189/* DCB netlink */
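With the explicit 0x2/0x4 initializers gone, the DCBX states after NEG_RECEIVED take ordinary sequential enum values, and the new ISCSI_STOPPED member exists only when BCM_CNIC is defined, which shifts the later values by one between the two configurations. Spelled out below; the numbers follow from normal C enumeration rules and are not stated in the patch itself.

/* With BCM_CNIC defined:                  Without BCM_CNIC:
 *   BNX2X_DCBX_STATE_NEG_RECEIVED  = 1      BNX2X_DCBX_STATE_NEG_RECEIVED = 1
 *   BNX2X_DCBX_STATE_ISCSI_STOPPED = 2      BNX2X_DCBX_STATE_TX_PAUSED    = 2
 *   BNX2X_DCBX_STATE_TX_PAUSED     = 3      BNX2X_DCBX_STATE_TX_RELEASED  = 3
 *   BNX2X_DCBX_STATE_TX_RELEASED   = 4
 */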
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 89cb977898cb..727fe89ff37f 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
1/* bnx2x_ethtool.c: Broadcom Everest network driver. 1/* bnx2x_ethtool.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -167,6 +167,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
167{ 167{
168 struct bnx2x *bp = netdev_priv(dev); 168 struct bnx2x *bp = netdev_priv(dev);
169 int cfg_idx = bnx2x_get_link_cfg_idx(bp); 169 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
170
170 /* Dual Media boards present all available port types */ 171 /* Dual Media boards present all available port types */
171 cmd->supported = bp->port.supported[cfg_idx] | 172 cmd->supported = bp->port.supported[cfg_idx] |
172 (bp->port.supported[cfg_idx ^ 1] & 173 (bp->port.supported[cfg_idx ^ 1] &
@@ -176,16 +177,16 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
176 if ((bp->state == BNX2X_STATE_OPEN) && 177 if ((bp->state == BNX2X_STATE_OPEN) &&
177 !(bp->flags & MF_FUNC_DIS) && 178 !(bp->flags & MF_FUNC_DIS) &&
178 (bp->link_vars.link_up)) { 179 (bp->link_vars.link_up)) {
179 cmd->speed = bp->link_vars.line_speed; 180 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
180 cmd->duplex = bp->link_vars.duplex; 181 cmd->duplex = bp->link_vars.duplex;
181 } else { 182 } else {
182 183 ethtool_cmd_speed_set(
183 cmd->speed = bp->link_params.req_line_speed[cfg_idx]; 184 cmd, bp->link_params.req_line_speed[cfg_idx]);
184 cmd->duplex = bp->link_params.req_duplex[cfg_idx]; 185 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
185 } 186 }
186 187
187 if (IS_MF(bp)) 188 if (IS_MF(bp))
188 cmd->speed = bnx2x_get_mf_speed(bp); 189 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
189 190
190 if (bp->port.supported[cfg_idx] & SUPPORTED_TP) 191 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
191 cmd->port = PORT_TP; 192 cmd->port = PORT_TP;
@@ -206,10 +207,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
206 cmd->maxrxpkt = 0; 207 cmd->maxrxpkt = 0;
207 208
208 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 209 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
209 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" 210 DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n"
210 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" 211 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
211 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", 212 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
212 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 213 cmd->cmd, cmd->supported, cmd->advertising,
214 ethtool_cmd_speed(cmd),
213 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 215 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
214 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 216 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
215 217
@@ -226,16 +228,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
226 return 0; 228 return 0;
227 229
228 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 230 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
229 " supported 0x%x advertising 0x%x speed %d speed_hi %d\n" 231 " supported 0x%x advertising 0x%x speed %u\n"
230 " duplex %d port %d phy_address %d transceiver %d\n" 232 " duplex %d port %d phy_address %d transceiver %d\n"
231 " autoneg %d maxtxpkt %d maxrxpkt %d\n", 233 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
232 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 234 cmd->cmd, cmd->supported, cmd->advertising,
233 cmd->speed_hi, 235 ethtool_cmd_speed(cmd),
234 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 236 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
235 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 237 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
236 238
237 speed = cmd->speed; 239 speed = ethtool_cmd_speed(cmd);
238 speed |= (cmd->speed_hi << 16);
239 240
240 if (IS_MF_SI(bp)) { 241 if (IS_MF_SI(bp)) {
241 u32 part; 242 u32 part;
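The open-coded cmd->speed | (cmd->speed_hi << 16) combination is replaced by the ethtool_cmd_speed()/ethtool_cmd_speed_set() accessors, which pack and unpack the 32-bit link speed across the two 16-bit ethtool_cmd fields. They behave roughly like the sketch below; the kernel's ethtool header holds the authoritative definitions.

/* Simplified view of the accessors used above. */
static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
{
	return (ep->speed_hi << 16) | ep->speed;
}

static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
	ep->speed = (__u16)speed;
	ep->speed_hi = (__u16)(speed >> 16);
}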
@@ -439,7 +440,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
439 break; 440 break;
440 441
441 default: 442 default:
442 DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed); 443 DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
443 return -EINVAL; 444 return -EINVAL;
444 } 445 }
445 446
@@ -1219,7 +1220,8 @@ static int bnx2x_set_ringparam(struct net_device *dev,
1219 } 1220 }
1220 1221
1221 if ((ering->rx_pending > MAX_RX_AVAIL) || 1222 if ((ering->rx_pending > MAX_RX_AVAIL) ||
1222 (ering->rx_pending < MIN_RX_AVAIL) || 1223 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1224 MIN_RX_SIZE_TPA)) ||
1223 (ering->tx_pending > MAX_TX_AVAIL) || 1225 (ering->tx_pending > MAX_TX_AVAIL) ||
1224 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 1226 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
1225 return -EINVAL; 1227 return -EINVAL;
@@ -1299,91 +1301,6 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1299 return 0; 1301 return 0;
1300} 1302}
1301 1303
1302static int bnx2x_set_flags(struct net_device *dev, u32 data)
1303{
1304 struct bnx2x *bp = netdev_priv(dev);
1305 int changed = 0;
1306 int rc = 0;
1307
1308 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1309 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1310 return -EAGAIN;
1311 }
1312
1313 if (!(data & ETH_FLAG_RXVLAN))
1314 return -EINVAL;
1315
1316 if ((data & ETH_FLAG_LRO) && bp->rx_csum && bp->disable_tpa)
1317 return -EINVAL;
1318
1319 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_LRO | ETH_FLAG_RXVLAN |
1320 ETH_FLAG_TXVLAN | ETH_FLAG_RXHASH);
1321 if (rc)
1322 return rc;
1323
1324 /* TPA requires Rx CSUM offloading */
1325 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
1326 if (!(bp->flags & TPA_ENABLE_FLAG)) {
1327 bp->flags |= TPA_ENABLE_FLAG;
1328 changed = 1;
1329 }
1330 } else if (bp->flags & TPA_ENABLE_FLAG) {
1331 dev->features &= ~NETIF_F_LRO;
1332 bp->flags &= ~TPA_ENABLE_FLAG;
1333 changed = 1;
1334 }
1335
1336 if (changed && netif_running(dev)) {
1337 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1338 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
1339 }
1340
1341 return rc;
1342}
1343
1344static u32 bnx2x_get_rx_csum(struct net_device *dev)
1345{
1346 struct bnx2x *bp = netdev_priv(dev);
1347
1348 return bp->rx_csum;
1349}
1350
1351static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
1352{
1353 struct bnx2x *bp = netdev_priv(dev);
1354 int rc = 0;
1355
1356 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1357 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1358 return -EAGAIN;
1359 }
1360
1361 bp->rx_csum = data;
1362
1363 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
1364 TPA'ed packets will be discarded due to wrong TCP CSUM */
1365 if (!data) {
1366 u32 flags = ethtool_op_get_flags(dev);
1367
1368 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
1369 }
1370
1371 return rc;
1372}
1373
1374static int bnx2x_set_tso(struct net_device *dev, u32 data)
1375{
1376 if (data) {
1377 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
1378 dev->features |= NETIF_F_TSO6;
1379 } else {
1380 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
1381 dev->features &= ~NETIF_F_TSO6;
1382 }
1383
1384 return 0;
1385}
1386
1387static const struct { 1304static const struct {
1388 char string[ETH_GSTRING_LEN]; 1305 char string[ETH_GSTRING_LEN];
1389} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { 1306} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
@@ -2097,35 +2014,37 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
2097 } 2014 }
2098} 2015}
2099 2016
2100static int bnx2x_phys_id(struct net_device *dev, u32 data) 2017static int bnx2x_set_phys_id(struct net_device *dev,
2018 enum ethtool_phys_id_state state)
2101{ 2019{
2102 struct bnx2x *bp = netdev_priv(dev); 2020 struct bnx2x *bp = netdev_priv(dev);
2103 int i;
2104 2021
2105 if (!netif_running(dev)) 2022 if (!netif_running(dev))
2106 return 0; 2023 return -EAGAIN;
2107 2024
2108 if (!bp->port.pmf) 2025 if (!bp->port.pmf)
2109 return 0; 2026 return -EOPNOTSUPP;
2110 2027
2111 if (data == 0) 2028 switch (state) {
2112 data = 2; 2029 case ETHTOOL_ID_ACTIVE:
2030 return 1; /* cycle on/off once per second */
2113 2031
2114 for (i = 0; i < (data * 2); i++) { 2032 case ETHTOOL_ID_ON:
2115 if ((i % 2) == 0) 2033 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2116 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2034 LED_MODE_ON, SPEED_1000);
2117 LED_MODE_ON, SPEED_1000); 2035 break;
2118 else
2119 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2120 LED_MODE_FRONT_PANEL_OFF, 0);
2121 2036
2122 msleep_interruptible(500); 2037 case ETHTOOL_ID_OFF:
2123 if (signal_pending(current)) 2038 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2124 break; 2039 LED_MODE_FRONT_PANEL_OFF, 0);
2125 } 2040
2041 break;
2126 2042
2127 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2043 case ETHTOOL_ID_INACTIVE:
2128 LED_MODE_OPER, bp->link_vars.line_speed); 2044 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2045 LED_MODE_OPER,
2046 bp->link_vars.line_speed);
2047 }
2129 2048
2130 return 0; 2049 return 0;
2131} 2050}
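The driver's own blink loop with msleep_interruptible() and a signal check is replaced by the ethtool .set_phys_id callback: the core owns the timing and calls back with ETHTOOL_ID_ON/OFF, while the value returned for ETHTOOL_ID_ACTIVE (1 here, per the comment above) tells it how many on/off cycles per second to drive. A minimal driver-side sketch; the example_led_*() helpers are placeholders, not a real API.

static int example_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;			/* one on/off cycle per second */
	case ETHTOOL_ID_ON:
		example_led_on(dev);
		break;
	case ETHTOOL_ID_OFF:
		example_led_off(dev);
		break;
	case ETHTOOL_ID_INACTIVE:
		example_led_restore(dev);	/* back to normal operation */
		break;
	}
	return 0;
}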
@@ -2204,20 +2123,10 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
2204 .set_ringparam = bnx2x_set_ringparam, 2123 .set_ringparam = bnx2x_set_ringparam,
2205 .get_pauseparam = bnx2x_get_pauseparam, 2124 .get_pauseparam = bnx2x_get_pauseparam,
2206 .set_pauseparam = bnx2x_set_pauseparam, 2125 .set_pauseparam = bnx2x_set_pauseparam,
2207 .get_rx_csum = bnx2x_get_rx_csum,
2208 .set_rx_csum = bnx2x_set_rx_csum,
2209 .get_tx_csum = ethtool_op_get_tx_csum,
2210 .set_tx_csum = ethtool_op_set_tx_hw_csum,
2211 .set_flags = bnx2x_set_flags,
2212 .get_flags = ethtool_op_get_flags,
2213 .get_sg = ethtool_op_get_sg,
2214 .set_sg = ethtool_op_set_sg,
2215 .get_tso = ethtool_op_get_tso,
2216 .set_tso = bnx2x_set_tso,
2217 .self_test = bnx2x_self_test, 2126 .self_test = bnx2x_self_test,
2218 .get_sset_count = bnx2x_get_sset_count, 2127 .get_sset_count = bnx2x_get_sset_count,
2219 .get_strings = bnx2x_get_strings, 2128 .get_strings = bnx2x_get_strings,
2220 .phys_id = bnx2x_phys_id, 2129 .set_phys_id = bnx2x_set_phys_id,
2221 .get_ethtool_stats = bnx2x_get_ethtool_stats, 2130 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2222 .get_rxnfc = bnx2x_get_rxnfc, 2131 .get_rxnfc = bnx2x_get_rxnfc,
2223 .get_rxfh_indir = bnx2x_get_rxfh_indir, 2132 .get_rxfh_indir = bnx2x_get_rxfh_indir,
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index f4e5b1ce8149..9fe367836a57 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver. 1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index f807262911e5..f4a07fbaed05 100644
--- a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
1/* bnx2x_fw_file_hdr.h: FW binary file header structure. 1/* bnx2x_fw_file_hdr.h: FW binary file header structure.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index dac1bf9cbbfa..cdf19fe7c7f6 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver. 1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -1929,7 +1929,7 @@ struct host_func_stats {
1929 1929
1930#define BCM_5710_FW_MAJOR_VERSION 6 1930#define BCM_5710_FW_MAJOR_VERSION 6
1931#define BCM_5710_FW_MINOR_VERSION 2 1931#define BCM_5710_FW_MINOR_VERSION 2
1932#define BCM_5710_FW_REVISION_VERSION 5 1932#define BCM_5710_FW_REVISION_VERSION 9
1933#define BCM_5710_FW_ENGINEERING_VERSION 0 1933#define BCM_5710_FW_ENGINEERING_VERSION 0
1934#define BCM_5710_FW_COMPILE_FLAGS 1 1934#define BCM_5710_FW_COMPILE_FLAGS 1
1935 1935
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index fa6dbe3f2058..d5399206f66e 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
1/* bnx2x_init.h: Broadcom Everest network driver. 1/* bnx2x_init.h: Broadcom Everest network driver.
2 * Structures and macroes needed during the initialization. 2 * Structures and macroes needed during the initialization.
3 * 3 *
4 * Copyright (c) 2007-2009 Broadcom Corporation 4 * Copyright (c) 2007-2011 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 66df29fcf751..aafd0232393f 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
2 * Static functions needed during the initialization. 2 * Static functions needed during the initialization.
3 * This file is "included" in bnx2x_main.c. 3 * This file is "included" in bnx2x_main.c.
4 * 4 *
5 * Copyright (c) 2007-2010 Broadcom Corporation 5 * Copyright (c) 2007-2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 974ef2be36a5..076e11f5769f 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -385,7 +385,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
385 return 0; 385 return 0;
386} 386}
387/******************************************************************/ 387/******************************************************************/
388/* ETS section */ 388/* PFC section */
389/******************************************************************/ 389/******************************************************************/
390 390
391static void bnx2x_bmac2_get_pfc_stat(struct link_params *params, 391static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
@@ -1301,14 +1301,12 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1301 return 0; 1301 return 0;
1302} 1302}
1303 1303
1304/* 1304/**
1305 * get_emac_base 1305 * bnx2x_get_emac_base - retrieve emac base address
1306 *
1307 * @param cb
1308 * @param mdc_mdio_access
1309 * @param port
1310 * 1306 *
1311 * @return u32 1307 * @bp: driver handle
1308 * @mdc_mdio_access: access type
1309 * @port: port id
1312 * 1310 *
1313 * This function selects the MDC/MDIO access (through emac0 or 1311 * This function selects the MDC/MDIO access (through emac0 or
1314 * emac1) depend on the mdc_mdio_access, port, port swapped. Each 1312 * emac1) depend on the mdc_mdio_access, port, port swapped. Each
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index a97a4a1c344f..f45c0caf3240 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
1/* bnx2x_main.c: Broadcom Everest network driver. 1/* bnx2x_main.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -2036,7 +2036,7 @@ static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2036 return CMNG_FNS_NONE; 2036 return CMNG_FNS_NONE;
2037} 2037}
2038 2038
2039static void bnx2x_read_mf_cfg(struct bnx2x *bp) 2039void bnx2x_read_mf_cfg(struct bnx2x *bp)
2040{ 2040{
2041 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); 2041 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2042 2042
@@ -2123,7 +2123,6 @@ static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2123/* This function is called upon link interrupt */ 2123/* This function is called upon link interrupt */
2124static void bnx2x_link_attn(struct bnx2x *bp) 2124static void bnx2x_link_attn(struct bnx2x *bp)
2125{ 2125{
2126 u32 prev_link_status = bp->link_vars.link_status;
2127 /* Make sure that we are synced with the current statistics */ 2126 /* Make sure that we are synced with the current statistics */
2128 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2127 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2129 2128
@@ -2168,17 +2167,15 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2168 "single function mode without fairness\n"); 2167 "single function mode without fairness\n");
2169 } 2168 }
2170 2169
2170 __bnx2x_link_report(bp);
2171
2171 if (IS_MF(bp)) 2172 if (IS_MF(bp))
2172 bnx2x_link_sync_notify(bp); 2173 bnx2x_link_sync_notify(bp);
2173
2174 /* indicate link status only if link status actually changed */
2175 if (prev_link_status != bp->link_vars.link_status)
2176 bnx2x_link_report(bp);
2177} 2174}
2178 2175
2179void bnx2x__link_status_update(struct bnx2x *bp) 2176void bnx2x__link_status_update(struct bnx2x *bp)
2180{ 2177{
2181 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) 2178 if (bp->state != BNX2X_STATE_OPEN)
2182 return; 2179 return;
2183 2180
2184 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2181 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
@@ -2188,10 +2185,6 @@ void bnx2x__link_status_update(struct bnx2x *bp)
2188 else 2185 else
2189 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2186 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2190 2187
2191 /* the link status update could be the result of a DCC event
2192 hence re-read the shmem mf configuration */
2193 bnx2x_read_mf_cfg(bp);
2194
2195 /* indicate link status */ 2188 /* indicate link status */
2196 bnx2x_link_report(bp); 2189 bnx2x_link_report(bp);
2197} 2190}
@@ -3120,10 +3113,14 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3120 if (val & DRV_STATUS_SET_MF_BW) 3113 if (val & DRV_STATUS_SET_MF_BW)
3121 bnx2x_set_mf_bw(bp); 3114 bnx2x_set_mf_bw(bp);
3122 3115
3123 bnx2x__link_status_update(bp);
3124 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3116 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3125 bnx2x_pmf_update(bp); 3117 bnx2x_pmf_update(bp);
3126 3118
3119 /* Always call it here: bnx2x_link_report() will
3120 * prevent the link indication duplication.
3121 */
3122 bnx2x__link_status_update(bp);
3123
3127 if (bp->port.pmf && 3124 if (bp->port.pmf &&
3128 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 3125 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3129 bp->dcbx_enabled > 0) 3126 bp->dcbx_enabled > 0)
@@ -3904,10 +3901,9 @@ static void bnx2x_timer(unsigned long data)
3904 3901
3905 if (poll) { 3902 if (poll) {
3906 struct bnx2x_fastpath *fp = &bp->fp[0]; 3903 struct bnx2x_fastpath *fp = &bp->fp[0];
3907 int rc;
3908 3904
3909 bnx2x_tx_int(fp); 3905 bnx2x_tx_int(fp);
3910 rc = bnx2x_rx_int(fp, 1000); 3906 bnx2x_rx_int(fp, 1000);
3911 } 3907 }
3912 3908
3913 if (!BP_NOMCP(bp)) { 3909 if (!BP_NOMCP(bp)) {
@@ -4062,7 +4058,6 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4062 struct hc_status_block_data_e2 sb_data_e2; 4058 struct hc_status_block_data_e2 sb_data_e2;
4063 struct hc_status_block_data_e1x sb_data_e1x; 4059 struct hc_status_block_data_e1x sb_data_e1x;
4064 struct hc_status_block_sm *hc_sm_p; 4060 struct hc_status_block_sm *hc_sm_p;
4065 struct hc_index_data *hc_index_p;
4066 int data_size; 4061 int data_size;
4067 u32 *sb_data_p; 4062 u32 *sb_data_p;
4068 4063
@@ -4083,7 +4078,6 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4083 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); 4078 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4084 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); 4079 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4085 hc_sm_p = sb_data_e2.common.state_machine; 4080 hc_sm_p = sb_data_e2.common.state_machine;
4086 hc_index_p = sb_data_e2.index_data;
4087 sb_data_p = (u32 *)&sb_data_e2; 4081 sb_data_p = (u32 *)&sb_data_e2;
4088 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 4082 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4089 } else { 4083 } else {
@@ -4097,7 +4091,6 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4097 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); 4091 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4098 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); 4092 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4099 hc_sm_p = sb_data_e1x.common.state_machine; 4093 hc_sm_p = sb_data_e1x.common.state_machine;
4100 hc_index_p = sb_data_e1x.index_data;
4101 sb_data_p = (u32 *)&sb_data_e1x; 4094 sb_data_p = (u32 *)&sb_data_e1x;
4102 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 4095 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4103 } 4096 }
@@ -4454,7 +4447,7 @@ static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4454 4447
4455 fp->state = BNX2X_FP_STATE_CLOSED; 4448 fp->state = BNX2X_FP_STATE_CLOSED;
4456 4449
4457 fp->index = fp->cid = fp_idx; 4450 fp->cid = fp_idx;
4458 fp->cl_id = BP_L_ID(bp) + fp_idx; 4451 fp->cl_id = BP_L_ID(bp) + fp_idx;
4459 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE; 4452 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4460 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE; 4453 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
@@ -4566,9 +4559,11 @@ gunzip_nomem1:
4566 4559
4567static void bnx2x_gunzip_end(struct bnx2x *bp) 4560static void bnx2x_gunzip_end(struct bnx2x *bp)
4568{ 4561{
4569 kfree(bp->strm->workspace); 4562 if (bp->strm) {
4570 kfree(bp->strm); 4563 kfree(bp->strm->workspace);
4571 bp->strm = NULL; 4564 kfree(bp->strm);
4565 bp->strm = NULL;
4566 }
4572 4567
4573 if (bp->gunzip_buf) { 4568 if (bp->gunzip_buf) {
4574 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, 4569 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
@@ -5876,9 +5871,6 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5876 5871
5877 bp->dmae_ready = 0; 5872 bp->dmae_ready = 0;
5878 spin_lock_init(&bp->dmae_lock); 5873 spin_lock_init(&bp->dmae_lock);
5879 rc = bnx2x_gunzip_init(bp);
5880 if (rc)
5881 return rc;
5882 5874
5883 switch (load_code) { 5875 switch (load_code) {
5884 case FW_MSG_CODE_DRV_LOAD_COMMON: 5876 case FW_MSG_CODE_DRV_LOAD_COMMON:
@@ -5922,80 +5914,10 @@ init_hw_err:
5922 5914
5923void bnx2x_free_mem(struct bnx2x *bp) 5915void bnx2x_free_mem(struct bnx2x *bp)
5924{ 5916{
5925 5917 bnx2x_gunzip_end(bp);
5926#define BNX2X_PCI_FREE(x, y, size) \
5927 do { \
5928 if (x) { \
5929 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5930 x = NULL; \
5931 y = 0; \
5932 } \
5933 } while (0)
5934
5935#define BNX2X_FREE(x) \
5936 do { \
5937 if (x) { \
5938 kfree((void *)x); \
5939 x = NULL; \
5940 } \
5941 } while (0)
5942
5943 int i;
5944 5918
5945 /* fastpath */ 5919 /* fastpath */
5946 /* Common */ 5920 bnx2x_free_fp_mem(bp);
5947 for_each_queue(bp, i) {
5948#ifdef BCM_CNIC
5949 /* FCoE client uses default status block */
5950 if (IS_FCOE_IDX(i)) {
5951 union host_hc_status_block *sb =
5952 &bnx2x_fp(bp, i, status_blk);
5953 memset(sb, 0, sizeof(union host_hc_status_block));
5954 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5955 } else {
5956#endif
5957 /* status blocks */
5958 if (CHIP_IS_E2(bp))
5959 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5960 bnx2x_fp(bp, i, status_blk_mapping),
5961 sizeof(struct host_hc_status_block_e2));
5962 else
5963 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5964 bnx2x_fp(bp, i, status_blk_mapping),
5965 sizeof(struct host_hc_status_block_e1x));
5966#ifdef BCM_CNIC
5967 }
5968#endif
5969 }
5970 /* Rx */
5971 for_each_rx_queue(bp, i) {
5972
5973 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5974 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5975 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5976 bnx2x_fp(bp, i, rx_desc_mapping),
5977 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5978
5979 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5980 bnx2x_fp(bp, i, rx_comp_mapping),
5981 sizeof(struct eth_fast_path_rx_cqe) *
5982 NUM_RCQ_BD);
5983
5984 /* SGE ring */
5985 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5986 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5987 bnx2x_fp(bp, i, rx_sge_mapping),
5988 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5989 }
5990 /* Tx */
5991 for_each_tx_queue(bp, i) {
5992
5993 /* fastpath tx rings: tx_buf tx_desc */
5994 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5995 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5996 bnx2x_fp(bp, i, tx_desc_mapping),
5997 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5998 }
5999 /* end of fastpath */ 5921 /* end of fastpath */
6000 5922
6001 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 5923 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
@@ -6028,101 +5950,13 @@ void bnx2x_free_mem(struct bnx2x *bp)
6028 BCM_PAGE_SIZE * NUM_EQ_PAGES); 5950 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6029 5951
6030 BNX2X_FREE(bp->rx_indir_table); 5952 BNX2X_FREE(bp->rx_indir_table);
6031
6032#undef BNX2X_PCI_FREE
6033#undef BNX2X_KFREE
6034} 5953}
6035 5954
6036static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6037{
6038 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6039 if (CHIP_IS_E2(bp)) {
6040 bnx2x_fp(bp, index, sb_index_values) =
6041 (__le16 *)status_blk.e2_sb->sb.index_values;
6042 bnx2x_fp(bp, index, sb_running_index) =
6043 (__le16 *)status_blk.e2_sb->sb.running_index;
6044 } else {
6045 bnx2x_fp(bp, index, sb_index_values) =
6046 (__le16 *)status_blk.e1x_sb->sb.index_values;
6047 bnx2x_fp(bp, index, sb_running_index) =
6048 (__le16 *)status_blk.e1x_sb->sb.running_index;
6049 }
6050}
6051 5955
6052int bnx2x_alloc_mem(struct bnx2x *bp) 5956int bnx2x_alloc_mem(struct bnx2x *bp)
6053{ 5957{
6054#define BNX2X_PCI_ALLOC(x, y, size) \ 5958 if (bnx2x_gunzip_init(bp))
6055 do { \ 5959 return -ENOMEM;
6056 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6057 if (x == NULL) \
6058 goto alloc_mem_err; \
6059 memset(x, 0, size); \
6060 } while (0)
6061
6062#define BNX2X_ALLOC(x, size) \
6063 do { \
6064 x = kzalloc(size, GFP_KERNEL); \
6065 if (x == NULL) \
6066 goto alloc_mem_err; \
6067 } while (0)
6068
6069 int i;
6070
6071 /* fastpath */
6072 /* Common */
6073 for_each_queue(bp, i) {
6074 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
6075 bnx2x_fp(bp, i, bp) = bp;
6076 /* status blocks */
6077#ifdef BCM_CNIC
6078 if (!IS_FCOE_IDX(i)) {
6079#endif
6080 if (CHIP_IS_E2(bp))
6081 BNX2X_PCI_ALLOC(sb->e2_sb,
6082 &bnx2x_fp(bp, i, status_blk_mapping),
6083 sizeof(struct host_hc_status_block_e2));
6084 else
6085 BNX2X_PCI_ALLOC(sb->e1x_sb,
6086 &bnx2x_fp(bp, i, status_blk_mapping),
6087 sizeof(struct host_hc_status_block_e1x));
6088#ifdef BCM_CNIC
6089 }
6090#endif
6091 set_sb_shortcuts(bp, i);
6092 }
6093 /* Rx */
6094 for_each_queue(bp, i) {
6095
6096 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6097 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6098 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6099 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6100 &bnx2x_fp(bp, i, rx_desc_mapping),
6101 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6102
6103 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6104 &bnx2x_fp(bp, i, rx_comp_mapping),
6105 sizeof(struct eth_fast_path_rx_cqe) *
6106 NUM_RCQ_BD);
6107
6108 /* SGE ring */
6109 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6110 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6111 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6112 &bnx2x_fp(bp, i, rx_sge_mapping),
6113 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6114 }
6115 /* Tx */
6116 for_each_queue(bp, i) {
6117
6118 /* fastpath tx rings: tx_buf tx_desc */
6119 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6120 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6121 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6122 &bnx2x_fp(bp, i, tx_desc_mapping),
6123 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6124 }
6125 /* end of fastpath */
6126 5960
6127#ifdef BCM_CNIC 5961#ifdef BCM_CNIC
6128 if (CHIP_IS_E2(bp)) 5962 if (CHIP_IS_E2(bp))
@@ -6162,14 +5996,18 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
6162 5996
6163 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) * 5997 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
6164 TSTORM_INDIRECTION_TABLE_SIZE); 5998 TSTORM_INDIRECTION_TABLE_SIZE);
5999
6000 /* fastpath */
6001 /* need to be done at the end, since it's self adjusting to amount
6002 * of memory available for RSS queues
6003 */
6004 if (bnx2x_alloc_fp_mem(bp))
6005 goto alloc_mem_err;
6165 return 0; 6006 return 0;
6166 6007
6167alloc_mem_err: 6008alloc_mem_err:
6168 bnx2x_free_mem(bp); 6009 bnx2x_free_mem(bp);
6169 return -ENOMEM; 6010 return -ENOMEM;
6170
6171#undef BNX2X_PCI_ALLOC
6172#undef BNX2X_ALLOC
6173} 6011}
6174 6012
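The BNX2X_PCI_ALLOC/BNX2X_ALLOC macro definitions dropped from bnx2x_alloc_mem wrap the usual allocate, zero, and jump-to-a-shared-error-label idiom around dma_alloc_coherent() and kzalloc(); the per-queue fastpath allocations now live in bnx2x_alloc_fp_mem(), called last because it adapts to whatever memory remains, per the comment above. A stand-alone sketch of the same idiom with invented names:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Illustrative only: allocate two DMA-coherent blocks, undoing the first
 * if the second fails - the same shape the removed macros gave the code.
 */
static int example_alloc(struct device *dev, size_t sz,
			 void **a, dma_addr_t *a_dma,
			 void **b, dma_addr_t *b_dma)
{
	*a = dma_alloc_coherent(dev, sz, a_dma, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;
	memset(*a, 0, sz);

	*b = dma_alloc_coherent(dev, sz, b_dma, GFP_KERNEL);
	if (!*b)
		goto err_free_a;
	memset(*b, 0, sz);

	return 0;

err_free_a:
	dma_free_coherent(dev, sz, *a, *a_dma);
	*a = NULL;
	return -ENOMEM;
}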
6175/* 6013/*
@@ -6197,14 +6035,14 @@ static int bnx2x_func_stop(struct bnx2x *bp)
6197} 6035}
6198 6036
6199/** 6037/**
6200 * Sets a MAC in a CAM for a few L2 Clients for E1x chips 6038 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
6201 * 6039 *
6202 * @param bp driver descriptor 6040 * @bp: driver handle
6203 * @param set set or clear an entry (1 or 0) 6041 * @set: set or clear an entry (1 or 0)
6204 * @param mac pointer to a buffer containing a MAC 6042 * @mac: pointer to a buffer containing a MAC
6205 * @param cl_bit_vec bit vector of clients to register a MAC for 6043 * @cl_bit_vec: bit vector of clients to register a MAC for
6206 * @param cam_offset offset in a CAM to use 6044 * @cam_offset: offset in a CAM to use
6207 * @param is_bcast is the set MAC a broadcast address (for E1 only) 6045 * @is_bcast: is the set MAC a broadcast address (for E1 only)
6208 */ 6046 */
6209static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac, 6047static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6210 u32 cl_bit_vec, u8 cam_offset, 6048 u32 cl_bit_vec, u8 cam_offset,
@@ -6564,14 +6402,13 @@ void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6564 6402
6565#ifdef BCM_CNIC 6403#ifdef BCM_CNIC
6566/** 6404/**
6567 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH 6405 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
6568 * MAC(s). This function will wait until the ramdord completion
6569 * returns.
6570 * 6406 *
6571 * @param bp driver handle 6407 * @bp: driver handle
6572 * @param set set or clear the CAM entry 6408 * @set: set or clear the CAM entry
6573 * 6409 *
6574 * @return 0 if cussess, -ENODEV if ramrod doesn't return. 6410 * This function will wait until the ramrod completion returns.
6411 * Return 0 if success, -ENODEV if ramrod doesn't return.
6575 */ 6412 */
6576static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 6413static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6577{ 6414{
@@ -6592,14 +6429,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6592} 6429}
6593 6430
6594/** 6431/**
6595 * Set FCoE L2 MAC(s) at the next enties in the CAM after the 6432 * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
6596 * ETH MAC(s). This function will wait until the ramdord
6597 * completion returns.
6598 * 6433 *
6599 * @param bp driver handle 6434 * @bp: driver handle
6600 * @param set set or clear the CAM entry 6435 * @set: set or clear the CAM entry
6601 * 6436 *
6602 * @return 0 if cussess, -ENODEV if ramrod doesn't return. 6437 * This function will wait until the ramrod completion returns.
6438 * Returns 0 if success, -ENODEV if ramrod doesn't return.
6603 */ 6439 */
6604int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set) 6440int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6605{ 6441{
@@ -6803,12 +6639,11 @@ static int bnx2x_setup_fw_client(struct bnx2x *bp,
6803} 6639}
6804 6640
6805/** 6641/**
6806 * Configure interrupt mode according to current configuration. 6642 * bnx2x_set_int_mode - configure interrupt mode
6807 * In case of MSI-X it will also try to enable MSI-X.
6808 * 6643 *
6809 * @param bp 6644 * @bp: driver handle
6810 * 6645 *
6811 * @return int 6646 * In case of MSI-X it will also try to enable MSI-X.
6812 */ 6647 */
6813static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) 6648static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6814{ 6649{
@@ -7392,10 +7227,11 @@ static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7392 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 7227 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7393} 7228}
7394 7229
7395/* Restore the value of the `magic' bit. 7230/**
7231 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
7396 * 7232 *
7397 * @param pdev Device handle. 7233 * @bp: driver handle
7398 * @param magic_val Old value of the `magic' bit. 7234 * @magic_val: old value of the `magic' bit.
7399 */ 7235 */
7400static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 7236static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7401{ 7237{
@@ -7406,10 +7242,12 @@ static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7406} 7242}
7407 7243
7408/** 7244/**
7409 * Prepares for MCP reset: takes care of CLP configurations. 7245 * bnx2x_reset_mcp_prep - prepare for MCP reset.
7410 * 7246 *
7411 * @param bp 7247 * @bp: driver handle
7412 * @param magic_val Old value of 'magic' bit. 7248 * @magic_val: old value of 'magic' bit.
7249 *
7250 * Takes care of CLP configurations.
7413 */ 7251 */
7414static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) 7252static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7415{ 7253{
@@ -7434,10 +7272,10 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7434#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 7272#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7435#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 7273#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7436 7274
7437/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10, 7275/**
7438 * depending on the HW type. 7276 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
7439 * 7277 *
7440 * @param bp 7278 * @bp: driver handle
7441 */ 7279 */
7442static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) 7280static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7443{ 7281{
@@ -8059,13 +7897,9 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8059 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 7897 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
8060 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 7898 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
8061 7899
8062 if (BP_E1HVN(bp) == 0) { 7900 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8063 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 7901 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8064 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 7902
8065 } else {
8066 /* no WOL capability for E1HVN != 0 */
8067 bp->flags |= NO_WOL_FLAG;
8068 }
8069 BNX2X_DEV_INFO("%sWoL capable\n", 7903 BNX2X_DEV_INFO("%sWoL capable\n",
8070 (bp->flags & NO_WOL_FLAG) ? "not " : ""); 7904 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8071 7905
@@ -8571,15 +8405,6 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8571 BNX2X_DEV_INFO("Read iSCSI MAC: " 8405 BNX2X_DEV_INFO("Read iSCSI MAC: "
8572 "0x%x:0x%04x\n", val2, val); 8406 "0x%x:0x%04x\n", val2, val);
8573 bnx2x_set_mac_buf(iscsi_mac, val, val2); 8407 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8574
8575 /* Disable iSCSI OOO if MAC configuration is
8576 * invalid.
8577 */
8578 if (!is_valid_ether_addr(iscsi_mac)) {
8579 bp->flags |= NO_ISCSI_OOO_FLAG |
8580 NO_ISCSI_FLAG;
8581 memset(iscsi_mac, 0, ETH_ALEN);
8582 }
8583 } else 8408 } else
8584 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 8409 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8585 8410
@@ -8592,13 +8417,6 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8592 "0x%x:0x%04x\n", val2, val); 8417 "0x%x:0x%04x\n", val2, val);
8593 bnx2x_set_mac_buf(fip_mac, val, val2); 8418 bnx2x_set_mac_buf(fip_mac, val, val2);
8594 8419
8595 /* Disable FCoE if MAC configuration is
8596 * invalid.
8597 */
8598 if (!is_valid_ether_addr(fip_mac)) {
8599 bp->flags |= NO_FCOE_FLAG;
8600 memset(bp->fip_mac, 0, ETH_ALEN);
8601 }
8602 } else 8420 } else
8603 bp->flags |= NO_FCOE_FLAG; 8421 bp->flags |= NO_FCOE_FLAG;
8604 } 8422 }
@@ -8629,13 +8447,29 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8629 else if (!IS_MF(bp)) 8447 else if (!IS_MF(bp))
8630 memcpy(fip_mac, iscsi_mac, ETH_ALEN); 8448 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8631 } 8449 }
8450
8451 /* Disable iSCSI if MAC configuration is
8452 * invalid.
8453 */
8454 if (!is_valid_ether_addr(iscsi_mac)) {
8455 bp->flags |= NO_ISCSI_FLAG;
8456 memset(iscsi_mac, 0, ETH_ALEN);
8457 }
8458
8459 /* Disable FCoE if MAC configuration is
8460 * invalid.
8461 */
8462 if (!is_valid_ether_addr(fip_mac)) {
8463 bp->flags |= NO_FCOE_FLAG;
8464 memset(bp->fip_mac, 0, ETH_ALEN);
8465 }
8632#endif 8466#endif
8633} 8467}
8634 8468
8635static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8469static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8636{ 8470{
8637 int /*abs*/func = BP_ABS_FUNC(bp); 8471 int /*abs*/func = BP_ABS_FUNC(bp);
8638 int vn, port; 8472 int vn;
8639 u32 val = 0; 8473 u32 val = 0;
8640 int rc = 0; 8474 int rc = 0;
8641 8475
@@ -8670,7 +8504,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8670 bp->mf_ov = 0; 8504 bp->mf_ov = 0;
8671 bp->mf_mode = 0; 8505 bp->mf_mode = 0;
8672 vn = BP_E1HVN(bp); 8506 vn = BP_E1HVN(bp);
8673 port = BP_PORT(bp);
8674 8507
8675 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 8508 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8676 DP(NETIF_MSG_PROBE, 8509 DP(NETIF_MSG_PROBE,
@@ -8904,8 +8737,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8904 bp->multi_mode = multi_mode; 8737 bp->multi_mode = multi_mode;
8905 bp->int_mode = int_mode; 8738 bp->int_mode = int_mode;
8906 8739
8907 bp->dev->features |= NETIF_F_GRO;
8908
8909 /* Set TPA flags */ 8740 /* Set TPA flags */
8910 if (disable_tpa) { 8741 if (disable_tpa) {
8911 bp->flags &= ~TPA_ENABLE_FLAG; 8742 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -8925,8 +8756,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8925 8756
8926 bp->tx_ring_size = MAX_TX_AVAIL; 8757 bp->tx_ring_size = MAX_TX_AVAIL;
8927 8758
8928 bp->rx_csum = 1;
8929
8930 /* make sure that the numbers are in the right granularity */ 8759 /* make sure that the numbers are in the right granularity */
8931 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 8760 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8932 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 8761 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
@@ -9304,6 +9133,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
9304 .ndo_validate_addr = eth_validate_addr, 9133 .ndo_validate_addr = eth_validate_addr,
9305 .ndo_do_ioctl = bnx2x_ioctl, 9134 .ndo_do_ioctl = bnx2x_ioctl,
9306 .ndo_change_mtu = bnx2x_change_mtu, 9135 .ndo_change_mtu = bnx2x_change_mtu,
9136 .ndo_fix_features = bnx2x_fix_features,
9137 .ndo_set_features = bnx2x_set_features,
9307 .ndo_tx_timeout = bnx2x_tx_timeout, 9138 .ndo_tx_timeout = bnx2x_tx_timeout,
9308#ifdef CONFIG_NET_POLL_CONTROLLER 9139#ifdef CONFIG_NET_POLL_CONTROLLER
9309 .ndo_poll_controller = poll_bnx2x, 9140 .ndo_poll_controller = poll_bnx2x,
@@ -9430,20 +9261,20 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9430 9261
9431 dev->netdev_ops = &bnx2x_netdev_ops; 9262 dev->netdev_ops = &bnx2x_netdev_ops;
9432 bnx2x_set_ethtool_ops(dev); 9263 bnx2x_set_ethtool_ops(dev);
9433 dev->features |= NETIF_F_SG; 9264
9434 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 9265 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9266 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
9267 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
9268
9269 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9270 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
9271
9272 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
9435 if (bp->flags & USING_DAC_FLAG) 9273 if (bp->flags & USING_DAC_FLAG)
9436 dev->features |= NETIF_F_HIGHDMA; 9274 dev->features |= NETIF_F_HIGHDMA;
9437 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9438 dev->features |= NETIF_F_TSO6;
9439 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9440 9275
9441 dev->vlan_features |= NETIF_F_SG; 9276 /* Add Loopback capability to the device */
9442 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 9277 dev->hw_features |= NETIF_F_LOOPBACK;
9443 if (bp->flags & USING_DAC_FLAG)
9444 dev->vlan_features |= NETIF_F_HIGHDMA;
9445 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9446 dev->vlan_features |= NETIF_F_TSO6;
9447 9278
9448#ifdef BCM_DCBNL 9279#ifdef BCM_DCBNL
9449 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 9280 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
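In place of the removed get/set_rx_csum, set_tso and set_flags ethtool hooks, the driver now advertises its toggleable offloads in dev->hw_features and lets the core negotiate them through the ndo_fix_features/ndo_set_features pair registered above. The shape of that pair is sketched below; the dependency shown mirrors the old "TPA requires Rx CSUM" rule, and the function bodies are illustrative, not the driver's actual implementation.

/* Called by the core before applying a feature change: drop LRO whenever
 * RX checksumming is being turned off, since LRO/TPA depends on it.
 */
static u32 example_fix_features(struct net_device *dev, u32 features)
{
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;
	return features;
}

/* Called with the fixed-up feature set; reconfigure the hardware if a
 * setting that needs a reload (such as LRO/TPA) actually changed.
 */
static int example_set_features(struct net_device *dev, u32 features)
{
	/* driver-specific reconfiguration would go here */
	return 0;
}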
@@ -10342,6 +10173,11 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10342 break; 10173 break;
10343 } 10174 }
10344 10175
10176 case DRV_CTL_ISCSI_STOPPED_CMD: {
10177 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
10178 break;
10179 }
10180
10345 default: 10181 default:
10346 BNX2X_ERR("unknown command %x\n", ctl->cmd); 10182 BNX2X_ERR("unknown command %x\n", ctl->cmd);
10347 rc = -EINVAL; 10183 rc = -EINVAL;
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 1509a2318af9..86bba25d2d3f 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 3445ded6674f..e535bfa08945 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
1/* bnx2x_stats.c: Broadcom Everest network driver. 1/* bnx2x_stats.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 596798c47452..45d14d8bc1aa 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
1/* bnx2x_stats.h: Broadcom Everest network driver. 1/* bnx2x_stats.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 3c5c014e82b2..4c21bf6b8b2f 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -9,6 +9,3 @@ bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
9proc-$(CONFIG_PROC_FS) += bond_procfs.o 9proc-$(CONFIG_PROC_FS) += bond_procfs.o
10bonding-objs += $(proc-y) 10bonding-objs += $(proc-y)
11 11
12ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
13bonding-objs += $(ipv6-y)
14
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 31912f17653f..c7537abca4f2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -716,11 +716,9 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
716static u32 __get_agg_bandwidth(struct aggregator *aggregator) 716static u32 __get_agg_bandwidth(struct aggregator *aggregator)
717{ 717{
718 u32 bandwidth = 0; 718 u32 bandwidth = 0;
719 u32 basic_speed;
720 719
721 if (aggregator->num_of_ports) { 720 if (aggregator->num_of_ports) {
722 basic_speed = __get_link_speed(aggregator->lag_ports); 721 switch (__get_link_speed(aggregator->lag_ports)) {
723 switch (basic_speed) {
724 case AD_LINK_SPEED_BITMASK_1MBPS: 722 case AD_LINK_SPEED_BITMASK_1MBPS:
725 bandwidth = aggregator->num_of_ports; 723 bandwidth = aggregator->num_of_ports;
726 break; 724 break;
@@ -2405,14 +2403,6 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2405 struct ad_info ad_info; 2403 struct ad_info ad_info;
2406 int res = 1; 2404 int res = 1;
2407 2405
2408 /* make sure that the slaves list will
2409 * not change during tx
2410 */
2411 read_lock(&bond->lock);
2412
2413 if (!BOND_IS_OK(bond))
2414 goto out;
2415
2416 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 2406 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
2417 pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", 2407 pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
2418 dev->name); 2408 dev->name);
@@ -2466,39 +2456,20 @@ out:
2466 /* no suitable interface, frame not sent */ 2456 /* no suitable interface, frame not sent */
2467 dev_kfree_skb(skb); 2457 dev_kfree_skb(skb);
2468 } 2458 }
2469 read_unlock(&bond->lock); 2459
2470 return NETDEV_TX_OK; 2460 return NETDEV_TX_OK;
2471} 2461}
2472 2462
2473int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev) 2463void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
2464 struct slave *slave)
2474{ 2465{
2475 struct bonding *bond = netdev_priv(dev); 2466 if (skb->protocol != PKT_TYPE_LACPDU)
2476 struct slave *slave = NULL; 2467 return;
2477 int ret = NET_RX_DROP;
2478
2479 if (!(dev->flags & IFF_MASTER))
2480 goto out;
2481
2482 skb = skb_share_check(skb, GFP_ATOMIC);
2483 if (!skb)
2484 goto out;
2485 2468
2486 if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2469 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2487 goto out; 2470 return;
2488 2471
2489 read_lock(&bond->lock); 2472 read_lock(&bond->lock);
2490 slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
2491 if (!slave)
2492 goto out_unlock;
2493
2494 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); 2473 bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
2495
2496 ret = NET_RX_SUCCESS;
2497
2498out_unlock:
2499 read_unlock(&bond->lock); 2474 read_unlock(&bond->lock);
2500out:
2501 dev_kfree_skb(skb);
2502
2503 return ret;
2504} 2475}
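
The change above turns bond_3ad_lacpdu_recv() from a packet_type handler that owned and freed the skb into a void callback reached through the single bond->recv_probe hook; bond_handle_frame() (see the bond_main.c hunks below) clones each incoming frame and hands the clone to whichever probe the current mode installed. A compact userspace sketch of that hook-dispatch shape, assuming simplified stand-in types (sk_buff, bonding and slave here are not the kernel structures) and made-up names handle_frame/lacpdu_probe:

    /* Stand-in types only -- this mirrors the shape of the new recv_probe hook,
     * not the kernel's struct sk_buff / struct bonding. */
    #include <stdio.h>
    #include <stdint.h>

    struct sk_buff { uint16_t protocol; };
    struct slave   { const char *name; };
    struct bonding {
        const char *name;
        /* one shared hook; 802.3ad installs an LACPDU parser, arp_validate an ARP one */
        void (*recv_probe)(struct sk_buff *skb, struct bonding *bond, struct slave *slave);
    };

    #define ETH_P_SLOW 0x8809   /* LACPDUs ride in slow-protocol frames */

    static void lacpdu_probe(struct sk_buff *skb, struct bonding *bond, struct slave *slave)
    {
        if (skb->protocol != ETH_P_SLOW)
            return;                       /* not ours: simply ignore, nothing to free */
        printf("%s: LACPDU on %s\n", bond->name, slave->name);
    }

    /* Mirrors the part bond_handle_frame() gains: clone the frame, call the hook */
    static void handle_frame(struct bonding *bond, struct slave *slave, struct sk_buff *skb)
    {
        if (bond->recv_probe) {
            struct sk_buff clone = *skb;  /* the kernel uses skb_clone(skb, GFP_ATOMIC) */
            bond->recv_probe(&clone, bond, slave);
        }
        /* normal receive processing continues regardless of the probe */
    }

    int main(void)
    {
        struct bonding bond = { .name = "bond0", .recv_probe = lacpdu_probe };
        struct slave eth0 = { .name = "eth0" };
        struct sk_buff lacp = { .protocol = ETH_P_SLOW };

        handle_frame(&bond, &eth0, &lacp);
        return 0;
    }
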
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 01b8a6af275b..0ee3f1632c46 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -258,7 +258,6 @@ struct ad_bond_info {
258 * requested 258 * requested
259 */ 259 */
260 struct timer_list ad_timer; 260 struct timer_list ad_timer;
261 struct packet_type ad_pkt_type;
262}; 261};
263 262
264struct ad_slave_info { 263struct ad_slave_info {
@@ -280,7 +279,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
280void bond_3ad_handle_link_change(struct slave *slave, char link); 279void bond_3ad_handle_link_change(struct slave *slave, char link);
281int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 280int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
282int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 281int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
283int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev); 282void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
283 struct slave *slave);
284int bond_3ad_set_carrier(struct bonding *bond); 284int bond_3ad_set_carrier(struct bonding *bond);
285#endif //__BOND_3AD_H__ 285#endif //__BOND_3AD_H__
286 286
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index ba715826e2a8..8f2d2e7c70e5 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -308,49 +308,33 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
308 _unlock_rx_hashtbl(bond); 308 _unlock_rx_hashtbl(bond);
309} 309}
310 310
311static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev) 311static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
312 struct slave *slave)
312{ 313{
313 struct bonding *bond; 314 struct arp_pkt *arp;
314 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
315 int res = NET_RX_DROP;
316 315
317 while (bond_dev->priv_flags & IFF_802_1Q_VLAN) 316 if (skb->protocol != cpu_to_be16(ETH_P_ARP))
318 bond_dev = vlan_dev_real_dev(bond_dev); 317 return;
319
320 if (!(bond_dev->priv_flags & IFF_BONDING) ||
321 !(bond_dev->flags & IFF_MASTER))
322 goto out;
323 318
319 arp = (struct arp_pkt *) skb->data;
324 if (!arp) { 320 if (!arp) {
325 pr_debug("Packet has no ARP data\n"); 321 pr_debug("Packet has no ARP data\n");
326 goto out; 322 return;
327 } 323 }
328 324
329 skb = skb_share_check(skb, GFP_ATOMIC); 325 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
330 if (!skb) 326 return;
331 goto out;
332
333 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
334 goto out;
335 327
336 if (skb->len < sizeof(struct arp_pkt)) { 328 if (skb->len < sizeof(struct arp_pkt)) {
337 pr_debug("Packet is too small to be an ARP\n"); 329 pr_debug("Packet is too small to be an ARP\n");
338 goto out; 330 return;
339 } 331 }
340 332
341 if (arp->op_code == htons(ARPOP_REPLY)) { 333 if (arp->op_code == htons(ARPOP_REPLY)) {
342 /* update rx hash table for this ARP */ 334 /* update rx hash table for this ARP */
343 bond = netdev_priv(bond_dev);
344 rlb_update_entry_from_arp(bond, arp); 335 rlb_update_entry_from_arp(bond, arp);
345 pr_debug("Server received an ARP Reply from client\n"); 336 pr_debug("Server received an ARP Reply from client\n");
346 } 337 }
347
348 res = NET_RX_SUCCESS;
349
350out:
351 dev_kfree_skb(skb);
352
353 return res;
354} 338}
355 339
356/* Caller must hold bond lock for read */ 340/* Caller must hold bond lock for read */
@@ -759,7 +743,6 @@ static void rlb_init_table_entry(struct rlb_client_info *entry)
759static int rlb_initialize(struct bonding *bond) 743static int rlb_initialize(struct bonding *bond)
760{ 744{
761 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 745 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
762 struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type);
763 struct rlb_client_info *new_hashtbl; 746 struct rlb_client_info *new_hashtbl;
764 int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info); 747 int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
765 int i; 748 int i;
@@ -784,13 +767,8 @@ static int rlb_initialize(struct bonding *bond)
784 767
785 _unlock_rx_hashtbl(bond); 768 _unlock_rx_hashtbl(bond);
786 769
787 /*initialize packet type*/
788 pk_type->type = cpu_to_be16(ETH_P_ARP);
789 pk_type->dev = bond->dev;
790 pk_type->func = rlb_arp_recv;
791
792 /* register to receive ARPs */ 770 /* register to receive ARPs */
793 dev_add_pack(pk_type); 771 bond->recv_probe = rlb_arp_recv;
794 772
795 return 0; 773 return 0;
796} 774}
@@ -799,8 +777,6 @@ static void rlb_deinitialize(struct bonding *bond)
799{ 777{
800 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 778 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
801 779
802 dev_remove_pack(&(bond_info->rlb_pkt_type));
803
804 _lock_rx_hashtbl(bond); 780 _lock_rx_hashtbl(bond);
805 781
806 kfree(bond_info->rx_hashtbl); 782 kfree(bond_info->rx_hashtbl);
@@ -1249,16 +1225,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1249 skb_reset_mac_header(skb); 1225 skb_reset_mac_header(skb);
1250 eth_data = eth_hdr(skb); 1226 eth_data = eth_hdr(skb);
1251 1227
1252 /* make sure that the curr_active_slave and the slaves list do 1228 /* make sure that the curr_active_slave does not change during tx
1253 * not change during tx
1254 */ 1229 */
1255 read_lock(&bond->lock);
1256 read_lock(&bond->curr_slave_lock); 1230 read_lock(&bond->curr_slave_lock);
1257 1231
1258 if (!BOND_IS_OK(bond)) {
1259 goto out;
1260 }
1261
1262 switch (ntohs(skb->protocol)) { 1232 switch (ntohs(skb->protocol)) {
1263 case ETH_P_IP: { 1233 case ETH_P_IP: {
1264 const struct iphdr *iph = ip_hdr(skb); 1234 const struct iphdr *iph = ip_hdr(skb);
@@ -1358,13 +1328,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1358 } 1328 }
1359 } 1329 }
1360 1330
1361out:
1362 if (res) { 1331 if (res) {
1363 /* no suitable interface, frame not sent */ 1332 /* no suitable interface, frame not sent */
1364 dev_kfree_skb(skb); 1333 dev_kfree_skb(skb);
1365 } 1334 }
1366 read_unlock(&bond->curr_slave_lock); 1335 read_unlock(&bond->curr_slave_lock);
1367 read_unlock(&bond->lock); 1336
1368 return NETDEV_TX_OK; 1337 return NETDEV_TX_OK;
1369} 1338}
1370 1339
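
rlb_arp_recv() now follows a strict validate-then-parse order: bail out unless skb->protocol is ETH_P_ARP, pull enough bytes for the ARP header, check the minimum length, and only then look at the opcode and update the RLB hash table for replies. A small userspace sketch of that order; arp_pkt here is trimmed to a few fields and parse_arp_reply is a made-up stand-in for the kernel path:

    /* Simplified stand-ins for the kernel's struct arp_pkt and pskb_may_pull(). */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htons */

    #define ETH_P_ARP   0x0806
    #define ARPOP_REPLY 2

    struct arp_pkt {            /* fields trimmed to what the sketch needs */
        uint16_t op_code;       /* network byte order, like the kernel field */
        uint32_t ip_src;
        uint32_t ip_dst;
    };

    /* Returns 1 and fills *out only if the buffer is provably large enough
     * and really carries an ARP reply; mirrors the early-return style above. */
    static int parse_arp_reply(uint16_t proto, const uint8_t *data, size_t len,
                               struct arp_pkt *out)
    {
        if (proto != ETH_P_ARP)
            return 0;                       /* wrong protocol: ignore */
        if (len < sizeof(struct arp_pkt))
            return 0;                       /* too small to be an ARP: ignore */

        memcpy(out, data, sizeof(*out));    /* safe only after the length check */
        return out->op_code == htons(ARPOP_REPLY);
    }

    int main(void)
    {
        struct arp_pkt reply = { .op_code = htons(ARPOP_REPLY) };
        struct arp_pkt parsed;

        if (parse_arp_reply(ETH_P_ARP, (const uint8_t *)&reply, sizeof(reply), &parsed))
            printf("ARP reply accepted, would update the RLB hash table\n");
        return 0;
    }
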
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 8ca7158b2dda..90f140a2d197 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -129,7 +129,6 @@ struct alb_bond_info {
129 int lp_counter; 129 int lp_counter;
130 /* -------- rlb parameters -------- */ 130 /* -------- rlb parameters -------- */
131 int rlb_enabled; 131 int rlb_enabled;
132 struct packet_type rlb_pkt_type;
133 struct rlb_client_info *rx_hashtbl; /* Receive hash table */ 132 struct rlb_client_info *rx_hashtbl; /* Receive hash table */
134 spinlock_t rx_hashtbl_lock; 133 spinlock_t rx_hashtbl_lock;
135 u32 rx_hashtbl_head; 134 u32 rx_hashtbl_head;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 16d6fe954695..088fd845ffdf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -89,8 +89,7 @@
89 89
90static int max_bonds = BOND_DEFAULT_MAX_BONDS; 90static int max_bonds = BOND_DEFAULT_MAX_BONDS;
91static int tx_queues = BOND_DEFAULT_TX_QUEUES; 91static int tx_queues = BOND_DEFAULT_TX_QUEUES;
92static int num_grat_arp = 1; 92static int num_peer_notif = 1;
93static int num_unsol_na = 1;
94static int miimon = BOND_LINK_MON_INTERV; 93static int miimon = BOND_LINK_MON_INTERV;
95static int updelay; 94static int updelay;
96static int downdelay; 95static int downdelay;
@@ -113,10 +112,10 @@ module_param(max_bonds, int, 0);
113MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 112MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
114module_param(tx_queues, int, 0); 113module_param(tx_queues, int, 0);
115MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); 114MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
116module_param(num_grat_arp, int, 0644); 115module_param_named(num_grat_arp, num_peer_notif, int, 0644);
117MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); 116MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)");
118module_param(num_unsol_na, int, 0644); 117module_param_named(num_unsol_na, num_peer_notif, int, 0644);
119MODULE_PARM_DESC(num_unsol_na, "Number of unsolicited IPv6 Neighbor Advertisements packets to send on failover event"); 118MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)");
120module_param(miimon, int, 0); 119module_param(miimon, int, 0);
121MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); 120MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
122module_param(updelay, int, 0); 121module_param(updelay, int, 0);
@@ -234,7 +233,6 @@ struct bond_parm_tbl ad_select_tbl[] = {
234 233
235/*-------------------------- Forward declarations ---------------------------*/ 234/*-------------------------- Forward declarations ---------------------------*/
236 235
237static void bond_send_gratuitous_arp(struct bonding *bond);
238static int bond_init(struct net_device *bond_dev); 236static int bond_init(struct net_device *bond_dev);
239static void bond_uninit(struct net_device *bond_dev); 237static void bond_uninit(struct net_device *bond_dev);
240 238
@@ -346,32 +344,6 @@ out:
346} 344}
347 345
348/** 346/**
349 * bond_has_challenged_slaves
350 * @bond: the bond we're working on
351 *
352 * Searches the slave list. Returns 1 if a vlan challenged slave
353 * was found, 0 otherwise.
354 *
355 * Assumes bond->lock is held.
356 */
357static int bond_has_challenged_slaves(struct bonding *bond)
358{
359 struct slave *slave;
360 int i;
361
362 bond_for_each_slave(bond, slave, i) {
363 if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
364 pr_debug("found VLAN challenged slave - %s\n",
365 slave->dev->name);
366 return 1;
367 }
368 }
369
370 pr_debug("no VLAN challenged slaves found\n");
371 return 0;
372}
373
374/**
375 * bond_next_vlan - safely skip to the next item in the vlans list. 347 * bond_next_vlan - safely skip to the next item in the vlans list.
376 * @bond: the bond we're working on 348 * @bond: the bond we're working on
377 * @curr: item we're advancing from 349 * @curr: item we're advancing from
@@ -631,7 +603,8 @@ down:
631static int bond_update_speed_duplex(struct slave *slave) 603static int bond_update_speed_duplex(struct slave *slave)
632{ 604{
633 struct net_device *slave_dev = slave->dev; 605 struct net_device *slave_dev = slave->dev;
634 struct ethtool_cmd etool; 606 struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET };
607 u32 slave_speed;
635 int res; 608 int res;
636 609
637 /* Fake speed and duplex */ 610 /* Fake speed and duplex */
@@ -645,7 +618,8 @@ static int bond_update_speed_duplex(struct slave *slave)
645 if (res < 0) 618 if (res < 0)
646 return -1; 619 return -1;
647 620
648 switch (etool.speed) { 621 slave_speed = ethtool_cmd_speed(&etool);
622 switch (slave_speed) {
649 case SPEED_10: 623 case SPEED_10:
650 case SPEED_100: 624 case SPEED_100:
651 case SPEED_1000: 625 case SPEED_1000:
@@ -663,7 +637,7 @@ static int bond_update_speed_duplex(struct slave *slave)
663 return -1; 637 return -1;
664 } 638 }
665 639
666 slave->speed = etool.speed; 640 slave->speed = slave_speed;
667 slave->duplex = etool.duplex; 641 slave->duplex = etool.duplex;
668 642
669 return 0; 643 return 0;
@@ -1087,6 +1061,21 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1087 return bestslave; 1061 return bestslave;
1088} 1062}
1089 1063
1064static bool bond_should_notify_peers(struct bonding *bond)
1065{
1066 struct slave *slave = bond->curr_active_slave;
1067
1068 pr_debug("bond_should_notify_peers: bond %s slave %s\n",
1069 bond->dev->name, slave ? slave->dev->name : "NULL");
1070
1071 if (!slave || !bond->send_peer_notif ||
1072 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
1073 return false;
1074
1075 bond->send_peer_notif--;
1076 return true;
1077}
1078
1090/** 1079/**
1091 * change_active_interface - change the active slave into the specified one 1080 * change_active_interface - change the active slave into the specified one
1092 * @bond: our bonding struct 1081 * @bond: our bonding struct
@@ -1154,6 +1143,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1154 bond_set_slave_inactive_flags(old_active); 1143 bond_set_slave_inactive_flags(old_active);
1155 1144
1156 if (new_active) { 1145 if (new_active) {
1146 bool should_notify_peers = false;
1147
1157 bond_set_slave_active_flags(new_active); 1148 bond_set_slave_active_flags(new_active);
1158 1149
1159 if (bond->params.fail_over_mac) 1150 if (bond->params.fail_over_mac)
@@ -1161,17 +1152,19 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1161 old_active); 1152 old_active);
1162 1153
1163 if (netif_running(bond->dev)) { 1154 if (netif_running(bond->dev)) {
1164 bond->send_grat_arp = bond->params.num_grat_arp; 1155 bond->send_peer_notif =
1165 bond_send_gratuitous_arp(bond); 1156 bond->params.num_peer_notif;
1166 1157 should_notify_peers =
1167 bond->send_unsol_na = bond->params.num_unsol_na; 1158 bond_should_notify_peers(bond);
1168 bond_send_unsolicited_na(bond);
1169 } 1159 }
1170 1160
1171 write_unlock_bh(&bond->curr_slave_lock); 1161 write_unlock_bh(&bond->curr_slave_lock);
1172 read_unlock(&bond->lock); 1162 read_unlock(&bond->lock);
1173 1163
1174 netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); 1164 netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
1165 if (should_notify_peers)
1166 netdev_bonding_change(bond->dev,
1167 NETDEV_NOTIFY_PEERS);
1175 1168
1176 read_lock(&bond->lock); 1169 read_lock(&bond->lock);
1177 write_lock_bh(&bond->curr_slave_lock); 1170 write_lock_bh(&bond->curr_slave_lock);
@@ -1387,52 +1380,68 @@ static int bond_sethwaddr(struct net_device *bond_dev,
1387 return 0; 1380 return 0;
1388} 1381}
1389 1382
1390#define BOND_VLAN_FEATURES \ 1383static u32 bond_fix_features(struct net_device *dev, u32 features)
1391 (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
1392 NETIF_F_HW_VLAN_FILTER)
1393
1394/*
1395 * Compute the common dev->feature set available to all slaves. Some
1396 * feature bits are managed elsewhere, so preserve those feature bits
1397 * on the master device.
1398 */
1399static int bond_compute_features(struct bonding *bond)
1400{ 1384{
1401 struct slave *slave; 1385 struct slave *slave;
1402 struct net_device *bond_dev = bond->dev; 1386 struct bonding *bond = netdev_priv(dev);
1403 u32 features = bond_dev->features; 1387 u32 mask;
1404 u32 vlan_features = 0;
1405 unsigned short max_hard_header_len = max((u16)ETH_HLEN,
1406 bond_dev->hard_header_len);
1407 int i; 1388 int i;
1408 1389
1409 features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); 1390 read_lock(&bond->lock);
1410 features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM;
1411 1391
1412 if (!bond->first_slave) 1392 if (!bond->first_slave) {
1413 goto done; 1393 /* Disable adding VLANs to empty bond. But why? --mq */
1394 features |= NETIF_F_VLAN_CHALLENGED;
1395 goto out;
1396 }
1414 1397
1398 mask = features;
1415 features &= ~NETIF_F_ONE_FOR_ALL; 1399 features &= ~NETIF_F_ONE_FOR_ALL;
1400 features |= NETIF_F_ALL_FOR_ALL;
1416 1401
1417 vlan_features = bond->first_slave->dev->vlan_features;
1418 bond_for_each_slave(bond, slave, i) { 1402 bond_for_each_slave(bond, slave, i) {
1419 features = netdev_increment_features(features, 1403 features = netdev_increment_features(features,
1420 slave->dev->features, 1404 slave->dev->features,
1421 NETIF_F_ONE_FOR_ALL); 1405 mask);
1406 }
1407
1408out:
1409 read_unlock(&bond->lock);
1410 return features;
1411}
1412
1413#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \
1414 NETIF_F_SOFT_FEATURES | \
1415 NETIF_F_LRO)
1416
1417static void bond_compute_features(struct bonding *bond)
1418{
1419 struct slave *slave;
1420 struct net_device *bond_dev = bond->dev;
1421 u32 vlan_features = BOND_VLAN_FEATURES;
1422 unsigned short max_hard_header_len = ETH_HLEN;
1423 int i;
1424
1425 read_lock(&bond->lock);
1426
1427 if (!bond->first_slave)
1428 goto done;
1429
1430 bond_for_each_slave(bond, slave, i) {
1422 vlan_features = netdev_increment_features(vlan_features, 1431 vlan_features = netdev_increment_features(vlan_features,
1423 slave->dev->vlan_features, 1432 slave->dev->vlan_features, BOND_VLAN_FEATURES);
1424 NETIF_F_ONE_FOR_ALL); 1433
1425 if (slave->dev->hard_header_len > max_hard_header_len) 1434 if (slave->dev->hard_header_len > max_hard_header_len)
1426 max_hard_header_len = slave->dev->hard_header_len; 1435 max_hard_header_len = slave->dev->hard_header_len;
1427 } 1436 }
1428 1437
1429done: 1438done:
1430 features |= (bond_dev->features & BOND_VLAN_FEATURES); 1439 bond_dev->vlan_features = vlan_features;
1431 bond_dev->features = netdev_fix_features(bond_dev, features);
1432 bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
1433 bond_dev->hard_header_len = max_hard_header_len; 1440 bond_dev->hard_header_len = max_hard_header_len;
1434 1441
1435 return 0; 1442 read_unlock(&bond->lock);
1443
1444 netdev_change_features(bond_dev);
1436} 1445}
1437 1446
1438static void bond_setup_by_slave(struct net_device *bond_dev, 1447static void bond_setup_by_slave(struct net_device *bond_dev,
@@ -1452,27 +1461,17 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
1452} 1461}
1453 1462
1454/* On bonding slaves other than the currently active slave, suppress 1463/* On bonding slaves other than the currently active slave, suppress
1455 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and 1464 * duplicates except for alb non-mcast/bcast.
1456 * ARP on active-backup slaves with arp_validate enabled.
1457 */ 1465 */
1458static bool bond_should_deliver_exact_match(struct sk_buff *skb, 1466static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1459 struct slave *slave, 1467 struct slave *slave,
1460 struct bonding *bond) 1468 struct bonding *bond)
1461{ 1469{
1462 if (bond_is_slave_inactive(slave)) { 1470 if (bond_is_slave_inactive(slave)) {
1463 if (slave_do_arp_validate(bond, slave) &&
1464 skb->protocol == __cpu_to_be16(ETH_P_ARP))
1465 return false;
1466
1467 if (bond->params.mode == BOND_MODE_ALB && 1471 if (bond->params.mode == BOND_MODE_ALB &&
1468 skb->pkt_type != PACKET_BROADCAST && 1472 skb->pkt_type != PACKET_BROADCAST &&
1469 skb->pkt_type != PACKET_MULTICAST) 1473 skb->pkt_type != PACKET_MULTICAST)
1470 return false;
1471
1472 if (bond->params.mode == BOND_MODE_8023AD &&
1473 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
1474 return false; 1474 return false;
1475
1476 return true; 1475 return true;
1477 } 1476 }
1478 return false; 1477 return false;
@@ -1496,6 +1495,15 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1496 if (bond->params.arp_interval) 1495 if (bond->params.arp_interval)
1497 slave->dev->last_rx = jiffies; 1496 slave->dev->last_rx = jiffies;
1498 1497
1498 if (bond->recv_probe) {
1499 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1500
1501 if (likely(nskb)) {
1502 bond->recv_probe(nskb, bond, slave);
1503 dev_kfree_skb(nskb);
1504 }
1505 }
1506
1499 if (bond_should_deliver_exact_match(skb, slave, bond)) { 1507 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1500 return RX_HANDLER_EXACT; 1508 return RX_HANDLER_EXACT;
1501 } 1509 }
@@ -1526,7 +1534,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1526 struct netdev_hw_addr *ha; 1534 struct netdev_hw_addr *ha;
1527 struct sockaddr addr; 1535 struct sockaddr addr;
1528 int link_reporting; 1536 int link_reporting;
1529 int old_features = bond_dev->features;
1530 int res = 0; 1537 int res = 0;
1531 1538
1532 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && 1539 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
@@ -1559,16 +1566,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1559 pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n", 1566 pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
1560 bond_dev->name, slave_dev->name, 1567 bond_dev->name, slave_dev->name,
1561 slave_dev->name, bond_dev->name); 1568 slave_dev->name, bond_dev->name);
1562 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
1563 } 1569 }
1564 } else { 1570 } else {
1565 pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1571 pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1566 if (bond->slave_cnt == 0) {
1567 /* First slave, and it is not VLAN challenged,
1568 * so remove the block of adding VLANs over the bond.
1569 */
1570 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
1571 }
1572 } 1572 }
1573 1573
1574 /* 1574 /*
@@ -1757,10 +1757,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1757 new_slave->delay = 0; 1757 new_slave->delay = 0;
1758 new_slave->link_failure_count = 0; 1758 new_slave->link_failure_count = 0;
1759 1759
1760 bond_compute_features(bond);
1761
1762 write_unlock_bh(&bond->lock); 1760 write_unlock_bh(&bond->lock);
1763 1761
1762 bond_compute_features(bond);
1763
1764 read_lock(&bond->lock); 1764 read_lock(&bond->lock);
1765 1765
1766 new_slave->last_arp_rx = jiffies; 1766 new_slave->last_arp_rx = jiffies;
@@ -1940,7 +1940,7 @@ err_free:
1940 kfree(new_slave); 1940 kfree(new_slave);
1941 1941
1942err_undo_flags: 1942err_undo_flags:
1943 bond_dev->features = old_features; 1943 bond_compute_features(bond);
1944 1944
1945 return res; 1945 return res;
1946} 1946}
@@ -1961,6 +1961,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1961 struct bonding *bond = netdev_priv(bond_dev); 1961 struct bonding *bond = netdev_priv(bond_dev);
1962 struct slave *slave, *oldcurrent; 1962 struct slave *slave, *oldcurrent;
1963 struct sockaddr addr; 1963 struct sockaddr addr;
1964 u32 old_features = bond_dev->features;
1964 1965
1965 /* slave is not a slave or master is not master of this slave */ 1966 /* slave is not a slave or master is not master of this slave */
1966 if (!(slave_dev->flags & IFF_SLAVE) || 1967 if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2021,8 +2022,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2021 /* release the slave from its bond */ 2022 /* release the slave from its bond */
2022 bond_detach_slave(bond, slave); 2023 bond_detach_slave(bond, slave);
2023 2024
2024 bond_compute_features(bond);
2025
2026 if (bond->primary_slave == slave) 2025 if (bond->primary_slave == slave)
2027 bond->primary_slave = NULL; 2026 bond->primary_slave = NULL;
2028 2027
@@ -2066,24 +2065,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2066 */ 2065 */
2067 memset(bond_dev->dev_addr, 0, bond_dev->addr_len); 2066 memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
2068 2067
2069 if (!bond->vlgrp) { 2068 if (bond->vlgrp) {
2070 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2071 } else {
2072 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", 2069 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
2073 bond_dev->name, bond_dev->name); 2070 bond_dev->name, bond_dev->name);
2074 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", 2071 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
2075 bond_dev->name); 2072 bond_dev->name);
2076 } 2073 }
2077 } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2078 !bond_has_challenged_slaves(bond)) {
2079 pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
2080 bond_dev->name, slave_dev->name, bond_dev->name);
2081 bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
2082 } 2074 }
2083 2075
2084 write_unlock_bh(&bond->lock); 2076 write_unlock_bh(&bond->lock);
2085 unblock_netpoll_tx(); 2077 unblock_netpoll_tx();
2086 2078
2079 bond_compute_features(bond);
2080 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2081 (old_features & NETIF_F_VLAN_CHALLENGED))
2082 pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
2083 bond_dev->name, slave_dev->name, bond_dev->name);
2084
2087 /* must do this from outside any spinlocks */ 2085 /* must do this from outside any spinlocks */
2088 bond_destroy_slave_symlinks(bond_dev, slave_dev); 2086 bond_destroy_slave_symlinks(bond_dev, slave_dev);
2089 2087
@@ -2201,8 +2199,6 @@ static int bond_release_all(struct net_device *bond_dev)
2201 bond_alb_deinit_slave(bond, slave); 2199 bond_alb_deinit_slave(bond, slave);
2202 } 2200 }
2203 2201
2204 bond_compute_features(bond);
2205
2206 bond_destroy_slave_symlinks(bond_dev, slave_dev); 2202 bond_destroy_slave_symlinks(bond_dev, slave_dev);
2207 bond_del_vlans_from_slave(bond, slave_dev); 2203 bond_del_vlans_from_slave(bond, slave_dev);
2208 2204
@@ -2251,9 +2247,7 @@ static int bond_release_all(struct net_device *bond_dev)
2251 */ 2247 */
2252 memset(bond_dev->dev_addr, 0, bond_dev->addr_len); 2248 memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
2253 2249
2254 if (!bond->vlgrp) { 2250 if (bond->vlgrp) {
2255 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
2256 } else {
2257 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n", 2251 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
2258 bond_dev->name, bond_dev->name); 2252 bond_dev->name, bond_dev->name);
2259 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n", 2253 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2264,6 +2258,9 @@ static int bond_release_all(struct net_device *bond_dev)
2264 2258
2265out: 2259out:
2266 write_unlock_bh(&bond->lock); 2260 write_unlock_bh(&bond->lock);
2261
2262 bond_compute_features(bond);
2263
2267 return 0; 2264 return 0;
2268} 2265}
2269 2266
@@ -2493,7 +2490,7 @@ static void bond_miimon_commit(struct bonding *bond)
2493 2490
2494 bond_update_speed_duplex(slave); 2491 bond_update_speed_duplex(slave);
2495 2492
2496 pr_info("%s: link status definitely up for interface %s, %d Mbps %s duplex.\n", 2493 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
2497 bond->dev->name, slave->dev->name, 2494 bond->dev->name, slave->dev->name,
2498 slave->speed, slave->duplex ? "full" : "half"); 2495 slave->speed, slave->duplex ? "full" : "half");
2499 2496
@@ -2570,6 +2567,7 @@ void bond_mii_monitor(struct work_struct *work)
2570{ 2567{
2571 struct bonding *bond = container_of(work, struct bonding, 2568 struct bonding *bond = container_of(work, struct bonding,
2572 mii_work.work); 2569 mii_work.work);
2570 bool should_notify_peers = false;
2573 2571
2574 read_lock(&bond->lock); 2572 read_lock(&bond->lock);
2575 if (bond->kill_timers) 2573 if (bond->kill_timers)
@@ -2578,17 +2576,7 @@ void bond_mii_monitor(struct work_struct *work)
2578 if (bond->slave_cnt == 0) 2576 if (bond->slave_cnt == 0)
2579 goto re_arm; 2577 goto re_arm;
2580 2578
2581 if (bond->send_grat_arp) { 2579 should_notify_peers = bond_should_notify_peers(bond);
2582 read_lock(&bond->curr_slave_lock);
2583 bond_send_gratuitous_arp(bond);
2584 read_unlock(&bond->curr_slave_lock);
2585 }
2586
2587 if (bond->send_unsol_na) {
2588 read_lock(&bond->curr_slave_lock);
2589 bond_send_unsolicited_na(bond);
2590 read_unlock(&bond->curr_slave_lock);
2591 }
2592 2580
2593 if (bond_miimon_inspect(bond)) { 2581 if (bond_miimon_inspect(bond)) {
2594 read_unlock(&bond->lock); 2582 read_unlock(&bond->lock);
@@ -2608,6 +2596,12 @@ re_arm:
2608 msecs_to_jiffies(bond->params.miimon)); 2596 msecs_to_jiffies(bond->params.miimon));
2609out: 2597out:
2610 read_unlock(&bond->lock); 2598 read_unlock(&bond->lock);
2599
2600 if (should_notify_peers) {
2601 rtnl_lock();
2602 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
2603 rtnl_unlock();
2604 }
2611} 2605}
2612 2606
2613static __be32 bond_glean_dev_ip(struct net_device *dev) 2607static __be32 bond_glean_dev_ip(struct net_device *dev)
@@ -2751,44 +2745,6 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2751 } 2745 }
2752} 2746}
2753 2747
2754/*
2755 * Kick out a gratuitous ARP for an IP on the bonding master plus one
2756 * for each VLAN above us.
2757 *
2758 * Caller must hold curr_slave_lock for read or better
2759 */
2760static void bond_send_gratuitous_arp(struct bonding *bond)
2761{
2762 struct slave *slave = bond->curr_active_slave;
2763 struct vlan_entry *vlan;
2764 struct net_device *vlan_dev;
2765
2766 pr_debug("bond_send_grat_arp: bond %s slave %s\n",
2767 bond->dev->name, slave ? slave->dev->name : "NULL");
2768
2769 if (!slave || !bond->send_grat_arp ||
2770 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
2771 return;
2772
2773 bond->send_grat_arp--;
2774
2775 if (bond->master_ip) {
2776 bond_arp_send(slave->dev, ARPOP_REPLY, bond->master_ip,
2777 bond->master_ip, 0);
2778 }
2779
2780 if (!bond->vlgrp)
2781 return;
2782
2783 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
2784 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2785 if (vlan->vlan_ip) {
2786 bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
2787 vlan->vlan_ip, vlan->vlan_id);
2788 }
2789 }
2790}
2791
2792static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) 2748static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2793{ 2749{
2794 int i; 2750 int i;
@@ -2806,48 +2762,26 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2806 } 2762 }
2807} 2763}
2808 2764
2809static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 2765static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
2766 struct slave *slave)
2810{ 2767{
2811 struct arphdr *arp; 2768 struct arphdr *arp;
2812 struct slave *slave;
2813 struct bonding *bond;
2814 unsigned char *arp_ptr; 2769 unsigned char *arp_ptr;
2815 __be32 sip, tip; 2770 __be32 sip, tip;
2816 2771
2817 if (dev->priv_flags & IFF_802_1Q_VLAN) { 2772 if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
2818 /* 2773 return;
2819 * When using VLANS and bonding, dev and oriv_dev may be
2820 * incorrect if the physical interface supports VLAN
2821 * acceleration. With this change ARP validation now
2822 * works for hosts only reachable on the VLAN interface.
2823 */
2824 dev = vlan_dev_real_dev(dev);
2825 orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif);
2826 }
2827
2828 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2829 goto out;
2830 2774
2831 bond = netdev_priv(dev);
2832 read_lock(&bond->lock); 2775 read_lock(&bond->lock);
2833 2776
2834 pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n", 2777 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
2835 bond->dev->name, skb->dev ? skb->dev->name : "NULL", 2778 bond->dev->name, skb->dev->name);
2836 orig_dev ? orig_dev->name : "NULL");
2837
2838 slave = bond_get_slave_by_dev(bond, orig_dev);
2839 if (!slave || !slave_do_arp_validate(bond, slave))
2840 goto out_unlock;
2841
2842 skb = skb_share_check(skb, GFP_ATOMIC);
2843 if (!skb)
2844 goto out_unlock;
2845 2779
2846 if (!pskb_may_pull(skb, arp_hdr_len(dev))) 2780 if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
2847 goto out_unlock; 2781 goto out_unlock;
2848 2782
2849 arp = arp_hdr(skb); 2783 arp = arp_hdr(skb);
2850 if (arp->ar_hln != dev->addr_len || 2784 if (arp->ar_hln != bond->dev->addr_len ||
2851 skb->pkt_type == PACKET_OTHERHOST || 2785 skb->pkt_type == PACKET_OTHERHOST ||
2852 skb->pkt_type == PACKET_LOOPBACK || 2786 skb->pkt_type == PACKET_LOOPBACK ||
2853 arp->ar_hrd != htons(ARPHRD_ETHER) || 2787 arp->ar_hrd != htons(ARPHRD_ETHER) ||
@@ -2856,9 +2790,9 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2856 goto out_unlock; 2790 goto out_unlock;
2857 2791
2858 arp_ptr = (unsigned char *)(arp + 1); 2792 arp_ptr = (unsigned char *)(arp + 1);
2859 arp_ptr += dev->addr_len; 2793 arp_ptr += bond->dev->addr_len;
2860 memcpy(&sip, arp_ptr, 4); 2794 memcpy(&sip, arp_ptr, 4);
2861 arp_ptr += 4 + dev->addr_len; 2795 arp_ptr += 4 + bond->dev->addr_len;
2862 memcpy(&tip, arp_ptr, 4); 2796 memcpy(&tip, arp_ptr, 4);
2863 2797
2864 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", 2798 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
@@ -2881,9 +2815,6 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2881 2815
2882out_unlock: 2816out_unlock:
2883 read_unlock(&bond->lock); 2817 read_unlock(&bond->lock);
2884out:
2885 dev_kfree_skb(skb);
2886 return NET_RX_SUCCESS;
2887} 2818}
2888 2819
2889/* 2820/*
@@ -3243,6 +3174,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
3243{ 3174{
3244 struct bonding *bond = container_of(work, struct bonding, 3175 struct bonding *bond = container_of(work, struct bonding,
3245 arp_work.work); 3176 arp_work.work);
3177 bool should_notify_peers = false;
3246 int delta_in_ticks; 3178 int delta_in_ticks;
3247 3179
3248 read_lock(&bond->lock); 3180 read_lock(&bond->lock);
@@ -3255,17 +3187,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
3255 if (bond->slave_cnt == 0) 3187 if (bond->slave_cnt == 0)
3256 goto re_arm; 3188 goto re_arm;
3257 3189
3258 if (bond->send_grat_arp) { 3190 should_notify_peers = bond_should_notify_peers(bond);
3259 read_lock(&bond->curr_slave_lock);
3260 bond_send_gratuitous_arp(bond);
3261 read_unlock(&bond->curr_slave_lock);
3262 }
3263
3264 if (bond->send_unsol_na) {
3265 read_lock(&bond->curr_slave_lock);
3266 bond_send_unsolicited_na(bond);
3267 read_unlock(&bond->curr_slave_lock);
3268 }
3269 3191
3270 if (bond_ab_arp_inspect(bond, delta_in_ticks)) { 3192 if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
3271 read_unlock(&bond->lock); 3193 read_unlock(&bond->lock);
@@ -3286,6 +3208,12 @@ re_arm:
3286 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3208 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3287out: 3209out:
3288 read_unlock(&bond->lock); 3210 read_unlock(&bond->lock);
3211
3212 if (should_notify_peers) {
3213 rtnl_lock();
3214 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
3215 rtnl_unlock();
3216 }
3289} 3217}
3290 3218
3291/*-------------------------- netdev event handling --------------------------*/ 3219/*-------------------------- netdev event handling --------------------------*/
@@ -3339,8 +3267,8 @@ static int bond_slave_netdev_event(unsigned long event,
3339 3267
3340 slave = bond_get_slave_by_dev(bond, slave_dev); 3268 slave = bond_get_slave_by_dev(bond, slave_dev);
3341 if (slave) { 3269 if (slave) {
3342 u16 old_speed = slave->speed; 3270 u32 old_speed = slave->speed;
3343 u16 old_duplex = slave->duplex; 3271 u8 old_duplex = slave->duplex;
3344 3272
3345 bond_update_speed_duplex(slave); 3273 bond_update_speed_duplex(slave);
3346 3274
@@ -3482,48 +3410,6 @@ static struct notifier_block bond_inetaddr_notifier = {
3482 .notifier_call = bond_inetaddr_event, 3410 .notifier_call = bond_inetaddr_event,
3483}; 3411};
3484 3412
3485/*-------------------------- Packet type handling ---------------------------*/
3486
3487/* register to receive lacpdus on a bond */
3488static void bond_register_lacpdu(struct bonding *bond)
3489{
3490 struct packet_type *pk_type = &(BOND_AD_INFO(bond).ad_pkt_type);
3491
3492 /* initialize packet type */
3493 pk_type->type = PKT_TYPE_LACPDU;
3494 pk_type->dev = bond->dev;
3495 pk_type->func = bond_3ad_lacpdu_recv;
3496
3497 dev_add_pack(pk_type);
3498}
3499
3500/* unregister to receive lacpdus on a bond */
3501static void bond_unregister_lacpdu(struct bonding *bond)
3502{
3503 dev_remove_pack(&(BOND_AD_INFO(bond).ad_pkt_type));
3504}
3505
3506void bond_register_arp(struct bonding *bond)
3507{
3508 struct packet_type *pt = &bond->arp_mon_pt;
3509
3510 if (pt->type)
3511 return;
3512
3513 pt->type = htons(ETH_P_ARP);
3514 pt->dev = bond->dev;
3515 pt->func = bond_arp_rcv;
3516 dev_add_pack(pt);
3517}
3518
3519void bond_unregister_arp(struct bonding *bond)
3520{
3521 struct packet_type *pt = &bond->arp_mon_pt;
3522
3523 dev_remove_pack(pt);
3524 pt->type = 0;
3525}
3526
3527/*---------------------------- Hashing Policies -----------------------------*/ 3413/*---------------------------- Hashing Policies -----------------------------*/
3528 3414
3529/* 3415/*
@@ -3617,14 +3503,14 @@ static int bond_open(struct net_device *bond_dev)
3617 3503
3618 queue_delayed_work(bond->wq, &bond->arp_work, 0); 3504 queue_delayed_work(bond->wq, &bond->arp_work, 0);
3619 if (bond->params.arp_validate) 3505 if (bond->params.arp_validate)
3620 bond_register_arp(bond); 3506 bond->recv_probe = bond_arp_rcv;
3621 } 3507 }
3622 3508
3623 if (bond->params.mode == BOND_MODE_8023AD) { 3509 if (bond->params.mode == BOND_MODE_8023AD) {
3624 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); 3510 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3625 queue_delayed_work(bond->wq, &bond->ad_work, 0); 3511 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3626 /* register to receive LACPDUs */ 3512 /* register to receive LACPDUs */
3627 bond_register_lacpdu(bond); 3513 bond->recv_probe = bond_3ad_lacpdu_recv;
3628 bond_3ad_initiate_agg_selection(bond, 1); 3514 bond_3ad_initiate_agg_selection(bond, 1);
3629 } 3515 }
3630 3516
@@ -3635,18 +3521,9 @@ static int bond_close(struct net_device *bond_dev)
3635{ 3521{
3636 struct bonding *bond = netdev_priv(bond_dev); 3522 struct bonding *bond = netdev_priv(bond_dev);
3637 3523
3638 if (bond->params.mode == BOND_MODE_8023AD) {
3639 /* Unregister the receive of LACPDUs */
3640 bond_unregister_lacpdu(bond);
3641 }
3642
3643 if (bond->params.arp_validate)
3644 bond_unregister_arp(bond);
3645
3646 write_lock_bh(&bond->lock); 3524 write_lock_bh(&bond->lock);
3647 3525
3648 bond->send_grat_arp = 0; 3526 bond->send_peer_notif = 0;
3649 bond->send_unsol_na = 0;
3650 3527
3651 /* signal timers not to re-arm */ 3528 /* signal timers not to re-arm */
3652 bond->kill_timers = 1; 3529 bond->kill_timers = 1;
@@ -3682,6 +3559,7 @@ static int bond_close(struct net_device *bond_dev)
3682 */ 3559 */
3683 bond_alb_deinitialize(bond); 3560 bond_alb_deinitialize(bond);
3684 } 3561 }
3562 bond->recv_probe = NULL;
3685 3563
3686 return 0; 3564 return 0;
3687} 3565}
@@ -4105,10 +3983,6 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
4105 int i, slave_no, res = 1; 3983 int i, slave_no, res = 1;
4106 struct iphdr *iph = ip_hdr(skb); 3984 struct iphdr *iph = ip_hdr(skb);
4107 3985
4108 read_lock(&bond->lock);
4109
4110 if (!BOND_IS_OK(bond))
4111 goto out;
4112 /* 3986 /*
4113 * Start with the curr_active_slave that joined the bond as the 3987 * Start with the curr_active_slave that joined the bond as the
4114 * default for sending IGMP traffic. For failover purposes one 3988 * default for sending IGMP traffic. For failover purposes one
@@ -4155,7 +4029,7 @@ out:
4155 /* no suitable interface, frame not sent */ 4029 /* no suitable interface, frame not sent */
4156 dev_kfree_skb(skb); 4030 dev_kfree_skb(skb);
4157 } 4031 }
4158 read_unlock(&bond->lock); 4032
4159 return NETDEV_TX_OK; 4033 return NETDEV_TX_OK;
4160} 4034}
4161 4035
@@ -4169,24 +4043,18 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
4169 struct bonding *bond = netdev_priv(bond_dev); 4043 struct bonding *bond = netdev_priv(bond_dev);
4170 int res = 1; 4044 int res = 1;
4171 4045
4172 read_lock(&bond->lock);
4173 read_lock(&bond->curr_slave_lock); 4046 read_lock(&bond->curr_slave_lock);
4174 4047
4175 if (!BOND_IS_OK(bond)) 4048 if (bond->curr_active_slave)
4176 goto out; 4049 res = bond_dev_queue_xmit(bond, skb,
4177 4050 bond->curr_active_slave->dev);
4178 if (!bond->curr_active_slave)
4179 goto out;
4180 4051
4181 res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
4182
4183out:
4184 if (res) 4052 if (res)
4185 /* no suitable interface, frame not sent */ 4053 /* no suitable interface, frame not sent */
4186 dev_kfree_skb(skb); 4054 dev_kfree_skb(skb);
4187 4055
4188 read_unlock(&bond->curr_slave_lock); 4056 read_unlock(&bond->curr_slave_lock);
4189 read_unlock(&bond->lock); 4057
4190 return NETDEV_TX_OK; 4058 return NETDEV_TX_OK;
4191} 4059}
4192 4060
@@ -4203,11 +4071,6 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4203 int i; 4071 int i;
4204 int res = 1; 4072 int res = 1;
4205 4073
4206 read_lock(&bond->lock);
4207
4208 if (!BOND_IS_OK(bond))
4209 goto out;
4210
4211 slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt); 4074 slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
4212 4075
4213 bond_for_each_slave(bond, slave, i) { 4076 bond_for_each_slave(bond, slave, i) {
@@ -4227,12 +4090,11 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4227 } 4090 }
4228 } 4091 }
4229 4092
4230out:
4231 if (res) { 4093 if (res) {
4232 /* no suitable interface, frame not sent */ 4094 /* no suitable interface, frame not sent */
4233 dev_kfree_skb(skb); 4095 dev_kfree_skb(skb);
4234 } 4096 }
4235 read_unlock(&bond->lock); 4097
4236 return NETDEV_TX_OK; 4098 return NETDEV_TX_OK;
4237} 4099}
4238 4100
@@ -4247,11 +4109,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4247 int i; 4109 int i;
4248 int res = 1; 4110 int res = 1;
4249 4111
4250 read_lock(&bond->lock);
4251
4252 if (!BOND_IS_OK(bond))
4253 goto out;
4254
4255 read_lock(&bond->curr_slave_lock); 4112 read_lock(&bond->curr_slave_lock);
4256 start_at = bond->curr_active_slave; 4113 start_at = bond->curr_active_slave;
4257 read_unlock(&bond->curr_slave_lock); 4114 read_unlock(&bond->curr_slave_lock);
@@ -4290,7 +4147,6 @@ out:
4290 dev_kfree_skb(skb); 4147 dev_kfree_skb(skb);
4291 4148
4292 /* frame sent to all suitable interfaces */ 4149 /* frame sent to all suitable interfaces */
4293 read_unlock(&bond->lock);
4294 return NETDEV_TX_OK; 4150 return NETDEV_TX_OK;
4295} 4151}
4296 4152
@@ -4322,10 +4178,8 @@ static inline int bond_slave_override(struct bonding *bond,
4322 struct slave *slave = NULL; 4178 struct slave *slave = NULL;
4323 struct slave *check_slave; 4179 struct slave *check_slave;
4324 4180
4325 read_lock(&bond->lock); 4181 if (!skb->queue_mapping)
4326 4182 return 1;
4327 if (!BOND_IS_OK(bond) || !skb->queue_mapping)
4328 goto out;
4329 4183
4330 /* Find out if any slaves have the same mapping as this skb. */ 4184 /* Find out if any slaves have the same mapping as this skb. */
4331 bond_for_each_slave(bond, check_slave, i) { 4185 bond_for_each_slave(bond, check_slave, i) {
@@ -4341,8 +4195,6 @@ static inline int bond_slave_override(struct bonding *bond,
4341 res = bond_dev_queue_xmit(bond, skb, slave->dev); 4195 res = bond_dev_queue_xmit(bond, skb, slave->dev);
4342 } 4196 }
4343 4197
4344out:
4345 read_unlock(&bond->lock);
4346 return res; 4198 return res;
4347} 4199}
4348 4200
@@ -4357,24 +4209,17 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4357 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; 4209 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4358 4210
4359 if (unlikely(txq >= dev->real_num_tx_queues)) { 4211 if (unlikely(txq >= dev->real_num_tx_queues)) {
4360 do 4212 do {
4361 txq -= dev->real_num_tx_queues; 4213 txq -= dev->real_num_tx_queues;
4362 while (txq >= dev->real_num_tx_queues); 4214 } while (txq >= dev->real_num_tx_queues);
4363 } 4215 }
4364 return txq; 4216 return txq;
4365} 4217}
4366 4218
4367static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) 4219static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4368{ 4220{
4369 struct bonding *bond = netdev_priv(dev); 4221 struct bonding *bond = netdev_priv(dev);
4370 4222
4371 /*
4372 * If we risk deadlock from transmitting this in the
4373 * netpoll path, tell netpoll to queue the frame for later tx
4374 */
4375 if (is_netpoll_tx_blocked(dev))
4376 return NETDEV_TX_BUSY;
4377
4378 if (TX_QUEUE_OVERRIDE(bond->params.mode)) { 4223 if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
4379 if (!bond_slave_override(bond, skb)) 4224 if (!bond_slave_override(bond, skb))
4380 return NETDEV_TX_OK; 4225 return NETDEV_TX_OK;
@@ -4404,6 +4249,29 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4404 } 4249 }
4405} 4250}
4406 4251
4252static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4253{
4254 struct bonding *bond = netdev_priv(dev);
4255 netdev_tx_t ret = NETDEV_TX_OK;
4256
4257 /*
4258 * If we risk deadlock from transmitting this in the
4259 * netpoll path, tell netpoll to queue the frame for later tx
4260 */
4261 if (is_netpoll_tx_blocked(dev))
4262 return NETDEV_TX_BUSY;
4263
4264 read_lock(&bond->lock);
4265
4266 if (bond->slave_cnt)
4267 ret = __bond_start_xmit(skb, dev);
4268 else
4269 dev_kfree_skb(skb);
4270
4271 read_unlock(&bond->lock);
4272
4273 return ret;
4274}
4407 4275
4408/* 4276/*
4409 * set bond mode specific net device operations 4277 * set bond mode specific net device operations
@@ -4448,11 +4316,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4448static const struct ethtool_ops bond_ethtool_ops = { 4316static const struct ethtool_ops bond_ethtool_ops = {
4449 .get_drvinfo = bond_ethtool_get_drvinfo, 4317 .get_drvinfo = bond_ethtool_get_drvinfo,
4450 .get_link = ethtool_op_get_link, 4318 .get_link = ethtool_op_get_link,
4451 .get_tx_csum = ethtool_op_get_tx_csum,
4452 .get_sg = ethtool_op_get_sg,
4453 .get_tso = ethtool_op_get_tso,
4454 .get_ufo = ethtool_op_get_ufo,
4455 .get_flags = ethtool_op_get_flags,
4456}; 4319};
4457 4320
4458static const struct net_device_ops bond_netdev_ops = { 4321static const struct net_device_ops bond_netdev_ops = {
@@ -4478,6 +4341,7 @@ static const struct net_device_ops bond_netdev_ops = {
4478#endif 4341#endif
4479 .ndo_add_slave = bond_enslave, 4342 .ndo_add_slave = bond_enslave,
4480 .ndo_del_slave = bond_release, 4343 .ndo_del_slave = bond_release,
4344 .ndo_fix_features = bond_fix_features,
4481}; 4345};
4482 4346
4483static void bond_destructor(struct net_device *bond_dev) 4347static void bond_destructor(struct net_device *bond_dev)
@@ -4533,14 +4397,14 @@ static void bond_setup(struct net_device *bond_dev)
4533 * when there are slaves that are not hw accel 4397 * when there are slaves that are not hw accel
4534 * capable 4398 * capable
4535 */ 4399 */
4536 bond_dev->features |= (NETIF_F_HW_VLAN_TX |
4537 NETIF_F_HW_VLAN_RX |
4538 NETIF_F_HW_VLAN_FILTER);
4539 4400
4540 /* By default, we enable GRO on bonding devices. 4401 bond_dev->hw_features = BOND_VLAN_FEATURES |
4541 * Actual support requires lowlevel drivers are GRO ready. 4402 NETIF_F_HW_VLAN_TX |
4542 */ 4403 NETIF_F_HW_VLAN_RX |
4543 bond_dev->features |= NETIF_F_GRO; 4404 NETIF_F_HW_VLAN_FILTER;
4405
4406 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
4407 bond_dev->features |= bond_dev->hw_features;
4544} 4408}
4545 4409
4546static void bond_work_cancel_all(struct bonding *bond) 4410static void bond_work_cancel_all(struct bonding *bond)
@@ -4724,16 +4588,10 @@ static int bond_check_params(struct bond_params *params)
4724 use_carrier = 1; 4588 use_carrier = 1;
4725 } 4589 }
4726 4590
4727 if (num_grat_arp < 0 || num_grat_arp > 255) { 4591 if (num_peer_notif < 0 || num_peer_notif > 255) {
4728 pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n", 4592 pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
4729 num_grat_arp); 4593 num_peer_notif);
4730 num_grat_arp = 1; 4594 num_peer_notif = 1;
4731 }
4732
4733 if (num_unsol_na < 0 || num_unsol_na > 255) {
4734 pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
4735 num_unsol_na);
4736 num_unsol_na = 1;
4737 } 4595 }
4738 4596
4739 /* reset values for 802.3ad */ 4597 /* reset values for 802.3ad */
@@ -4925,8 +4783,7 @@ static int bond_check_params(struct bond_params *params)
4925 params->mode = bond_mode; 4783 params->mode = bond_mode;
4926 params->xmit_policy = xmit_hashtype; 4784 params->xmit_policy = xmit_hashtype;
4927 params->miimon = miimon; 4785 params->miimon = miimon;
4928 params->num_grat_arp = num_grat_arp; 4786 params->num_peer_notif = num_peer_notif;
4929 params->num_unsol_na = num_unsol_na;
4930 params->arp_interval = arp_interval; 4787 params->arp_interval = arp_interval;
4931 params->arp_validate = arp_validate_value; 4788 params->arp_validate = arp_validate_value;
4932 params->updelay = updelay; 4789 params->updelay = updelay;
@@ -5025,8 +4882,9 @@ int bond_create(struct net *net, const char *name)
5025 4882
5026 rtnl_lock(); 4883 rtnl_lock();
5027 4884
5028 bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "", 4885 bond_dev = alloc_netdev_mq(sizeof(struct bonding),
5029 bond_setup, tx_queues); 4886 name ? name : "bond%d",
4887 bond_setup, tx_queues);
5030 if (!bond_dev) { 4888 if (!bond_dev) {
5031 pr_err("%s: eek! can't alloc netdev!\n", name); 4889 pr_err("%s: eek! can't alloc netdev!\n", name);
5032 rtnl_unlock(); 4890 rtnl_unlock();
@@ -5036,26 +4894,10 @@ int bond_create(struct net *net, const char *name)
5036 dev_net_set(bond_dev, net); 4894 dev_net_set(bond_dev, net);
5037 bond_dev->rtnl_link_ops = &bond_link_ops; 4895 bond_dev->rtnl_link_ops = &bond_link_ops;
5038 4896
5039 if (!name) {
5040 res = dev_alloc_name(bond_dev, "bond%d");
5041 if (res < 0)
5042 goto out;
5043 } else {
5044 /*
5045 * If we're given a name to register
5046 * we need to ensure that its not already
5047 * registered
5048 */
5049 res = -EEXIST;
5050 if (__dev_get_by_name(net, name) != NULL)
5051 goto out;
5052 }
5053
5054 res = register_netdevice(bond_dev); 4897 res = register_netdevice(bond_dev);
5055 4898
5056 netif_carrier_off(bond_dev); 4899 netif_carrier_off(bond_dev);
5057 4900
5058out:
5059 rtnl_unlock(); 4901 rtnl_unlock();
5060 if (res < 0) 4902 if (res < 0)
5061 bond_destructor(bond_dev); 4903 bond_destructor(bond_dev);
@@ -5121,7 +4963,6 @@ static int __init bonding_init(void)
5121 4963
5122 register_netdevice_notifier(&bond_netdev_notifier); 4964 register_netdevice_notifier(&bond_netdev_notifier);
5123 register_inetaddr_notifier(&bond_inetaddr_notifier); 4965 register_inetaddr_notifier(&bond_inetaddr_notifier);
5124 bond_register_ipv6_notifier();
5125out: 4966out:
5126 return res; 4967 return res;
5127err: 4968err:
@@ -5136,7 +4977,6 @@ static void __exit bonding_exit(void)
5136{ 4977{
5137 unregister_netdevice_notifier(&bond_netdev_notifier); 4978 unregister_netdevice_notifier(&bond_netdev_notifier);
5138 unregister_inetaddr_notifier(&bond_inetaddr_notifier); 4979 unregister_inetaddr_notifier(&bond_inetaddr_notifier);
5139 bond_unregister_ipv6_notifier();
5140 4980
5141 bond_destroy_sysfs(); 4981 bond_destroy_sysfs();
5142 bond_destroy_debugfs(); 4982 bond_destroy_debugfs();
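
bond_fix_features() and the reworked bond_compute_features() rebuild the bond's offload set from its slaves instead of patching feature bits by hand: netdev_increment_features() keeps a bit only if the slaves can collectively support it (some bits need every slave, software-fallback bits need just one), and an empty bond is additionally marked NETIF_F_VLAN_CHALLENGED. A rough userspace sketch of that combining rule; the flag names and the ALL_FOR_ALL/ONE_FOR_ALL split below are invented for illustration, the real grouping is defined by NETIF_F_ONE_FOR_ALL in the kernel headers:

    /* Invented flag bits; the kernel's grouping lives in NETIF_F_ONE_FOR_ALL
     * and friends, and the real combining is netdev_increment_features(). */
    #include <stdio.h>
    #include <stdint.h>

    #define F_HW_OFFLOAD_A (1u << 0)   /* pretend: needs support on every slave */
    #define F_HW_OFFLOAD_B (1u << 1)
    #define F_SW_FALLBACK  (1u << 2)   /* pretend: one capable slave is enough */

    #define ALL_FOR_ALL_MASK (F_HW_OFFLOAD_A | F_HW_OFFLOAD_B)
    #define ONE_FOR_ALL_MASK (F_SW_FALLBACK)

    static uint32_t combine_slave_features(const uint32_t *slave, int n)
    {
        uint32_t need_all = ALL_FOR_ALL_MASK;  /* ANDed down across slaves */
        uint32_t need_one = 0;                 /* ORed up across slaves */
        int i;

        if (!n)
            return 0;                          /* empty bond: nothing enabled */

        for (i = 0; i < n; i++) {
            need_all &= slave[i];
            need_one |= slave[i] & ONE_FOR_ALL_MASK;
        }
        return need_all | need_one;
    }

    int main(void)
    {
        uint32_t slaves[] = {
            F_HW_OFFLOAD_A | F_HW_OFFLOAD_B | F_SW_FALLBACK,
            F_HW_OFFLOAD_A | F_SW_FALLBACK,
        };

        /* F_HW_OFFLOAD_B drops out: the second slave lacks it */
        printf("bond features: 0x%x\n", combine_slave_features(slaves, 2));
        return 0;
    }
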
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index c32ff55a34c1..c97307ddd1c9 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -4,8 +4,6 @@
4#include "bonding.h" 4#include "bonding.h"
5 5
6 6
7extern const char *bond_mode_name(int mode);
8
9static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) 7static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
10 __acquires(RCU) 8 __acquires(RCU)
11 __acquires(&bond->lock) 9 __acquires(&bond->lock)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index de87aea6d01a..4059bfc73dbf 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -422,11 +422,6 @@ static ssize_t bonding_store_arp_validate(struct device *d,
422 bond->dev->name, arp_validate_tbl[new_value].modename, 422 bond->dev->name, arp_validate_tbl[new_value].modename,
423 new_value); 423 new_value);
424 424
425 if (!bond->params.arp_validate && new_value)
426 bond_register_arp(bond);
427 else if (bond->params.arp_validate && !new_value)
428 bond_unregister_arp(bond);
429
430 bond->params.arp_validate = new_value; 425 bond->params.arp_validate = new_value;
431 426
432 return count; 427 return count;
@@ -874,82 +869,28 @@ static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
874 bonding_show_ad_select, bonding_store_ad_select); 869 bonding_show_ad_select, bonding_store_ad_select);
875 870
876/* 871/*
877 * Show and set the number of grat ARP to send after a failover event. 872 * Show and set the number of peer notifications to send after a failover event.
878 */ 873 */
879static ssize_t bonding_show_n_grat_arp(struct device *d, 874static ssize_t bonding_show_num_peer_notif(struct device *d,
880 struct device_attribute *attr, 875 struct device_attribute *attr,
881 char *buf) 876 char *buf)
882{ 877{
883 struct bonding *bond = to_bond(d); 878 struct bonding *bond = to_bond(d);
884 879 return sprintf(buf, "%d\n", bond->params.num_peer_notif);
885 return sprintf(buf, "%d\n", bond->params.num_grat_arp);
886} 880}
887 881
888static ssize_t bonding_store_n_grat_arp(struct device *d, 882static ssize_t bonding_store_num_peer_notif(struct device *d,
889 struct device_attribute *attr, 883 struct device_attribute *attr,
890 const char *buf, size_t count) 884 const char *buf, size_t count)
891{ 885{
892 int new_value, ret = count;
893 struct bonding *bond = to_bond(d); 886 struct bonding *bond = to_bond(d);
894 887 int err = kstrtou8(buf, 10, &bond->params.num_peer_notif);
895 if (sscanf(buf, "%d", &new_value) != 1) { 888 return err ? err : count;
896 pr_err("%s: no num_grat_arp value specified.\n",
897 bond->dev->name);
898 ret = -EINVAL;
899 goto out;
900 }
901 if (new_value < 0 || new_value > 255) {
902 pr_err("%s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
903 bond->dev->name, new_value);
904 ret = -EINVAL;
905 goto out;
906 } else {
907 bond->params.num_grat_arp = new_value;
908 }
909out:
910 return ret;
911} 889}
912static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, 890static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
913 bonding_show_n_grat_arp, bonding_store_n_grat_arp); 891 bonding_show_num_peer_notif, bonding_store_num_peer_notif);
914
915/*
916 * Show and set the number of unsolicited NA's to send after a failover event.
917 */
918static ssize_t bonding_show_n_unsol_na(struct device *d,
919 struct device_attribute *attr,
920 char *buf)
921{
922 struct bonding *bond = to_bond(d);
923
924 return sprintf(buf, "%d\n", bond->params.num_unsol_na);
925}
926
927static ssize_t bonding_store_n_unsol_na(struct device *d,
928 struct device_attribute *attr,
929 const char *buf, size_t count)
930{
931 int new_value, ret = count;
932 struct bonding *bond = to_bond(d);
933
934 if (sscanf(buf, "%d", &new_value) != 1) {
935 pr_err("%s: no num_unsol_na value specified.\n",
936 bond->dev->name);
937 ret = -EINVAL;
938 goto out;
939 }
940
941 if (new_value < 0 || new_value > 255) {
942 pr_err("%s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
943 bond->dev->name, new_value);
944 ret = -EINVAL;
945 goto out;
946 } else
947 bond->params.num_unsol_na = new_value;
948out:
949 return ret;
950}
951static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, 892static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
952 bonding_show_n_unsol_na, bonding_store_n_unsol_na); 893 bonding_show_num_peer_notif, bonding_store_num_peer_notif);
953 894
954/* 895/*
955 * Show and set the MII monitor interval. There are two tricky bits 896 * Show and set the MII monitor interval. There are two tricky bits
@@ -1001,7 +942,6 @@ static ssize_t bonding_store_miimon(struct device *d,
1001 bond->dev->name); 942 bond->dev->name);
1002 bond->params.arp_interval = 0; 943 bond->params.arp_interval = 0;
1003 if (bond->params.arp_validate) { 944 if (bond->params.arp_validate) {
1004 bond_unregister_arp(bond);
1005 bond->params.arp_validate = 945 bond->params.arp_validate =
1006 BOND_ARP_VALIDATE_NONE; 946 BOND_ARP_VALIDATE_NONE;
1007 } 947 }
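
[Editor's note] The num_grat_arp/num_unsol_na conversion above collapses the hand-rolled sscanf() parsing and the explicit 0-255 range check into a single kstrtou8() call. As a rough illustration of that pattern (the driver struct, attribute and field names below are invented; only kstrtou8() and the sysfs store signature come from the kernel API), a minimal store handler might look like:

#include <linux/device.h>
#include <linux/kernel.h>       /* kstrtou8() */

/* Hypothetical driver private data holding one u8 tunable. */
struct foo_priv {
        u8 num_notif;
};

static ssize_t foo_store_num_notif(struct device *d,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct foo_priv *priv = dev_get_drvdata(d);
        /* kstrtou8() rejects non-numeric input and values outside 0-255. */
        int err = kstrtou8(buf, 10, &priv->num_notif);

        return err ? err : count;
}
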
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 90736cb4d975..ea1d005be92d 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -24,8 +24,8 @@
24#include "bond_3ad.h" 24#include "bond_3ad.h"
25#include "bond_alb.h" 25#include "bond_alb.h"
26 26
27#define DRV_VERSION "3.7.0" 27#define DRV_VERSION "3.7.1"
28#define DRV_RELDATE "June 2, 2010" 28#define DRV_RELDATE "April 27, 2011"
29#define DRV_NAME "bonding" 29#define DRV_NAME "bonding"
30#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 30#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
31 31
@@ -39,16 +39,6 @@
39 netif_carrier_ok(dev)) 39 netif_carrier_ok(dev))
40 40
41/* 41/*
42 * Checks whether bond is ready for transmit.
43 *
44 * Caller must hold bond->lock
45 */
46#define BOND_IS_OK(bond) \
47 (((bond)->dev->flags & IFF_UP) && \
48 netif_running((bond)->dev) && \
49 ((bond)->slave_cnt > 0))
50
51/*
52 * Checks whether slave is ready for transmit. 42 * Checks whether slave is ready for transmit.
53 */ 43 */
54#define SLAVE_IS_OK(slave) \ 44#define SLAVE_IS_OK(slave) \
@@ -149,8 +139,7 @@ struct bond_params {
149 int mode; 139 int mode;
150 int xmit_policy; 140 int xmit_policy;
151 int miimon; 141 int miimon;
152 int num_grat_arp; 142 u8 num_peer_notif;
153 int num_unsol_na;
154 int arp_interval; 143 int arp_interval;
155 int arp_validate; 144 int arp_validate;
156 int use_carrier; 145 int use_carrier;
@@ -178,9 +167,6 @@ struct vlan_entry {
178 struct list_head vlan_list; 167 struct list_head vlan_list;
179 __be32 vlan_ip; 168 __be32 vlan_ip;
180 unsigned short vlan_id; 169 unsigned short vlan_id;
181#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
182 struct in6_addr vlan_ipv6;
183#endif
184}; 170};
185 171
186struct slave { 172struct slave {
@@ -196,12 +182,12 @@ struct slave {
196 u8 backup:1, /* indicates backup slave. Value corresponds with 182 u8 backup:1, /* indicates backup slave. Value corresponds with
197 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ 183 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
198 inactive:1; /* indicates inactive slave */ 184 inactive:1; /* indicates inactive slave */
185 u8 duplex;
199 u32 original_mtu; 186 u32 original_mtu;
200 u32 link_failure_count; 187 u32 link_failure_count;
201 u8 perm_hwaddr[ETH_ALEN]; 188 u32 speed;
202 u16 speed;
203 u8 duplex;
204 u16 queue_id; 189 u16 queue_id;
190 u8 perm_hwaddr[ETH_ALEN];
205 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 191 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
206 struct tlb_slave_info tlb_info; 192 struct tlb_slave_info tlb_info;
207#ifdef CONFIG_NET_POLL_CONTROLLER 193#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -231,11 +217,12 @@ struct bonding {
231 struct slave *primary_slave; 217 struct slave *primary_slave;
232 bool force_primary; 218 bool force_primary;
233 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 219 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
220 void (*recv_probe)(struct sk_buff *, struct bonding *,
221 struct slave *);
234 rwlock_t lock; 222 rwlock_t lock;
235 rwlock_t curr_slave_lock; 223 rwlock_t curr_slave_lock;
236 s8 kill_timers; 224 s8 kill_timers;
237 s8 send_grat_arp; 225 u8 send_peer_notif;
238 s8 send_unsol_na;
239 s8 setup_by_slave; 226 s8 setup_by_slave;
240 s8 igmp_retrans; 227 s8 igmp_retrans;
241#ifdef CONFIG_PROC_FS 228#ifdef CONFIG_PROC_FS
@@ -260,9 +247,6 @@ struct bonding {
260 struct delayed_work alb_work; 247 struct delayed_work alb_work;
261 struct delayed_work ad_work; 248 struct delayed_work ad_work;
262 struct delayed_work mcast_work; 249 struct delayed_work mcast_work;
263#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
264 struct in6_addr master_ipv6;
265#endif
266#ifdef CONFIG_DEBUG_FS 250#ifdef CONFIG_DEBUG_FS
267 /* debugging suport via debugfs */ 251 /* debugging suport via debugfs */
268 struct dentry *debug_dir; 252 struct dentry *debug_dir;
@@ -409,13 +393,12 @@ void bond_set_mode_ops(struct bonding *bond, int mode);
409int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl); 393int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
410void bond_select_active_slave(struct bonding *bond); 394void bond_select_active_slave(struct bonding *bond);
411void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 395void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
412void bond_register_arp(struct bonding *);
413void bond_unregister_arp(struct bonding *);
414void bond_create_debugfs(void); 396void bond_create_debugfs(void);
415void bond_destroy_debugfs(void); 397void bond_destroy_debugfs(void);
416void bond_debug_register(struct bonding *bond); 398void bond_debug_register(struct bonding *bond);
417void bond_debug_unregister(struct bonding *bond); 399void bond_debug_unregister(struct bonding *bond);
418void bond_debug_reregister(struct bonding *bond); 400void bond_debug_reregister(struct bonding *bond);
401const char *bond_mode_name(int mode);
419 402
420struct bond_net { 403struct bond_net {
421 struct net * net; /* Associated network namespace */ 404 struct net * net; /* Associated network namespace */
@@ -459,23 +442,4 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
459extern const struct bond_parm_tbl pri_reselect_tbl[]; 442extern const struct bond_parm_tbl pri_reselect_tbl[];
460extern struct bond_parm_tbl ad_select_tbl[]; 443extern struct bond_parm_tbl ad_select_tbl[];
461 444
462#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
463void bond_send_unsolicited_na(struct bonding *bond);
464void bond_register_ipv6_notifier(void);
465void bond_unregister_ipv6_notifier(void);
466#else
467static inline void bond_send_unsolicited_na(struct bonding *bond)
468{
469 return;
470}
471static inline void bond_register_ipv6_notifier(void)
472{
473 return;
474}
475static inline void bond_unregister_ipv6_notifier(void)
476{
477 return;
478}
479#endif
480
481#endif /* _LINUX_BONDING_H */ 445#endif /* _LINUX_BONDING_H */
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 143a28c666af..22ce03e55b83 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -709,10 +709,11 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
709 if (ep->autoneg == AUTONEG_ENABLE) 709 if (ep->autoneg == AUTONEG_ENABLE)
710 cp->link_cntl = BMCR_ANENABLE; 710 cp->link_cntl = BMCR_ANENABLE;
711 else { 711 else {
712 u32 speed = ethtool_cmd_speed(ep);
712 cp->link_cntl = 0; 713 cp->link_cntl = 0;
713 if (ep->speed == SPEED_100) 714 if (speed == SPEED_100)
714 cp->link_cntl |= BMCR_SPEED100; 715 cp->link_cntl |= BMCR_SPEED100;
715 else if (ep->speed == SPEED_1000) 716 else if (speed == SPEED_1000)
716 cp->link_cntl |= CAS_BMCR_SPEED1000; 717 cp->link_cntl |= CAS_BMCR_SPEED1000;
717 if (ep->duplex == DUPLEX_FULL) 718 if (ep->duplex == DUPLEX_FULL)
718 cp->link_cntl |= BMCR_FULLDPLX; 719 cp->link_cntl |= BMCR_FULLDPLX;
@@ -4605,18 +4606,17 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4605 if (bmcr & BMCR_ANENABLE) { 4606 if (bmcr & BMCR_ANENABLE) {
4606 cmd->advertising |= ADVERTISED_Autoneg; 4607 cmd->advertising |= ADVERTISED_Autoneg;
4607 cmd->autoneg = AUTONEG_ENABLE; 4608 cmd->autoneg = AUTONEG_ENABLE;
4608 cmd->speed = ((speed == 10) ? 4609 ethtool_cmd_speed_set(cmd, ((speed == 10) ?
4609 SPEED_10 : 4610 SPEED_10 :
4610 ((speed == 1000) ? 4611 ((speed == 1000) ?
4611 SPEED_1000 : SPEED_100)); 4612 SPEED_1000 : SPEED_100)));
4612 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 4613 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4613 } else { 4614 } else {
4614 cmd->autoneg = AUTONEG_DISABLE; 4615 cmd->autoneg = AUTONEG_DISABLE;
4615 cmd->speed = 4616 ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
4616 (bmcr & CAS_BMCR_SPEED1000) ? 4617 SPEED_1000 :
4617 SPEED_1000 : 4618 ((bmcr & BMCR_SPEED100) ?
4618 ((bmcr & BMCR_SPEED100) ? SPEED_100: 4619 SPEED_100 : SPEED_10)));
4619 SPEED_10);
4620 cmd->duplex = 4620 cmd->duplex =
4621 (bmcr & BMCR_FULLDPLX) ? 4621 (bmcr & BMCR_FULLDPLX) ?
4622 DUPLEX_FULL : DUPLEX_HALF; 4622 DUPLEX_FULL : DUPLEX_HALF;
@@ -4633,14 +4633,14 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4633 * settings that we configured. 4633 * settings that we configured.
4634 */ 4634 */
4635 if (cp->link_cntl & BMCR_ANENABLE) { 4635 if (cp->link_cntl & BMCR_ANENABLE) {
4636 cmd->speed = 0; 4636 ethtool_cmd_speed_set(cmd, 0);
4637 cmd->duplex = 0xff; 4637 cmd->duplex = 0xff;
4638 } else { 4638 } else {
4639 cmd->speed = SPEED_10; 4639 ethtool_cmd_speed_set(cmd, SPEED_10);
4640 if (cp->link_cntl & BMCR_SPEED100) { 4640 if (cp->link_cntl & BMCR_SPEED100) {
4641 cmd->speed = SPEED_100; 4641 ethtool_cmd_speed_set(cmd, SPEED_100);
4642 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { 4642 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4643 cmd->speed = SPEED_1000; 4643 ethtool_cmd_speed_set(cmd, SPEED_1000);
4644 } 4644 }
4645 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? 4645 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4646 DUPLEX_FULL : DUPLEX_HALF; 4646 DUPLEX_FULL : DUPLEX_HALF;
@@ -4653,6 +4653,7 @@ static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4653{ 4653{
4654 struct cas *cp = netdev_priv(dev); 4654 struct cas *cp = netdev_priv(dev);
4655 unsigned long flags; 4655 unsigned long flags;
4656 u32 speed = ethtool_cmd_speed(cmd);
4656 4657
4657 /* Verify the settings we care about. */ 4658 /* Verify the settings we care about. */
4658 if (cmd->autoneg != AUTONEG_ENABLE && 4659 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -4660,9 +4661,9 @@ static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4660 return -EINVAL; 4661 return -EINVAL;
4661 4662
4662 if (cmd->autoneg == AUTONEG_DISABLE && 4663 if (cmd->autoneg == AUTONEG_DISABLE &&
4663 ((cmd->speed != SPEED_1000 && 4664 ((speed != SPEED_1000 &&
4664 cmd->speed != SPEED_100 && 4665 speed != SPEED_100 &&
4665 cmd->speed != SPEED_10) || 4666 speed != SPEED_10) ||
4666 (cmd->duplex != DUPLEX_HALF && 4667 (cmd->duplex != DUPLEX_HALF &&
4667 cmd->duplex != DUPLEX_FULL))) 4668 cmd->duplex != DUPLEX_FULL)))
4668 return -EINVAL; 4669 return -EINVAL;
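
[Editor's note] The cassini hunks follow the tree-wide switch from reading cmd->speed directly to the ethtool_cmd_speed()/ethtool_cmd_speed_set() accessors, which combine the low and high 16-bit speed fields of struct ethtool_cmd. A hedged sketch of how a get_settings handler reports link speed with the setter (the driver name and private fields are made up; only the accessor and the -1 "unknown" convention are taken from the patch):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical per-device state. */
struct foo_priv {
        bool link_up;
        u32  link_speed;        /* SPEED_10 / SPEED_100 / SPEED_1000 */
        u8   duplex;            /* DUPLEX_HALF / DUPLEX_FULL */
};

static int foo_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct foo_priv *priv = netdev_priv(dev);

        if (priv->link_up) {
                /* The accessor splits the value into speed/speed_hi. */
                ethtool_cmd_speed_set(cmd, priv->link_speed);
                cmd->duplex = priv->duplex;
        } else {
                ethtool_cmd_speed_set(cmd, -1);   /* link down: unknown */
                cmd->duplex = -1;
        }
        return 0;
}
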
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 092f31a126e6..c26d863e1697 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -264,11 +264,6 @@ struct adapter {
264 264
265enum { /* adapter flags */ 265enum { /* adapter flags */
266 FULL_INIT_DONE = 1 << 0, 266 FULL_INIT_DONE = 1 << 0,
267 TSO_CAPABLE = 1 << 2,
268 TCP_CSUM_CAPABLE = 1 << 3,
269 UDP_CSUM_CAPABLE = 1 << 4,
270 VLAN_ACCEL_CAPABLE = 1 << 5,
271 RX_CSUM_ENABLED = 1 << 6,
272}; 267};
273 268
274struct mdio_ops; 269struct mdio_ops;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 0f71304e0542..b422d83f5343 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -192,10 +192,8 @@ static void link_start(struct port_info *p)
192 192
193static void enable_hw_csum(struct adapter *adapter) 193static void enable_hw_csum(struct adapter *adapter)
194{ 194{
195 if (adapter->flags & TSO_CAPABLE) 195 if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
196 t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */ 196 t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
197 if (adapter->flags & UDP_CSUM_CAPABLE)
198 t1_tp_set_udp_checksum_offload(adapter->tp, 1);
199 t1_tp_set_tcp_checksum_offload(adapter->tp, 1); 197 t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
200} 198}
201 199
@@ -579,10 +577,10 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
579 cmd->advertising = p->link_config.advertising; 577 cmd->advertising = p->link_config.advertising;
580 578
581 if (netif_carrier_ok(dev)) { 579 if (netif_carrier_ok(dev)) {
582 cmd->speed = p->link_config.speed; 580 ethtool_cmd_speed_set(cmd, p->link_config.speed);
583 cmd->duplex = p->link_config.duplex; 581 cmd->duplex = p->link_config.duplex;
584 } else { 582 } else {
585 cmd->speed = -1; 583 ethtool_cmd_speed_set(cmd, -1);
586 cmd->duplex = -1; 584 cmd->duplex = -1;
587 } 585 }
588 586
@@ -640,11 +638,12 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
640 return -EOPNOTSUPP; /* can't change speed/duplex */ 638 return -EOPNOTSUPP; /* can't change speed/duplex */
641 639
642 if (cmd->autoneg == AUTONEG_DISABLE) { 640 if (cmd->autoneg == AUTONEG_DISABLE) {
643 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); 641 u32 speed = ethtool_cmd_speed(cmd);
642 int cap = speed_duplex_to_caps(speed, cmd->duplex);
644 643
645 if (!(lc->supported & cap) || cmd->speed == SPEED_1000) 644 if (!(lc->supported & cap) || (speed == SPEED_1000))
646 return -EINVAL; 645 return -EINVAL;
647 lc->requested_speed = cmd->speed; 646 lc->requested_speed = speed;
648 lc->requested_duplex = cmd->duplex; 647 lc->requested_duplex = cmd->duplex;
649 lc->advertising = 0; 648 lc->advertising = 0;
650 } else { 649 } else {
@@ -705,33 +704,6 @@ static int set_pauseparam(struct net_device *dev,
705 return 0; 704 return 0;
706} 705}
707 706
708static u32 get_rx_csum(struct net_device *dev)
709{
710 struct adapter *adapter = dev->ml_priv;
711
712 return (adapter->flags & RX_CSUM_ENABLED) != 0;
713}
714
715static int set_rx_csum(struct net_device *dev, u32 data)
716{
717 struct adapter *adapter = dev->ml_priv;
718
719 if (data)
720 adapter->flags |= RX_CSUM_ENABLED;
721 else
722 adapter->flags &= ~RX_CSUM_ENABLED;
723 return 0;
724}
725
726static int set_tso(struct net_device *dev, u32 value)
727{
728 struct adapter *adapter = dev->ml_priv;
729
730 if (!(adapter->flags & TSO_CAPABLE))
731 return value ? -EOPNOTSUPP : 0;
732 return ethtool_op_set_tso(dev, value);
733}
734
735static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 707static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
736{ 708{
737 struct adapter *adapter = dev->ml_priv; 709 struct adapter *adapter = dev->ml_priv;
@@ -831,17 +803,12 @@ static const struct ethtool_ops t1_ethtool_ops = {
831 .get_eeprom = get_eeprom, 803 .get_eeprom = get_eeprom,
832 .get_pauseparam = get_pauseparam, 804 .get_pauseparam = get_pauseparam,
833 .set_pauseparam = set_pauseparam, 805 .set_pauseparam = set_pauseparam,
834 .get_rx_csum = get_rx_csum,
835 .set_rx_csum = set_rx_csum,
836 .set_tx_csum = ethtool_op_set_tx_csum,
837 .set_sg = ethtool_op_set_sg,
838 .get_link = ethtool_op_get_link, 806 .get_link = ethtool_op_get_link,
839 .get_strings = get_strings, 807 .get_strings = get_strings,
840 .get_sset_count = get_sset_count, 808 .get_sset_count = get_sset_count,
841 .get_ethtool_stats = get_stats, 809 .get_ethtool_stats = get_stats,
842 .get_regs_len = get_regs_len, 810 .get_regs_len = get_regs_len,
843 .get_regs = get_regs, 811 .get_regs = get_regs,
844 .set_tso = set_tso,
845}; 812};
846 813
847static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 814static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1105,28 +1072,28 @@ static int __devinit init_one(struct pci_dev *pdev,
1105 netdev->mem_start = mmio_start; 1072 netdev->mem_start = mmio_start;
1106 netdev->mem_end = mmio_start + mmio_len - 1; 1073 netdev->mem_end = mmio_start + mmio_len - 1;
1107 netdev->ml_priv = adapter; 1074 netdev->ml_priv = adapter;
1108 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 1075 netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
1109 netdev->features |= NETIF_F_LLTX; 1076 NETIF_F_RXCSUM;
1077 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
1078 NETIF_F_RXCSUM | NETIF_F_LLTX;
1110 1079
1111 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1112 if (pci_using_dac) 1080 if (pci_using_dac)
1113 netdev->features |= NETIF_F_HIGHDMA; 1081 netdev->features |= NETIF_F_HIGHDMA;
1114 if (vlan_tso_capable(adapter)) { 1082 if (vlan_tso_capable(adapter)) {
1115#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 1083#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1116 adapter->flags |= VLAN_ACCEL_CAPABLE;
1117 netdev->features |= 1084 netdev->features |=
1118 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1085 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1119#endif 1086#endif
1120 1087
1121 /* T204: disable TSO */ 1088 /* T204: disable TSO */
1122 if (!(is_T2(adapter)) || bi->port_number != 4) { 1089 if (!(is_T2(adapter)) || bi->port_number != 4) {
1123 adapter->flags |= TSO_CAPABLE; 1090 netdev->hw_features |= NETIF_F_TSO;
1124 netdev->features |= NETIF_F_TSO; 1091 netdev->features |= NETIF_F_TSO;
1125 } 1092 }
1126 } 1093 }
1127 1094
1128 netdev->netdev_ops = &cxgb_netdev_ops; 1095 netdev->netdev_ops = &cxgb_netdev_ops;
1129 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ? 1096 netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
1130 sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); 1097 sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1131 1098
1132 netif_napi_add(netdev, &adapter->napi, t1_poll, 64); 1099 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8754d4473042..b948ea737550 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -929,7 +929,7 @@ void t1_sge_intr_enable(struct sge *sge)
929 u32 en = SGE_INT_ENABLE; 929 u32 en = SGE_INT_ENABLE;
930 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); 930 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
931 931
932 if (sge->adapter->flags & TSO_CAPABLE) 932 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
933 en &= ~F_PACKET_TOO_BIG; 933 en &= ~F_PACKET_TOO_BIG;
934 writel(en, sge->adapter->regs + A_SG_INT_ENABLE); 934 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
935 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); 935 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
@@ -952,7 +952,7 @@ int t1_sge_intr_error_handler(struct sge *sge)
952 struct adapter *adapter = sge->adapter; 952 struct adapter *adapter = sge->adapter;
953 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); 953 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
954 954
955 if (adapter->flags & TSO_CAPABLE) 955 if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
956 cause &= ~F_PACKET_TOO_BIG; 956 cause &= ~F_PACKET_TOO_BIG;
957 if (cause & F_RESPQ_EXHAUSTED) 957 if (cause & F_RESPQ_EXHAUSTED)
958 sge->stats.respQ_empty++; 958 sge->stats.respQ_empty++;
@@ -1369,6 +1369,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1369 const struct cpl_rx_pkt *p; 1369 const struct cpl_rx_pkt *p;
1370 struct adapter *adapter = sge->adapter; 1370 struct adapter *adapter = sge->adapter;
1371 struct sge_port_stats *st; 1371 struct sge_port_stats *st;
1372 struct net_device *dev;
1372 1373
1373 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad); 1374 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1374 if (unlikely(!skb)) { 1375 if (unlikely(!skb)) {
@@ -1384,9 +1385,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1384 __skb_pull(skb, sizeof(*p)); 1385 __skb_pull(skb, sizeof(*p));
1385 1386
1386 st = this_cpu_ptr(sge->port_stats[p->iff]); 1387 st = this_cpu_ptr(sge->port_stats[p->iff]);
1388 dev = adapter->port[p->iff].dev;
1387 1389
1388 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); 1390 skb->protocol = eth_type_trans(skb, dev);
1389 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && 1391 if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
1390 skb->protocol == htons(ETH_P_IP) && 1392 skb->protocol == htons(ETH_P_IP) &&
1391 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { 1393 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
1392 ++st->rx_cso_good; 1394 ++st->rx_cso_good;
@@ -1838,8 +1840,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1838 return NETDEV_TX_OK; 1840 return NETDEV_TX_OK;
1839 } 1841 }
1840 1842
1841 if (!(adapter->flags & UDP_CSUM_CAPABLE) && 1843 if (skb->ip_summed == CHECKSUM_PARTIAL &&
1842 skb->ip_summed == CHECKSUM_PARTIAL &&
1843 ip_hdr(skb)->protocol == IPPROTO_UDP) { 1844 ip_hdr(skb)->protocol == IPPROTO_UDP) {
1844 if (unlikely(skb_checksum_help(skb))) { 1845 if (unlikely(skb_checksum_help(skb))) {
1845 pr_debug("%s: unable to do udp checksum\n", dev->name); 1846 pr_debug("%s: unable to do udp checksum\n", dev->name);
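
[Editor's note] The sge.c receive-path change above replaces the driver-private RX_CSUM_ENABLED flag with a check of dev->features & NETIF_F_RXCSUM, so the stack's feature toggling is honoured directly. A generic sketch of that check, not the chelsio code itself (the function and the hw_csum_ok parameter are invented stand-ins for the driver's descriptor parsing):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Mark the checksum as already verified only when the user has left
 * NETIF_F_RXCSUM enabled and the hardware reported a good checksum.
 */
static void foo_rx_checksum(struct net_device *dev, struct sk_buff *skb,
                            bool hw_csum_ok)
{
        if ((dev->features & NETIF_F_RXCSUM) && hw_csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);
}
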
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
index 6222d585e447..8bed4a59e65f 100644
--- a/drivers/net/chelsio/tp.c
+++ b/drivers/net/chelsio/tp.c
@@ -152,11 +152,6 @@ void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
152 set_csum_offload(tp, F_IP_CSUM, enable); 152 set_csum_offload(tp, F_IP_CSUM, enable);
153} 153}
154 154
155void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable)
156{
157 set_csum_offload(tp, F_UDP_CSUM, enable);
158}
159
160void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable) 155void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
161{ 156{
162 set_csum_offload(tp, F_TCP_CSUM, enable); 157 set_csum_offload(tp, F_TCP_CSUM, enable);
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h
index 32fc71e58913..dfd8ce25106a 100644
--- a/drivers/net/chelsio/tp.h
+++ b/drivers/net/chelsio/tp.h
@@ -65,7 +65,6 @@ void t1_tp_intr_clear(struct petp *tp);
65int t1_tp_intr_handler(struct petp *tp); 65int t1_tp_intr_handler(struct petp *tp);
66 66
67void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps); 67void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
68void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable);
69void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable); 68void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
70void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable); 69void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
71int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size); 70int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 8cca60e43444..cde59b4e5ef8 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2966,31 +2966,36 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
2966 return 0; 2966 return 0;
2967} 2967}
2968 2968
2969static void cnic_ulp_stop(struct cnic_dev *dev) 2969static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
2970{ 2970{
2971 struct cnic_local *cp = dev->cnic_priv; 2971 struct cnic_ulp_ops *ulp_ops;
2972 int if_type;
2973
2974 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2975 2972
2976 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2973 if (if_type == CNIC_ULP_ISCSI)
2977 struct cnic_ulp_ops *ulp_ops; 2974 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2978 2975
2979 mutex_lock(&cnic_lock); 2976 mutex_lock(&cnic_lock);
2980 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 2977 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2981 lockdep_is_held(&cnic_lock)); 2978 lockdep_is_held(&cnic_lock));
2982 if (!ulp_ops) { 2979 if (!ulp_ops) {
2983 mutex_unlock(&cnic_lock);
2984 continue;
2985 }
2986 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2987 mutex_unlock(&cnic_lock); 2980 mutex_unlock(&cnic_lock);
2981 return;
2982 }
2983 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2984 mutex_unlock(&cnic_lock);
2988 2985
2989 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2986 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2990 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 2987 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2991 2988
2992 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2989 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2993 } 2990}
2991
2992static void cnic_ulp_stop(struct cnic_dev *dev)
2993{
2994 struct cnic_local *cp = dev->cnic_priv;
2995 int if_type;
2996
2997 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
2998 cnic_ulp_stop_one(cp, if_type);
2994} 2999}
2995 3000
2996static void cnic_ulp_start(struct cnic_dev *dev) 3001static void cnic_ulp_start(struct cnic_dev *dev)
@@ -3039,6 +3044,12 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3039 3044
3040 cnic_put(dev); 3045 cnic_put(dev);
3041 break; 3046 break;
3047 case CNIC_CTL_STOP_ISCSI_CMD: {
3048 struct cnic_local *cp = dev->cnic_priv;
3049 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3050 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3051 break;
3052 }
3042 case CNIC_CTL_COMPLETION_CMD: { 3053 case CNIC_CTL_COMPLETION_CMD: {
3043 u32 cid = BNX2X_SW_CID(info->data.comp.cid); 3054 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
3044 u32 l5_cid; 3055 u32 l5_cid;
@@ -3562,8 +3573,12 @@ static void cnic_init_csk_state(struct cnic_sock *csk)
3562 3573
3563static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3574static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3564{ 3575{
3576 struct cnic_local *cp = csk->dev->cnic_priv;
3565 int err = 0; 3577 int err = 0;
3566 3578
3579 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3580 return -EOPNOTSUPP;
3581
3567 if (!cnic_in_use(csk)) 3582 if (!cnic_in_use(csk))
3568 return -EINVAL; 3583 return -EINVAL;
3569 3584
@@ -3965,6 +3980,15 @@ static void cnic_delete_task(struct work_struct *work)
3965 cp = container_of(work, struct cnic_local, delete_task.work); 3980 cp = container_of(work, struct cnic_local, delete_task.work);
3966 dev = cp->dev; 3981 dev = cp->dev;
3967 3982
3983 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
3984 struct drv_ctl_info info;
3985
3986 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
3987
3988 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
3989 cp->ethdev->drv_ctl(dev->netdev, &info);
3990 }
3991
3968 for (i = 0; i < cp->max_cid_space; i++) { 3992 for (i = 0; i < cp->max_cid_space; i++) {
3969 struct cnic_context *ctx = &cp->ctx_tbl[i]; 3993 struct cnic_context *ctx = &cp->ctx_tbl[i];
3970 3994
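
[Editor's note] The new CNIC_CTL_STOP_ISCSI_CMD handling sets a local flag bit and kicks the existing delete_task delayed work, which then performs the stop in process context. A generic sketch of that defer-to-workqueue idiom under invented names (the bitops and workqueue calls are standard kernel APIs; everything else is hypothetical):

#include <linux/workqueue.h>
#include <linux/bitops.h>

#define FOO_FL_STOP     0       /* bit number in foo->flags */

struct foo {
        unsigned long flags;
        struct delayed_work task;
};

/* Caller context that must not block: record the request and defer it. */
static void foo_request_stop(struct foo *foo)
{
        set_bit(FOO_FL_STOP, &foo->flags);
        schedule_delayed_work(&foo->task, 0);
}

/* Work handler runs in process context and consumes the request once. */
static void foo_task(struct work_struct *work)
{
        struct foo *foo = container_of(work, struct foo, task.work);

        if (test_and_clear_bit(FOO_FL_STOP, &foo->flags)) {
                /* ...perform the heavyweight stop/teardown here... */
        }
}
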
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 4456260c653c..3367a6d3a774 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -226,6 +226,7 @@ struct cnic_local {
226#define CNIC_LCL_FL_KWQ_INIT 0x0 226#define CNIC_LCL_FL_KWQ_INIT 0x0
227#define CNIC_LCL_FL_L2_WAIT 0x1 227#define CNIC_LCL_FL_L2_WAIT 0x1
228#define CNIC_LCL_FL_RINGS_INITED 0x2 228#define CNIC_LCL_FL_RINGS_INITED 0x2
229#define CNIC_LCL_FL_STOP_ISCSI 0x4
229 230
230 struct cnic_dev *dev; 231 struct cnic_dev *dev;
231 232
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index e01b49ee3591..fdd8e46a9050 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.2.13" 15#define CNIC_MODULE_VERSION "2.2.14"
16#define CNIC_MODULE_RELDATE "Jan 31, 2011" 16#define CNIC_MODULE_RELDATE "Mar 30, 2011"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -85,6 +85,7 @@ struct kcqe {
85#define CNIC_CTL_STOP_CMD 1 85#define CNIC_CTL_STOP_CMD 1
86#define CNIC_CTL_START_CMD 2 86#define CNIC_CTL_START_CMD 2
87#define CNIC_CTL_COMPLETION_CMD 3 87#define CNIC_CTL_COMPLETION_CMD 3
88#define CNIC_CTL_STOP_ISCSI_CMD 4
88 89
89#define DRV_CTL_IO_WR_CMD 0x101 90#define DRV_CTL_IO_WR_CMD 0x101
90#define DRV_CTL_IO_RD_CMD 0x102 91#define DRV_CTL_IO_RD_CMD 0x102
@@ -94,6 +95,7 @@ struct kcqe {
94#define DRV_CTL_START_L2_CMD 0x106 95#define DRV_CTL_START_L2_CMD 0x106
95#define DRV_CTL_STOP_L2_CMD 0x107 96#define DRV_CTL_STOP_L2_CMD 0x107
96#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c 97#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
98#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
97 99
98struct cnic_ctl_completion { 100struct cnic_ctl_completion {
99 u32 cid; 101 u32 cid;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 9d267d3a6892..e66aceb57cef 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -491,8 +491,8 @@ e100_open(struct net_device *dev)
491 491
492 /* allocate the irq corresponding to the receiving DMA */ 492 /* allocate the irq corresponding to the receiving DMA */
493 493
494 if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 494 if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname,
495 IRQF_SAMPLE_RANDOM, cardname, (void *)dev)) { 495 (void *)dev)) {
496 goto grace_exit0; 496 goto grace_exit0;
497 } 497 }
498 498
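
[Editor's note] The cris hunk drops IRQF_SAMPLE_RANDOM from the request_irq() flags, part of the phase-out of that flag. For reference, a minimal hedged example of registering an interrupt handler with no special flags (the handler, IRQ number and device are invented; only the request_irq() signature is assumed from the kernel API):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
        /* Acknowledge and service the device interrupt here. */
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct net_device *dev, int irq)
{
        /* flags == 0: no IRQF_SAMPLE_RANDOM, not shared, default trigger. */
        return request_irq(irq, foo_interrupt, 0, dev->name, dev);
}
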
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index ef67be59680f..7300de5a1426 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -50,11 +50,6 @@ struct adapter;
50struct sge_qset; 50struct sge_qset;
51struct port_info; 51struct port_info;
52 52
53enum { /* rx_offload flags */
54 T3_RX_CSUM = 1 << 0,
55 T3_LRO = 1 << 1,
56};
57
58enum mac_idx_types { 53enum mac_idx_types {
59 LAN_MAC_IDX = 0, 54 LAN_MAC_IDX = 0,
60 SAN_MAC_IDX, 55 SAN_MAC_IDX,
@@ -74,7 +69,6 @@ struct port_info {
74 struct vlan_group *vlan_grp; 69 struct vlan_group *vlan_grp;
75 struct sge_qset *qs; 70 struct sge_qset *qs;
76 u8 port_id; 71 u8 port_id;
77 u8 rx_offload;
78 u8 nqsets; 72 u8 nqsets;
79 u8 first_qset; 73 u8 first_qset;
80 struct cphy phy; 74 struct cphy phy;
@@ -212,7 +206,6 @@ struct sge_qset { /* an SGE queue set */
212 struct sge_fl fl[SGE_RXQ_PER_SET]; 206 struct sge_fl fl[SGE_RXQ_PER_SET];
213 struct sge_txq txq[SGE_TXQ_PER_SET]; 207 struct sge_txq txq[SGE_TXQ_PER_SET];
214 int nomem; 208 int nomem;
215 int lro_enabled;
216 void *lro_va; 209 void *lro_va;
217 struct net_device *netdev; 210 struct net_device *netdev;
218 struct netdev_queue *tx_q; /* associated netdev TX queue */ 211 struct netdev_queue *tx_q; /* associated netdev TX queue */
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 5ccb77d078aa..056ee8c831f1 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -317,7 +317,6 @@ struct tp_params {
317 317
318struct qset_params { /* SGE queue set parameters */ 318struct qset_params { /* SGE queue set parameters */
319 unsigned int polling; /* polling/interrupt service for rspq */ 319 unsigned int polling; /* polling/interrupt service for rspq */
320 unsigned int lro; /* large receive offload */
321 unsigned int coalesce_usecs; /* irq coalescing timer */ 320 unsigned int coalesce_usecs; /* irq coalescing timer */
322 unsigned int rspq_size; /* # of entries in response queue */ 321 unsigned int rspq_size; /* # of entries in response queue */
323 unsigned int fl_size; /* # of entries in regular free list */ 322 unsigned int fl_size; /* # of entries in regular free list */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 910893143295..9081ce037149 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -644,26 +644,6 @@ static void enable_all_napi(struct adapter *adap)
644} 644}
645 645
646/** 646/**
647 * set_qset_lro - Turn a queue set's LRO capability on and off
648 * @dev: the device the qset is attached to
649 * @qset_idx: the queue set index
650 * @val: the LRO switch
651 *
652 * Sets LRO on or off for a particular queue set.
653 * the device's features flag is updated to reflect the LRO
654 * capability when all queues belonging to the device are
655 * in the same state.
656 */
657static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
658{
659 struct port_info *pi = netdev_priv(dev);
660 struct adapter *adapter = pi->adapter;
661
662 adapter->params.sge.qset[qset_idx].lro = !!val;
663 adapter->sge.qs[qset_idx].lro_enabled = !!val;
664}
665
666/**
667 * setup_sge_qsets - configure SGE Tx/Rx/response queues 647 * setup_sge_qsets - configure SGE Tx/Rx/response queues
668 * @adap: the adapter 648 * @adap: the adapter
669 * 649 *
@@ -685,7 +665,6 @@ static int setup_sge_qsets(struct adapter *adap)
685 665
686 pi->qs = &adap->sge.qs[pi->first_qset]; 666 pi->qs = &adap->sge.qs[pi->first_qset];
687 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { 667 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
688 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
689 err = t3_sge_alloc_qset(adap, qset_idx, 1, 668 err = t3_sge_alloc_qset(adap, qset_idx, 1,
690 (adap->flags & USING_MSIX) ? qset_idx + 1 : 669 (adap->flags & USING_MSIX) ? qset_idx + 1 :
691 irq_idx, 670 irq_idx,
@@ -1749,23 +1728,26 @@ static int restart_autoneg(struct net_device *dev)
1749 return 0; 1728 return 0;
1750} 1729}
1751 1730
1752static int cxgb3_phys_id(struct net_device *dev, u32 data) 1731static int set_phys_id(struct net_device *dev,
1732 enum ethtool_phys_id_state state)
1753{ 1733{
1754 struct port_info *pi = netdev_priv(dev); 1734 struct port_info *pi = netdev_priv(dev);
1755 struct adapter *adapter = pi->adapter; 1735 struct adapter *adapter = pi->adapter;
1756 int i;
1757 1736
1758 if (data == 0) 1737 switch (state) {
1759 data = 2; 1738 case ETHTOOL_ID_ACTIVE:
1739 return 1; /* cycle on/off once per second */
1760 1740
1761 for (i = 0; i < data * 2; i++) { 1741 case ETHTOOL_ID_OFF:
1742 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1743 break;
1744
1745 case ETHTOOL_ID_ON:
1746 case ETHTOOL_ID_INACTIVE:
1762 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 1747 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1763 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1764 if (msleep_interruptible(500))
1765 break;
1766 }
1767 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1768 F_GPIO0_OUT_VAL); 1748 F_GPIO0_OUT_VAL);
1749 }
1750
1769 return 0; 1751 return 0;
1770} 1752}
1771 1753
@@ -1777,10 +1759,10 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1777 cmd->advertising = p->link_config.advertising; 1759 cmd->advertising = p->link_config.advertising;
1778 1760
1779 if (netif_carrier_ok(dev)) { 1761 if (netif_carrier_ok(dev)) {
1780 cmd->speed = p->link_config.speed; 1762 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1781 cmd->duplex = p->link_config.duplex; 1763 cmd->duplex = p->link_config.duplex;
1782 } else { 1764 } else {
1783 cmd->speed = -1; 1765 ethtool_cmd_speed_set(cmd, -1);
1784 cmd->duplex = -1; 1766 cmd->duplex = -1;
1785 } 1767 }
1786 1768
@@ -1839,7 +1821,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1839 * being requested. 1821 * being requested.
1840 */ 1822 */
1841 if (cmd->autoneg == AUTONEG_DISABLE) { 1823 if (cmd->autoneg == AUTONEG_DISABLE) {
1842 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); 1824 u32 speed = ethtool_cmd_speed(cmd);
1825 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1843 if (lc->supported & cap) 1826 if (lc->supported & cap)
1844 return 0; 1827 return 0;
1845 } 1828 }
@@ -1847,11 +1830,12 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1847 } 1830 }
1848 1831
1849 if (cmd->autoneg == AUTONEG_DISABLE) { 1832 if (cmd->autoneg == AUTONEG_DISABLE) {
1850 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); 1833 u32 speed = ethtool_cmd_speed(cmd);
1834 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1851 1835
1852 if (!(lc->supported & cap) || cmd->speed == SPEED_1000) 1836 if (!(lc->supported & cap) || (speed == SPEED_1000))
1853 return -EINVAL; 1837 return -EINVAL;
1854 lc->requested_speed = cmd->speed; 1838 lc->requested_speed = speed;
1855 lc->requested_duplex = cmd->duplex; 1839 lc->requested_duplex = cmd->duplex;
1856 lc->advertising = 0; 1840 lc->advertising = 0;
1857 } else { 1841 } else {
@@ -1907,29 +1891,6 @@ static int set_pauseparam(struct net_device *dev,
1907 return 0; 1891 return 0;
1908} 1892}
1909 1893
1910static u32 get_rx_csum(struct net_device *dev)
1911{
1912 struct port_info *p = netdev_priv(dev);
1913
1914 return p->rx_offload & T3_RX_CSUM;
1915}
1916
1917static int set_rx_csum(struct net_device *dev, u32 data)
1918{
1919 struct port_info *p = netdev_priv(dev);
1920
1921 if (data) {
1922 p->rx_offload |= T3_RX_CSUM;
1923 } else {
1924 int i;
1925
1926 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1927 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1928 set_qset_lro(dev, i, 0);
1929 }
1930 return 0;
1931}
1932
1933static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 1894static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1934{ 1895{
1935 struct port_info *pi = netdev_priv(dev); 1896 struct port_info *pi = netdev_priv(dev);
@@ -2101,20 +2062,15 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
2101 .set_eeprom = set_eeprom, 2062 .set_eeprom = set_eeprom,
2102 .get_pauseparam = get_pauseparam, 2063 .get_pauseparam = get_pauseparam,
2103 .set_pauseparam = set_pauseparam, 2064 .set_pauseparam = set_pauseparam,
2104 .get_rx_csum = get_rx_csum,
2105 .set_rx_csum = set_rx_csum,
2106 .set_tx_csum = ethtool_op_set_tx_csum,
2107 .set_sg = ethtool_op_set_sg,
2108 .get_link = ethtool_op_get_link, 2065 .get_link = ethtool_op_get_link,
2109 .get_strings = get_strings, 2066 .get_strings = get_strings,
2110 .phys_id = cxgb3_phys_id, 2067 .set_phys_id = set_phys_id,
2111 .nway_reset = restart_autoneg, 2068 .nway_reset = restart_autoneg,
2112 .get_sset_count = get_sset_count, 2069 .get_sset_count = get_sset_count,
2113 .get_ethtool_stats = get_stats, 2070 .get_ethtool_stats = get_stats,
2114 .get_regs_len = get_regs_len, 2071 .get_regs_len = get_regs_len,
2115 .get_regs = get_regs, 2072 .get_regs = get_regs,
2116 .get_wol = get_wol, 2073 .get_wol = get_wol,
2117 .set_tso = ethtool_op_set_tso,
2118}; 2074};
2119 2075
2120static int in_range(int val, int lo, int hi) 2076static int in_range(int val, int lo, int hi)
@@ -2162,15 +2118,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2162 MAX_RSPQ_ENTRIES)) 2118 MAX_RSPQ_ENTRIES))
2163 return -EINVAL; 2119 return -EINVAL;
2164 2120
2165 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2166 for_each_port(adapter, i) {
2167 pi = adap2pinfo(adapter, i);
2168 if (t.qset_idx >= pi->first_qset &&
2169 t.qset_idx < pi->first_qset + pi->nqsets &&
2170 !(pi->rx_offload & T3_RX_CSUM))
2171 return -EINVAL;
2172 }
2173
2174 if ((adapter->flags & FULL_INIT_DONE) && 2121 if ((adapter->flags & FULL_INIT_DONE) &&
2175 (t.rspq_size >= 0 || t.fl_size[0] >= 0 || 2122 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2176 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || 2123 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
@@ -2231,8 +2178,14 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2231 } 2178 }
2232 } 2179 }
2233 } 2180 }
2234 if (t.lro >= 0) 2181
2235 set_qset_lro(dev, t.qset_idx, t.lro); 2182 if (t.lro >= 0) {
2183 if (t.lro)
2184 dev->wanted_features |= NETIF_F_GRO;
2185 else
2186 dev->wanted_features &= ~NETIF_F_GRO;
2187 netdev_update_features(dev);
2188 }
2236 2189
2237 break; 2190 break;
2238 } 2191 }
@@ -2266,7 +2219,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2266 t.fl_size[0] = q->fl_size; 2219 t.fl_size[0] = q->fl_size;
2267 t.fl_size[1] = q->jumbo_size; 2220 t.fl_size[1] = q->jumbo_size;
2268 t.polling = q->polling; 2221 t.polling = q->polling;
2269 t.lro = q->lro; 2222 t.lro = !!(dev->features & NETIF_F_GRO);
2270 t.intr_lat = q->coalesce_usecs; 2223 t.intr_lat = q->coalesce_usecs;
2271 t.cong_thres = q->cong_thres; 2224 t.cong_thres = q->cong_thres;
2272 t.qnum = q1; 2225 t.qnum = q1;
@@ -3304,18 +3257,18 @@ static int __devinit init_one(struct pci_dev *pdev,
3304 adapter->port[i] = netdev; 3257 adapter->port[i] = netdev;
3305 pi = netdev_priv(netdev); 3258 pi = netdev_priv(netdev);
3306 pi->adapter = adapter; 3259 pi->adapter = adapter;
3307 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3308 pi->port_id = i; 3260 pi->port_id = i;
3309 netif_carrier_off(netdev); 3261 netif_carrier_off(netdev);
3310 netdev->irq = pdev->irq; 3262 netdev->irq = pdev->irq;
3311 netdev->mem_start = mmio_start; 3263 netdev->mem_start = mmio_start;
3312 netdev->mem_end = mmio_start + mmio_len - 1; 3264 netdev->mem_end = mmio_start + mmio_len - 1;
3313 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 3265 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3314 netdev->features |= NETIF_F_GRO; 3266 NETIF_F_TSO | NETIF_F_RXCSUM;
3267 netdev->features |= netdev->hw_features |
3268 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3315 if (pci_using_dac) 3269 if (pci_using_dac)
3316 netdev->features |= NETIF_F_HIGHDMA; 3270 netdev->features |= NETIF_F_HIGHDMA;
3317 3271
3318 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3319 netdev->netdev_ops = &cxgb_netdev_ops; 3272 netdev->netdev_ops = &cxgb_netdev_ops;
3320 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 3273 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3321 } 3274 }
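
[Editor's note] The cxgb3 conversion above replaces the old .phys_id blink loop with .set_phys_id, where the ethtool core drives the timing and calls back with ETHTOOL_ID_ACTIVE/ON/OFF/INACTIVE. A skeleton of that state machine (the LED register writes are placeholders; only the callback contract and return-value convention come from the ethtool API):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_set_phys_id(struct net_device *dev,
                           enum ethtool_phys_id_state state)
{
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                /* Let the ethtool core toggle ON/OFF once per second. */
                return 1;
        case ETHTOOL_ID_ON:
                /* foo_led_set(dev, true);   -- placeholder LED control */
                break;
        case ETHTOOL_ID_OFF:
                /* foo_led_set(dev, false);  -- placeholder LED control */
                break;
        case ETHTOOL_ID_INACTIVE:
                /* Restore normal, link-driven LED behaviour. */
                break;
        }
        return 0;
}
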
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bfa2d56af1ee..cba1401377ab 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2019,7 +2019,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2019 skb_pull(skb, sizeof(*p) + pad); 2019 skb_pull(skb, sizeof(*p) + pad);
2020 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); 2020 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2021 pi = netdev_priv(skb->dev); 2021 pi = netdev_priv(skb->dev);
2022 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && 2022 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2023 p->csum == htons(0xffff) && !p->fragment) { 2023 p->csum == htons(0xffff) && !p->fragment) {
2024 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2024 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2025 skb->ip_summed = CHECKSUM_UNNECESSARY; 2025 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2120,7 +2120,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2120 offset = 2 + sizeof(struct cpl_rx_pkt); 2120 offset = 2 + sizeof(struct cpl_rx_pkt);
2121 cpl = qs->lro_va = sd->pg_chunk.va + 2; 2121 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2122 2122
2123 if ((pi->rx_offload & T3_RX_CSUM) && 2123 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2124 cpl->csum_valid && cpl->csum == htons(0xffff)) { 2124 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2125 skb->ip_summed = CHECKSUM_UNNECESSARY; 2125 skb->ip_summed = CHECKSUM_UNNECESSARY;
2126 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2126 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
@@ -2285,7 +2285,8 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
2285 q->next_holdoff = q->holdoff_tmr; 2285 q->next_holdoff = q->holdoff_tmr;
2286 2286
2287 while (likely(budget_left && is_new_response(r, q))) { 2287 while (likely(budget_left && is_new_response(r, q))) {
2288 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled; 2288 int packet_complete, eth, ethpad = 2;
2289 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2289 struct sk_buff *skb = NULL; 2290 struct sk_buff *skb = NULL;
2290 u32 len, flags; 2291 u32 len, flags;
2291 __be32 rss_hi, rss_lo; 2292 __be32 rss_hi, rss_lo;
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 01d49eaa44d2..bc9982a4c1f4 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -290,7 +290,6 @@ struct port_info {
290 u8 port_id; 290 u8 port_id;
291 u8 tx_chan; 291 u8 tx_chan;
292 u8 lport; /* associated offload logical port */ 292 u8 lport; /* associated offload logical port */
293 u8 rx_offload; /* CSO, etc */
294 u8 nqsets; /* # of qsets */ 293 u8 nqsets; /* # of qsets */
295 u8 first_qset; /* index of first qset */ 294 u8 first_qset; /* index of first qset */
296 u8 rss_mode; 295 u8 rss_mode;
@@ -298,11 +297,6 @@ struct port_info {
298 u16 *rss; 297 u16 *rss;
299}; 298};
300 299
301/* port_info.rx_offload flags */
302enum {
303 RX_CSO = 1 << 0,
304};
305
306struct dentry; 300struct dentry;
307struct work_struct; 301struct work_struct;
308 302
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 5352c8a23f4d..7e3cfbe89e3b 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -1336,15 +1336,20 @@ static int restart_autoneg(struct net_device *dev)
1336 return 0; 1336 return 0;
1337} 1337}
1338 1338
1339static int identify_port(struct net_device *dev, u32 data) 1339static int identify_port(struct net_device *dev,
1340 enum ethtool_phys_id_state state)
1340{ 1341{
1342 unsigned int val;
1341 struct adapter *adap = netdev2adap(dev); 1343 struct adapter *adap = netdev2adap(dev);
1342 1344
1343 if (data == 0) 1345 if (state == ETHTOOL_ID_ACTIVE)
1344 data = 2; /* default to 2 seconds */ 1346 val = 0xffff;
1347 else if (state == ETHTOOL_ID_INACTIVE)
1348 val = 0;
1349 else
1350 return -EINVAL;
1345 1351
1346 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, 1352 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1347 data * 5);
1348} 1353}
1349 1354
1350static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) 1355static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
@@ -1431,7 +1436,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1431 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); 1436 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1432 cmd->advertising = from_fw_linkcaps(p->port_type, 1437 cmd->advertising = from_fw_linkcaps(p->port_type,
1433 p->link_cfg.advertising); 1438 p->link_cfg.advertising);
1434 cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0; 1439 ethtool_cmd_speed_set(cmd,
1440 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1435 cmd->duplex = DUPLEX_FULL; 1441 cmd->duplex = DUPLEX_FULL;
1436 cmd->autoneg = p->link_cfg.autoneg; 1442 cmd->autoneg = p->link_cfg.autoneg;
1437 cmd->maxtxpkt = 0; 1443 cmd->maxtxpkt = 0;
@@ -1455,6 +1461,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1455 unsigned int cap; 1461 unsigned int cap;
1456 struct port_info *p = netdev_priv(dev); 1462 struct port_info *p = netdev_priv(dev);
1457 struct link_config *lc = &p->link_cfg; 1463 struct link_config *lc = &p->link_cfg;
1464 u32 speed = ethtool_cmd_speed(cmd);
1458 1465
1459 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ 1466 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1460 return -EINVAL; 1467 return -EINVAL;
@@ -1465,16 +1472,16 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1465 * being requested. 1472 * being requested.
1466 */ 1473 */
1467 if (cmd->autoneg == AUTONEG_DISABLE && 1474 if (cmd->autoneg == AUTONEG_DISABLE &&
1468 (lc->supported & speed_to_caps(cmd->speed))) 1475 (lc->supported & speed_to_caps(speed)))
1469 return 0; 1476 return 0;
1470 return -EINVAL; 1477 return -EINVAL;
1471 } 1478 }
1472 1479
1473 if (cmd->autoneg == AUTONEG_DISABLE) { 1480 if (cmd->autoneg == AUTONEG_DISABLE) {
1474 cap = speed_to_caps(cmd->speed); 1481 cap = speed_to_caps(speed);
1475 1482
1476 if (!(lc->supported & cap) || cmd->speed == SPEED_1000 || 1483 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1477 cmd->speed == SPEED_10000) 1484 (speed == SPEED_10000))
1478 return -EINVAL; 1485 return -EINVAL;
1479 lc->requested_speed = cap; 1486 lc->requested_speed = cap;
1480 lc->advertising = 0; 1487 lc->advertising = 0;
@@ -1526,24 +1533,6 @@ static int set_pauseparam(struct net_device *dev,
1526 return 0; 1533 return 0;
1527} 1534}
1528 1535
1529static u32 get_rx_csum(struct net_device *dev)
1530{
1531 struct port_info *p = netdev_priv(dev);
1532
1533 return p->rx_offload & RX_CSO;
1534}
1535
1536static int set_rx_csum(struct net_device *dev, u32 data)
1537{
1538 struct port_info *p = netdev_priv(dev);
1539
1540 if (data)
1541 p->rx_offload |= RX_CSO;
1542 else
1543 p->rx_offload &= ~RX_CSO;
1544 return 0;
1545}
1546
1547static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 1536static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1548{ 1537{
1549 const struct port_info *pi = netdev_priv(dev); 1538 const struct port_info *pi = netdev_priv(dev);
@@ -1865,36 +1854,20 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1865 return err; 1854 return err;
1866} 1855}
1867 1856
1868#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 1857static int cxgb_set_features(struct net_device *dev, u32 features)
1869
1870static int set_tso(struct net_device *dev, u32 value)
1871{
1872 if (value)
1873 dev->features |= TSO_FLAGS;
1874 else
1875 dev->features &= ~TSO_FLAGS;
1876 return 0;
1877}
1878
1879static int set_flags(struct net_device *dev, u32 flags)
1880{ 1858{
1859 const struct port_info *pi = netdev_priv(dev);
1860 u32 changed = dev->features ^ features;
1881 int err; 1861 int err;
1882 unsigned long old_feat = dev->features;
1883 1862
1884 err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH | 1863 if (!(changed & NETIF_F_HW_VLAN_RX))
1885 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN); 1864 return 0;
1886 if (err)
1887 return err;
1888
1889 if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
1890 const struct port_info *pi = netdev_priv(dev);
1891 1865
1892 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, 1866 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1893 -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN), 1867 -1, -1, -1,
1894 true); 1868 !!(features & NETIF_F_HW_VLAN_RX), true);
1895 if (err) 1869 if (unlikely(err))
1896 dev->features = old_feat; 1870 dev->features = features ^ NETIF_F_HW_VLAN_RX;
1897 }
1898 return err; 1871 return err;
1899} 1872}
1900 1873
@@ -2005,13 +1978,9 @@ static struct ethtool_ops cxgb_ethtool_ops = {
2005 .set_eeprom = set_eeprom, 1978 .set_eeprom = set_eeprom,
2006 .get_pauseparam = get_pauseparam, 1979 .get_pauseparam = get_pauseparam,
2007 .set_pauseparam = set_pauseparam, 1980 .set_pauseparam = set_pauseparam,
2008 .get_rx_csum = get_rx_csum,
2009 .set_rx_csum = set_rx_csum,
2010 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
2011 .set_sg = ethtool_op_set_sg,
2012 .get_link = ethtool_op_get_link, 1981 .get_link = ethtool_op_get_link,
2013 .get_strings = get_strings, 1982 .get_strings = get_strings,
2014 .phys_id = identify_port, 1983 .set_phys_id = identify_port,
2015 .nway_reset = restart_autoneg, 1984 .nway_reset = restart_autoneg,
2016 .get_sset_count = get_sset_count, 1985 .get_sset_count = get_sset_count,
2017 .get_ethtool_stats = get_stats, 1986 .get_ethtool_stats = get_stats,
@@ -2019,8 +1988,6 @@ static struct ethtool_ops cxgb_ethtool_ops = {
2019 .get_regs = get_regs, 1988 .get_regs = get_regs,
2020 .get_wol = get_wol, 1989 .get_wol = get_wol,
2021 .set_wol = set_wol, 1990 .set_wol = set_wol,
2022 .set_tso = set_tso,
2023 .set_flags = set_flags,
2024 .get_rxnfc = get_rxnfc, 1991 .get_rxnfc = get_rxnfc,
2025 .get_rxfh_indir = get_rss_table, 1992 .get_rxfh_indir = get_rss_table,
2026 .set_rxfh_indir = set_rss_table, 1993 .set_rxfh_indir = set_rss_table,
@@ -2877,6 +2844,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
2877 .ndo_get_stats64 = cxgb_get_stats, 2844 .ndo_get_stats64 = cxgb_get_stats,
2878 .ndo_set_rx_mode = cxgb_set_rxmode, 2845 .ndo_set_rx_mode = cxgb_set_rxmode,
2879 .ndo_set_mac_address = cxgb_set_mac_addr, 2846 .ndo_set_mac_address = cxgb_set_mac_addr,
2847 .ndo_set_features = cxgb_set_features,
2880 .ndo_validate_addr = eth_validate_addr, 2848 .ndo_validate_addr = eth_validate_addr,
2881 .ndo_do_ioctl = cxgb_ioctl, 2849 .ndo_do_ioctl = cxgb_ioctl,
2882 .ndo_change_mtu = cxgb_change_mtu, 2850 .ndo_change_mtu = cxgb_change_mtu,
@@ -3559,6 +3527,7 @@ static void free_some_resources(struct adapter *adapter)
3559 t4_fw_bye(adapter, adapter->fn); 3527 t4_fw_bye(adapter, adapter->fn);
3560} 3528}
3561 3529
3530#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3562#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ 3531#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3563 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 3532 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3564 3533
@@ -3660,14 +3629,14 @@ static int __devinit init_one(struct pci_dev *pdev,
3660 pi = netdev_priv(netdev); 3629 pi = netdev_priv(netdev);
3661 pi->adapter = adapter; 3630 pi->adapter = adapter;
3662 pi->xact_addr_filt = -1; 3631 pi->xact_addr_filt = -1;
3663 pi->rx_offload = RX_CSO;
3664 pi->port_id = i; 3632 pi->port_id = i;
3665 netdev->irq = pdev->irq; 3633 netdev->irq = pdev->irq;
3666 3634
3667 netdev->features |= NETIF_F_SG | TSO_FLAGS; 3635 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
3668 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3636 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3669 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma; 3637 NETIF_F_RXCSUM | NETIF_F_RXHASH |
3670 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3638 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3639 netdev->features |= netdev->hw_features | highdma;
3671 netdev->vlan_features = netdev->features & VLAN_FEAT; 3640 netdev->vlan_features = netdev->features & VLAN_FEAT;
3672 3641
3673 netdev->netdev_ops = &cxgb4_netdev_ops; 3642 netdev->netdev_ops = &cxgb4_netdev_ops;
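
[Editor's note] The cxgb4 hunks show the pattern running through this whole series: user-toggleable offloads are advertised in netdev->hw_features at probe time and reacted to in .ndo_set_features, instead of private ethtool set_rx_csum/set_tso/set_flags hooks. A condensed, hypothetical version of that setup and callback (feature bits chosen for illustration; the u32 features argument matches the kernel version this patch targets):

#include <linux/netdevice.h>

/* Probe-time: declare which offloads the user may toggle. */
static void foo_setup_features(struct net_device *netdev)
{
        netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                              NETIF_F_TSO | NETIF_F_RXCSUM;
        /* Everything toggleable starts out enabled. */
        netdev->features |= netdev->hw_features;
}

/* Called by the core after it has validated the requested feature set;
 * hook it up via .ndo_set_features in the net_device_ops. */
static int foo_set_features(struct net_device *dev, u32 features)
{
        u32 changed = dev->features ^ features;

        if (changed & NETIF_F_RXCSUM) {
                /* Push the new RX-checksum setting to hardware here. */
        }
        return 0;
}
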
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 311471b439a8..75a4b0fa19ee 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1556,7 +1556,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1556{ 1556{
1557 bool csum_ok; 1557 bool csum_ok;
1558 struct sk_buff *skb; 1558 struct sk_buff *skb;
1559 struct port_info *pi;
1560 const struct cpl_rx_pkt *pkt; 1559 const struct cpl_rx_pkt *pkt;
1561 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1560 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1562 1561
@@ -1584,10 +1583,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1584 if (skb->dev->features & NETIF_F_RXHASH) 1583 if (skb->dev->features & NETIF_F_RXHASH)
1585 skb->rxhash = (__force u32)pkt->rsshdr.hash_val; 1584 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1586 1585
1587 pi = netdev_priv(skb->dev);
1588 rxq->stats.pkts++; 1586 rxq->stats.pkts++;
1589 1587
1590 if (csum_ok && (pi->rx_offload & RX_CSO) && 1588 if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
1591 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { 1589 (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
1592 if (!pkt->ip_frag) { 1590 if (!pkt->ip_frag) {
1593 skb->ip_summed = CHECKSUM_UNNECESSARY; 1591 skb->ip_summed = CHECKSUM_UNNECESSARY;
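
With the per-port rx_offload flag gone, the cxgb4 receive handler gates checksum offload on the live feature bit, so an "ethtool -K ethX rx off" takes effect on the next packet instead of going through a driver-private toggle. Stripped of the queue plumbing, the decision the sge.c hunk switches to is roughly the sketch below; csum_ok stands for the NIC's per-packet "checksum verified" indication.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Reduced sketch of the RX-path decision after the conversion. */
static void foo_rx_csum(struct sk_buff *skb, bool csum_ok)
{
	if (csum_ok && (skb->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* stack skips the check */
	else
		skb->ip_summed = CHECKSUM_NONE;		/* stack verifies in software */
}
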
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 4766b4116b41..4fd821aadc8a 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -97,17 +97,11 @@ struct port_info {
97 u16 rss_size; /* size of VI's RSS table slice */ 97 u16 rss_size; /* size of VI's RSS table slice */
98 u8 pidx; /* index into adapter port[] */ 98 u8 pidx; /* index into adapter port[] */
99 u8 port_id; /* physical port ID */ 99 u8 port_id; /* physical port ID */
100 u8 rx_offload; /* CSO, etc. */
101 u8 nqsets; /* # of "Queue Sets" */ 100 u8 nqsets; /* # of "Queue Sets" */
102 u8 first_qset; /* index of first "Queue Set" */ 101 u8 first_qset; /* index of first "Queue Set" */
103 struct link_config link_cfg; /* physical port configuration */ 102 struct link_config link_cfg; /* physical port configuration */
104}; 103};
105 104
106/* port_info.rx_offload flags */
107enum {
108 RX_CSO = 1 << 0,
109};
110
111/* 105/*
112 * Scatter Gather Engine resources for the "adapter". Our ingress and egress 106 * Scatter Gather Engine resources for the "adapter". Our ingress and egress
113 * queues are organized into "Queue Sets" with one ingress and one egress 107 * queues are organized into "Queue Sets" with one ingress and one egress
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 4661cbbd9bd9..e71c08e547e4 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -1167,7 +1167,8 @@ static int cxgb4vf_get_settings(struct net_device *dev,
1167 1167
1168 cmd->supported = pi->link_cfg.supported; 1168 cmd->supported = pi->link_cfg.supported;
1169 cmd->advertising = pi->link_cfg.advertising; 1169 cmd->advertising = pi->link_cfg.advertising;
1170 cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1; 1170 ethtool_cmd_speed_set(cmd,
1171 netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
1171 cmd->duplex = DUPLEX_FULL; 1172 cmd->duplex = DUPLEX_FULL;
1172 1173
1173 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 1174 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1326,37 +1327,22 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
1326} 1327}
1327 1328
1328/* 1329/*
1329 * Return whether RX Checksum Offloading is currently enabled for the device. 1330 * Identify the port by blinking the port's LED.
1330 */
1331static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
1332{
1333 struct port_info *pi = netdev_priv(dev);
1334
1335 return (pi->rx_offload & RX_CSO) != 0;
1336}
1337
1338/*
1339 * Turn RX Checksum Offloading on or off for the device.
1340 */ 1331 */
1341static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum) 1332static int cxgb4vf_phys_id(struct net_device *dev,
1333 enum ethtool_phys_id_state state)
1342{ 1334{
1335 unsigned int val;
1343 struct port_info *pi = netdev_priv(dev); 1336 struct port_info *pi = netdev_priv(dev);
1344 1337
1345 if (csum) 1338 if (state == ETHTOOL_ID_ACTIVE)
1346 pi->rx_offload |= RX_CSO; 1339 val = 0xffff;
1340 else if (state == ETHTOOL_ID_INACTIVE)
1341 val = 0;
1347 else 1342 else
1348 pi->rx_offload &= ~RX_CSO; 1343 return -EINVAL;
1349 return 0;
1350}
1351
1352/*
1353 * Identify the port by blinking the port's LED.
1354 */
1355static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
1356{
1357 struct port_info *pi = netdev_priv(dev);
1358 1344
1359 return t4vf_identify_port(pi->adapter, pi->viid, 5); 1345 return t4vf_identify_port(pi->adapter, pi->viid, val);
1360} 1346}
1361 1347
1362/* 1348/*
@@ -1560,18 +1546,6 @@ static void cxgb4vf_get_wol(struct net_device *dev,
1560 */ 1546 */
1561#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 1547#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1562 1548
1563/*
1564 * Set TCP Segmentation Offloading feature capabilities.
1565 */
1566static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
1567{
1568 if (tso)
1569 dev->features |= TSO_FLAGS;
1570 else
1571 dev->features &= ~TSO_FLAGS;
1572 return 0;
1573}
1574
1575static struct ethtool_ops cxgb4vf_ethtool_ops = { 1549static struct ethtool_ops cxgb4vf_ethtool_ops = {
1576 .get_settings = cxgb4vf_get_settings, 1550 .get_settings = cxgb4vf_get_settings,
1577 .get_drvinfo = cxgb4vf_get_drvinfo, 1551 .get_drvinfo = cxgb4vf_get_drvinfo,
@@ -1582,19 +1556,14 @@ static struct ethtool_ops cxgb4vf_ethtool_ops = {
1582 .get_coalesce = cxgb4vf_get_coalesce, 1556 .get_coalesce = cxgb4vf_get_coalesce,
1583 .set_coalesce = cxgb4vf_set_coalesce, 1557 .set_coalesce = cxgb4vf_set_coalesce,
1584 .get_pauseparam = cxgb4vf_get_pauseparam, 1558 .get_pauseparam = cxgb4vf_get_pauseparam,
1585 .get_rx_csum = cxgb4vf_get_rx_csum,
1586 .set_rx_csum = cxgb4vf_set_rx_csum,
1587 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1588 .set_sg = ethtool_op_set_sg,
1589 .get_link = ethtool_op_get_link, 1559 .get_link = ethtool_op_get_link,
1590 .get_strings = cxgb4vf_get_strings, 1560 .get_strings = cxgb4vf_get_strings,
1591 .phys_id = cxgb4vf_phys_id, 1561 .set_phys_id = cxgb4vf_phys_id,
1592 .get_sset_count = cxgb4vf_get_sset_count, 1562 .get_sset_count = cxgb4vf_get_sset_count,
1593 .get_ethtool_stats = cxgb4vf_get_ethtool_stats, 1563 .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
1594 .get_regs_len = cxgb4vf_get_regs_len, 1564 .get_regs_len = cxgb4vf_get_regs_len,
1595 .get_regs = cxgb4vf_get_regs, 1565 .get_regs = cxgb4vf_get_regs,
1596 .get_wol = cxgb4vf_get_wol, 1566 .get_wol = cxgb4vf_get_wol,
1597 .set_tso = cxgb4vf_set_tso,
1598}; 1567};
1599 1568
1600/* 1569/*
@@ -2629,19 +2598,19 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2629 * it. 2598 * it.
2630 */ 2599 */
2631 pi->xact_addr_filt = -1; 2600 pi->xact_addr_filt = -1;
2632 pi->rx_offload = RX_CSO;
2633 netif_carrier_off(netdev); 2601 netif_carrier_off(netdev);
2634 netdev->irq = pdev->irq; 2602 netdev->irq = pdev->irq;
2635 2603
2636 netdev->features = (NETIF_F_SG | TSO_FLAGS | 2604 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2637 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2605 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2638 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2606 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
2639 NETIF_F_GRO); 2607 netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
2608 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2609 NETIF_F_HIGHDMA;
2610 netdev->features = netdev->hw_features |
2611 NETIF_F_HW_VLAN_RX;
2640 if (pci_using_dac) 2612 if (pci_using_dac)
2641 netdev->features |= NETIF_F_HIGHDMA; 2613 netdev->features |= NETIF_F_HIGHDMA;
2642 netdev->vlan_features =
2643 (netdev->features &
2644 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));
2645 2614
2646#ifdef HAVE_NET_DEVICE_OPS 2615#ifdef HAVE_NET_DEVICE_OPS
2647 netdev->netdev_ops = &cxgb4vf_netdev_ops; 2616 netdev->netdev_ops = &cxgb4vf_netdev_ops;
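
The second half of the cxgb4vf diff is the phys_id -> set_phys_id conversion: instead of one call that blinks for a caller-supplied number of seconds, the ethtool core now drives a small state machine. cxgb4vf handles only ETHTOOL_ID_ACTIVE/INACTIVE, telling the firmware to start (0xffff) or stop (0) identifying the port; a driver that toggles the LED itself also services ETHTOOL_ID_ON/OFF, and a positive return from ETHTOOL_ID_ACTIVE is the blink frequency it asks the core to drive, which is why the e100 and e1000 hunks further down return 2. A skeletal, entirely hypothetical handler:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Skeleton only -- the register writes depend on the hardware; 2 is
 * used here as "please blink me at about 2 Hz". */
static int foo_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* save/prepare LED state; ask the core to do the timing */
		return 2;
	case ETHTOOL_ID_ON:
		/* write the "LED on" value to the hardware */
		break;
	case ETHTOOL_ID_OFF:
		/* write the "LED off" value to the hardware */
		break;
	case ETHTOOL_ID_INACTIVE:
		/* restore normal LED behaviour */
		break;
	}
	return 0;
}
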
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index bb65121f581c..5182960e29fd 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1555,8 +1555,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1555 pi = netdev_priv(skb->dev); 1555 pi = netdev_priv(skb->dev);
1556 rxq->stats.pkts++; 1556 rxq->stats.pkts++;
1557 1557
1558 if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec && 1558 if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
1559 (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { 1559 !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
1560 if (!pkt->ip_frag) 1560 if (!pkt->ip_frag)
1561 skb->ip_summed = CHECKSUM_UNNECESSARY; 1561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562 else { 1562 else {
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 8b0084d17c8c..17654059922d 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -331,18 +331,18 @@ static struct {
331 "DE422",\ 331 "DE422",\
332 ""} 332 ""}
333 333
334static char* __initdata depca_signature[] = DEPCA_SIGNATURE; 334static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE;
335 335
336enum depca_type { 336enum depca_type {
337 DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown 337 DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
338}; 338};
339 339
340static char depca_string[] = "depca"; 340static const char depca_string[] = "depca";
341 341
342static int depca_device_remove (struct device *device); 342static int depca_device_remove (struct device *device);
343 343
344#ifdef CONFIG_EISA 344#ifdef CONFIG_EISA
345static struct eisa_device_id depca_eisa_ids[] = { 345static const struct eisa_device_id depca_eisa_ids[] __devinitconst = {
346 { "DEC4220", de422 }, 346 { "DEC4220", de422 },
347 { "" } 347 { "" }
348}; 348};
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
367#define DE210_ID 0x628d 367#define DE210_ID 0x628d
368#define DE212_ID 0x6def 368#define DE212_ID 0x6def
369 369
370static short depca_mca_adapter_ids[] = { 370static const short depca_mca_adapter_ids[] __devinitconst = {
371 DE210_ID, 371 DE210_ID,
372 DE212_ID, 372 DE212_ID,
373 0x0000 373 0x0000
374}; 374};
375 375
376static char *depca_mca_adapter_name[] = { 376static const char *depca_mca_adapter_name[] = {
377 "DEC EtherWORKS MC Adapter (DE210)", 377 "DEC EtherWORKS MC Adapter (DE210)",
378 "DEC EtherWORKS MC Adapter (DE212)", 378 "DEC EtherWORKS MC Adapter (DE212)",
379 NULL 379 NULL
380}; 380};
381 381
382static enum depca_type depca_mca_adapter_type[] = { 382static const enum depca_type depca_mca_adapter_type[] = {
383 de210, 383 de210,
384 de212, 384 de212,
385 0 385 0
@@ -541,10 +541,9 @@ static void SetMulticastFilter(struct net_device *dev);
541static int load_packet(struct net_device *dev, struct sk_buff *skb); 541static int load_packet(struct net_device *dev, struct sk_buff *skb);
542static void depca_dbg_open(struct net_device *dev); 542static void depca_dbg_open(struct net_device *dev);
543 543
544static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 }; 544static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 };
545static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 }; 545static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 };
546static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 }; 546static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 };
547static u_char *depca_irq;
548 547
549static int irq; 548static int irq;
550static int io; 549static int io;
@@ -580,7 +579,7 @@ static const struct net_device_ops depca_netdev_ops = {
580 .ndo_validate_addr = eth_validate_addr, 579 .ndo_validate_addr = eth_validate_addr,
581}; 580};
582 581
583static int __init depca_hw_init (struct net_device *dev, struct device *device) 582static int __devinit depca_hw_init (struct net_device *dev, struct device *device)
584{ 583{
585 struct depca_private *lp; 584 struct depca_private *lp;
586 int i, j, offset, netRAM, mem_len, status = 0; 585 int i, j, offset, netRAM, mem_len, status = 0;
@@ -748,6 +747,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
748 if (dev->irq < 2) { 747 if (dev->irq < 2) {
749 unsigned char irqnum; 748 unsigned char irqnum;
750 unsigned long irq_mask, delay; 749 unsigned long irq_mask, delay;
750 const u_char *depca_irq;
751 751
752 irq_mask = probe_irq_on(); 752 irq_mask = probe_irq_on();
753 753
@@ -770,6 +770,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
770 break; 770 break;
771 771
772 default: 772 default:
773 depca_irq = NULL;
773 break; /* Not reached */ 774 break; /* Not reached */
774 } 775 }
775 776
@@ -1302,7 +1303,7 @@ static void SetMulticastFilter(struct net_device *dev)
1302 } 1303 }
1303} 1304}
1304 1305
1305static int __init depca_common_init (u_long ioaddr, struct net_device **devp) 1306static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
1306{ 1307{
1307 int status = 0; 1308 int status = 0;
1308 1309
@@ -1333,7 +1334,7 @@ static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
1333/* 1334/*
1334** Microchannel bus I/O device probe 1335** Microchannel bus I/O device probe
1335*/ 1336*/
1336static int __init depca_mca_probe(struct device *device) 1337static int __devinit depca_mca_probe(struct device *device)
1337{ 1338{
1338 unsigned char pos[2]; 1339 unsigned char pos[2];
1339 unsigned char where; 1340 unsigned char where;
@@ -1457,7 +1458,7 @@ static int __init depca_mca_probe(struct device *device)
1457** ISA bus I/O device probe 1458** ISA bus I/O device probe
1458*/ 1459*/
1459 1460
1460static void __init depca_platform_probe (void) 1461static void __devinit depca_platform_probe (void)
1461{ 1462{
1462 int i; 1463 int i;
1463 struct platform_device *pldev; 1464 struct platform_device *pldev;
@@ -1497,7 +1498,7 @@ static void __init depca_platform_probe (void)
1497 } 1498 }
1498} 1499}
1499 1500
1500static enum depca_type __init depca_shmem_probe (ulong *mem_start) 1501static enum depca_type __devinit depca_shmem_probe (ulong *mem_start)
1501{ 1502{
1502 u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES; 1503 u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
1503 enum depca_type adapter = unknown; 1504 enum depca_type adapter = unknown;
@@ -1558,7 +1559,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
1558*/ 1559*/
1559 1560
1560#ifdef CONFIG_EISA 1561#ifdef CONFIG_EISA
1561static int __init depca_eisa_probe (struct device *device) 1562static int __devinit depca_eisa_probe (struct device *device)
1562{ 1563{
1563 enum depca_type adapter = unknown; 1564 enum depca_type adapter = unknown;
1564 struct eisa_device *edev; 1565 struct eisa_device *edev;
@@ -1629,7 +1630,7 @@ static int __devexit depca_device_remove (struct device *device)
1629** and Boot (readb) ROM. This will also give us a clue to the network RAM 1630** and Boot (readb) ROM. This will also give us a clue to the network RAM
1630** base address. 1631** base address.
1631*/ 1632*/
1632static int __init DepcaSignature(char *name, u_long base_addr) 1633static int __devinit DepcaSignature(char *name, u_long base_addr)
1633{ 1634{
1634 u_int i, j, k; 1635 u_int i, j, k;
1635 void __iomem *ptr; 1636 void __iomem *ptr;
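
The depca changes are section-annotation fixes rather than functional ones: tables and helpers reachable from __devinit probe routines move from __initdata/__init, whose memory the kernel frees once booting finishes, to __devinitconst/__devinit, which are kept while device probing is still possible, and the file-scope depca_irq pointer becomes a per-probe local. The pairing rule, illustrated with made-up names:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Made-up example of the annotation pairing the depca fix enforces:
 * data referenced from __devinit code must not sit in .init.data,
 * which is discarded after boot while probes can still happen later. */
static const unsigned char foo_irq_candidates[] __devinitconst = {
	5, 9, 10, 11, 0
};

static int __devinit foo_probe(struct platform_device *pdev)
{
	const unsigned char *irq;

	for (irq = foo_irq_candidates; *irq; irq++) {
		/* try to claim *irq for the device ... */
	}
	return 0;
}
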
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index c05db6046050..c445457b66d5 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1189,10 +1189,10 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1189 cmd->transceiver = XCVR_INTERNAL; 1189 cmd->transceiver = XCVR_INTERNAL;
1190 } 1190 }
1191 if ( np->link_status ) { 1191 if ( np->link_status ) {
1192 cmd->speed = np->speed; 1192 ethtool_cmd_speed_set(cmd, np->speed);
1193 cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 1193 cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1194 } else { 1194 } else {
1195 cmd->speed = -1; 1195 ethtool_cmd_speed_set(cmd, -1);
1196 cmd->duplex = -1; 1196 cmd->duplex = -1;
1197 } 1197 }
1198 if ( np->an_enable) 1198 if ( np->an_enable)
@@ -1219,31 +1219,20 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1219 } else { 1219 } else {
1220 np->an_enable = 0; 1220 np->an_enable = 0;
1221 if (np->speed == 1000) { 1221 if (np->speed == 1000) {
1222 cmd->speed = SPEED_100; 1222 ethtool_cmd_speed_set(cmd, SPEED_100);
1223 cmd->duplex = DUPLEX_FULL; 1223 cmd->duplex = DUPLEX_FULL;
1224 printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); 1224 printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
1225 } 1225 }
1226 switch(cmd->speed + cmd->duplex) { 1226 switch (ethtool_cmd_speed(cmd)) {
1227 1227 case SPEED_10:
1228 case SPEED_10 + DUPLEX_HALF:
1229 np->speed = 10;
1230 np->full_duplex = 0;
1231 break;
1232
1233 case SPEED_10 + DUPLEX_FULL:
1234 np->speed = 10; 1228 np->speed = 10;
1235 np->full_duplex = 1; 1229 np->full_duplex = (cmd->duplex == DUPLEX_FULL);
1236 break; 1230 break;
1237 case SPEED_100 + DUPLEX_HALF: 1231 case SPEED_100:
1238 np->speed = 100; 1232 np->speed = 100;
1239 np->full_duplex = 0; 1233 np->full_duplex = (cmd->duplex == DUPLEX_FULL);
1240 break;
1241 case SPEED_100 + DUPLEX_FULL:
1242 np->speed = 100;
1243 np->full_duplex = 1;
1244 break; 1234 break;
1245 case SPEED_1000 + DUPLEX_HALF:/* not supported */ 1235 case SPEED_1000: /* not supported */
1246 case SPEED_1000 + DUPLEX_FULL:/* not supported */
1247 default: 1236 default:
1248 return -EINVAL; 1237 return -EINVAL;
1249 } 1238 }
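
dl2k is one of several drivers in this series converted from assigning cmd->speed directly to the ethtool_cmd_speed()/ethtool_cmd_speed_set() accessors, which spread the value over the 16-bit speed and speed_hi fields so that link speeds not fitting in 16 bits can be reported; the set_settings path also stops switching on the speed+duplex sum and checks duplex on its own. For illustration, the accessors amount to the following (re-implemented under example_ names; the real helpers live in <linux/ethtool.h>):

#include <linux/ethtool.h>

/* Illustration only -- the kernel already provides these helpers. */
static inline void example_cmd_speed_set(struct ethtool_cmd *ep, u32 speed)
{
	ep->speed = (u16)speed;			/* low 16 bits */
	ep->speed_hi = (u16)(speed >> 16);	/* high 16 bits */
}

static inline u32 example_cmd_speed(const struct ethtool_cmd *ep)
{
	return (ep->speed_hi << 16) | ep->speed;
}
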
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index b7af5bab9937..fbaff3584bd4 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -131,8 +131,6 @@ typedef struct board_info {
131 u32 msg_enable; 131 u32 msg_enable;
132 u32 wake_state; 132 u32 wake_state;
133 133
134 int rx_csum;
135 int can_csum;
136 int ip_summed; 134 int ip_summed;
137} board_info_t; 135} board_info_t;
138 136
@@ -470,47 +468,20 @@ static int dm9000_nway_reset(struct net_device *dev)
470 return mii_nway_restart(&dm->mii); 468 return mii_nway_restart(&dm->mii);
471} 469}
472 470
473static uint32_t dm9000_get_rx_csum(struct net_device *dev) 471static int dm9000_set_features(struct net_device *dev, u32 features)
474{ 472{
475 board_info_t *dm = to_dm9000_board(dev); 473 board_info_t *dm = to_dm9000_board(dev);
476 return dm->rx_csum; 474 u32 changed = dev->features ^ features;
477} 475 unsigned long flags;
478
479static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
480{
481 board_info_t *dm = to_dm9000_board(dev);
482
483 if (dm->can_csum) {
484 dm->rx_csum = data;
485 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
486 476
477 if (!(changed & NETIF_F_RXCSUM))
487 return 0; 478 return 0;
488 }
489
490 return -EOPNOTSUPP;
491}
492
493static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
494{
495 board_info_t *dm = to_dm9000_board(dev);
496 unsigned long flags;
497 int ret;
498 479
499 spin_lock_irqsave(&dm->lock, flags); 480 spin_lock_irqsave(&dm->lock, flags);
500 ret = dm9000_set_rx_csum_unlocked(dev, data); 481 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
501 spin_unlock_irqrestore(&dm->lock, flags); 482 spin_unlock_irqrestore(&dm->lock, flags);
502 483
503 return ret; 484 return 0;
504}
505
506static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
507{
508 board_info_t *dm = to_dm9000_board(dev);
509 int ret = -EOPNOTSUPP;
510
511 if (dm->can_csum)
512 ret = ethtool_op_set_tx_csum(dev, data);
513 return ret;
514} 485}
515 486
516static u32 dm9000_get_link(struct net_device *dev) 487static u32 dm9000_get_link(struct net_device *dev)
@@ -643,10 +614,6 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
643 .get_eeprom_len = dm9000_get_eeprom_len, 614 .get_eeprom_len = dm9000_get_eeprom_len,
644 .get_eeprom = dm9000_get_eeprom, 615 .get_eeprom = dm9000_get_eeprom,
645 .set_eeprom = dm9000_set_eeprom, 616 .set_eeprom = dm9000_set_eeprom,
646 .get_rx_csum = dm9000_get_rx_csum,
647 .set_rx_csum = dm9000_set_rx_csum,
648 .get_tx_csum = ethtool_op_get_tx_csum,
649 .set_tx_csum = dm9000_set_tx_csum,
650}; 617};
651 618
652static void dm9000_show_carrier(board_info_t *db, 619static void dm9000_show_carrier(board_info_t *db,
@@ -800,7 +767,9 @@ dm9000_init_dm9000(struct net_device *dev)
800 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ 767 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
801 768
802 /* Checksum mode */ 769 /* Checksum mode */
803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum); 770 if (dev->hw_features & NETIF_F_RXCSUM)
771 iow(db, DM9000_RCSR,
772 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
804 773
805 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 774 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
806 775
@@ -1049,7 +1018,7 @@ dm9000_rx(struct net_device *dev)
1049 1018
1050 /* Pass to upper layer */ 1019 /* Pass to upper layer */
1051 skb->protocol = eth_type_trans(skb, dev); 1020 skb->protocol = eth_type_trans(skb, dev);
1052 if (db->rx_csum) { 1021 if (dev->features & NETIF_F_RXCSUM) {
1053 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0) 1022 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1054 skb->ip_summed = CHECKSUM_UNNECESSARY; 1023 skb->ip_summed = CHECKSUM_UNNECESSARY;
1055 else 1024 else
@@ -1358,6 +1327,7 @@ static const struct net_device_ops dm9000_netdev_ops = {
1358 .ndo_set_multicast_list = dm9000_hash_table, 1327 .ndo_set_multicast_list = dm9000_hash_table,
1359 .ndo_do_ioctl = dm9000_ioctl, 1328 .ndo_do_ioctl = dm9000_ioctl,
1360 .ndo_change_mtu = eth_change_mtu, 1329 .ndo_change_mtu = eth_change_mtu,
1330 .ndo_set_features = dm9000_set_features,
1361 .ndo_validate_addr = eth_validate_addr, 1331 .ndo_validate_addr = eth_validate_addr,
1362 .ndo_set_mac_address = eth_mac_addr, 1332 .ndo_set_mac_address = eth_mac_addr,
1363#ifdef CONFIG_NET_POLL_CONTROLLER 1333#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1551,9 +1521,8 @@ dm9000_probe(struct platform_device *pdev)
1551 1521
1552 /* dm9000a/b are capable of hardware checksum offload */ 1522 /* dm9000a/b are capable of hardware checksum offload */
1553 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) { 1523 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1554 db->can_csum = 1; 1524 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1555 db->rx_csum = 1; 1525 ndev->features |= ndev->hw_features;
1556 ndev->features |= NETIF_F_IP_CSUM;
1557 } 1526 }
1558 1527
1559 /* from this point we assume that we have found a DM9000 */ 1528 /* from this point we assume that we have found a DM9000 */
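
dm9000 folds its rx/tx checksum knobs into a single ndo_set_features handler. Two details of the hunk make a reusable idiom: XORing the old and new feature words so the RCSR register is only touched when NETIF_F_RXCSUM actually changed, and taking the driver lock around the write because "ethtool -K ethX rx on|off" runs concurrently with the data path. Roughly, with foo_* standing in for the driver specifics:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {			/* hypothetical driver state */
	spinlock_t lock;
};

static int foo_set_features(struct net_device *dev, u32 features)
{
	struct foo_priv *priv = netdev_priv(dev);
	u32 changed = dev->features ^ features;	/* bits the user flipped */
	unsigned long flags;

	if (!(changed & NETIF_F_RXCSUM))
		return 0;			/* nothing we act on changed */

	spin_lock_irqsave(&priv->lock, flags);
	/* program the RX-checksum enable bit from
	 * (features & NETIF_F_RXCSUM) here */
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}
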
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index ff2d29b17858..39cf9b9bd673 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -168,10 +168,6 @@ static int __init dummy_init_one(void)
168 if (!dev_dummy) 168 if (!dev_dummy)
169 return -ENOMEM; 169 return -ENOMEM;
170 170
171 err = dev_alloc_name(dev_dummy, dev_dummy->name);
172 if (err < 0)
173 goto err;
174
175 dev_dummy->rtnl_link_ops = &dummy_link_ops; 171 dev_dummy->rtnl_link_ops = &dummy_link_ops;
176 err = register_netdevice(dev_dummy); 172 err = register_netdevice(dev_dummy);
177 if (err < 0) 173 if (err < 0)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b0aa9e68990a..e336c7937f05 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -593,7 +593,6 @@ struct nic {
593 enum phy phy; 593 enum phy phy;
594 struct params params; 594 struct params params;
595 struct timer_list watchdog; 595 struct timer_list watchdog;
596 struct timer_list blink_timer;
597 struct mii_if_info mii; 596 struct mii_if_info mii;
598 struct work_struct tx_timeout_task; 597 struct work_struct tx_timeout_task;
599 enum loopback loopback; 598 enum loopback loopback;
@@ -618,7 +617,6 @@ struct nic {
618 u32 rx_tco_frames; 617 u32 rx_tco_frames;
619 u32 rx_over_length_errors; 618 u32 rx_over_length_errors;
620 619
621 u16 leds;
622 u16 eeprom_wc; 620 u16 eeprom_wc;
623 __le16 eeprom[256]; 621 __le16 eeprom[256];
624 spinlock_t mdio_lock; 622 spinlock_t mdio_lock;
@@ -1512,7 +1510,7 @@ static int e100_phy_init(struct nic *nic)
1512 1510
1513static int e100_hw_init(struct nic *nic) 1511static int e100_hw_init(struct nic *nic)
1514{ 1512{
1515 int err; 1513 int err = 0;
1516 1514
1517 e100_hw_reset(nic); 1515 e100_hw_reset(nic);
1518 1516
@@ -1668,7 +1666,8 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1668static void e100_watchdog(unsigned long data) 1666static void e100_watchdog(unsigned long data)
1669{ 1667{
1670 struct nic *nic = (struct nic *)data; 1668 struct nic *nic = (struct nic *)data;
1671 struct ethtool_cmd cmd; 1669 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1670 u32 speed;
1672 1671
1673 netif_printk(nic, timer, KERN_DEBUG, nic->netdev, 1672 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1674 "right now = %ld\n", jiffies); 1673 "right now = %ld\n", jiffies);
@@ -1676,10 +1675,11 @@ static void e100_watchdog(unsigned long data)
1676 /* mii library handles link maintenance tasks */ 1675 /* mii library handles link maintenance tasks */
1677 1676
1678 mii_ethtool_gset(&nic->mii, &cmd); 1677 mii_ethtool_gset(&nic->mii, &cmd);
1678 speed = ethtool_cmd_speed(&cmd);
1679 1679
1680 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { 1680 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1681 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", 1681 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1682 cmd.speed == SPEED_100 ? 100 : 10, 1682 speed == SPEED_100 ? 100 : 10,
1683 cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); 1683 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1684 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { 1684 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1685 netdev_info(nic->netdev, "NIC Link is Down\n"); 1685 netdev_info(nic->netdev, "NIC Link is Down\n");
@@ -1698,13 +1698,13 @@ static void e100_watchdog(unsigned long data)
1698 spin_unlock_irq(&nic->cmd_lock); 1698 spin_unlock_irq(&nic->cmd_lock);
1699 1699
1700 e100_update_stats(nic); 1700 e100_update_stats(nic);
1701 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex); 1701 e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1702 1702
1703 if (nic->mac <= mac_82557_D100_C) 1703 if (nic->mac <= mac_82557_D100_C)
1704 /* Issue a multicast command to workaround a 557 lock up */ 1704 /* Issue a multicast command to workaround a 557 lock up */
1705 e100_set_multicast_list(nic->netdev); 1705 e100_set_multicast_list(nic->netdev);
1706 1706
1707 if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF) 1707 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
1708 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ 1708 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1709 nic->flags |= ich_10h_workaround; 1709 nic->flags |= ich_10h_workaround;
1710 else 1710 else
@@ -2351,30 +2351,6 @@ err_clean_rx:
2351#define E100_82552_LED_OVERRIDE 0x19 2351#define E100_82552_LED_OVERRIDE 0x19
2352#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ 2352#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2353#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ 2353#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
2354static void e100_blink_led(unsigned long data)
2355{
2356 struct nic *nic = (struct nic *)data;
2357 enum led_state {
2358 led_on = 0x01,
2359 led_off = 0x04,
2360 led_on_559 = 0x05,
2361 led_on_557 = 0x07,
2362 };
2363 u16 led_reg = MII_LED_CONTROL;
2364
2365 if (nic->phy == phy_82552_v) {
2366 led_reg = E100_82552_LED_OVERRIDE;
2367
2368 nic->leds = (nic->leds == E100_82552_LED_ON) ?
2369 E100_82552_LED_OFF : E100_82552_LED_ON;
2370 } else {
2371 nic->leds = (nic->leds & led_on) ? led_off :
2372 (nic->mac < mac_82559_D101M) ? led_on_557 :
2373 led_on_559;
2374 }
2375 mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
2376 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2377}
2378 2354
2379static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 2355static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2380{ 2356{
@@ -2598,19 +2574,38 @@ static void e100_diag_test(struct net_device *netdev,
2598 msleep_interruptible(4 * 1000); 2574 msleep_interruptible(4 * 1000);
2599} 2575}
2600 2576
2601static int e100_phys_id(struct net_device *netdev, u32 data) 2577static int e100_set_phys_id(struct net_device *netdev,
2578 enum ethtool_phys_id_state state)
2602{ 2579{
2603 struct nic *nic = netdev_priv(netdev); 2580 struct nic *nic = netdev_priv(netdev);
2581 enum led_state {
2582 led_on = 0x01,
2583 led_off = 0x04,
2584 led_on_559 = 0x05,
2585 led_on_557 = 0x07,
2586 };
2604 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : 2587 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2605 MII_LED_CONTROL; 2588 MII_LED_CONTROL;
2589 u16 leds = 0;
2606 2590
2607 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 2591 switch (state) {
2608 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 2592 case ETHTOOL_ID_ACTIVE:
2609 mod_timer(&nic->blink_timer, jiffies); 2593 return 2;
2610 msleep_interruptible(data * 1000); 2594
2611 del_timer_sync(&nic->blink_timer); 2595 case ETHTOOL_ID_ON:
2612 mdio_write(netdev, nic->mii.phy_id, led_reg, 0); 2596 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2597 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2598 break;
2599
2600 case ETHTOOL_ID_OFF:
2601 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2602 break;
2603
2604 case ETHTOOL_ID_INACTIVE:
2605 break;
2606 }
2613 2607
2608 mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2614 return 0; 2609 return 0;
2615} 2610}
2616 2611
@@ -2691,7 +2686,7 @@ static const struct ethtool_ops e100_ethtool_ops = {
2691 .set_ringparam = e100_set_ringparam, 2686 .set_ringparam = e100_set_ringparam,
2692 .self_test = e100_diag_test, 2687 .self_test = e100_diag_test,
2693 .get_strings = e100_get_strings, 2688 .get_strings = e100_get_strings,
2694 .phys_id = e100_phys_id, 2689 .set_phys_id = e100_set_phys_id,
2695 .get_ethtool_stats = e100_get_ethtool_stats, 2690 .get_ethtool_stats = e100_get_ethtool_stats,
2696 .get_sset_count = e100_get_sset_count, 2691 .get_sset_count = e100_get_sset_count,
2697}; 2692};
@@ -2832,9 +2827,6 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2832 init_timer(&nic->watchdog); 2827 init_timer(&nic->watchdog);
2833 nic->watchdog.function = e100_watchdog; 2828 nic->watchdog.function = e100_watchdog;
2834 nic->watchdog.data = (unsigned long)nic; 2829 nic->watchdog.data = (unsigned long)nic;
2835 init_timer(&nic->blink_timer);
2836 nic->blink_timer.function = e100_blink_led;
2837 nic->blink_timer.data = (unsigned long)nic;
2838 2830
2839 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); 2831 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2840 2832
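
e100 takes the other side of the set_phys_id contract: the private blink_timer and leds bookkeeping are deleted and ETHTOOL_ID_ACTIVE simply returns 2, leaving the timing to the ethtool core, which alternates ETHTOOL_ID_ON/ETHTOOL_ID_OFF calls for the requested duration. Paraphrased rather than copied from net/core/ethtool.c, the core's side of that contract looks roughly like this:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/sched.h>

/* Rough, non-verbatim sketch: a positive return from ETHTOOL_ID_ACTIVE
 * is treated as a blink frequency in Hz; the real core also counts
 * down the duration the user asked for. */
static void example_drive_blink(struct net_device *dev,
				const struct ethtool_ops *ops)
{
	int rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);

	if (rc > 0) {
		unsigned long interval = HZ / (rc * 2);	/* half-period */
		unsigned int i = 0;

		/* toggle until the duration elapses or a signal arrives */
		while (!schedule_timeout_interruptible(interval))
			ops->set_phys_id(dev, (i++ & 1) ? ETHTOOL_ID_OFF
							: ETHTOOL_ID_ON);
	}
	ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);	/* restore the LED */
}
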
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index a881dd0093bd..8676899120c3 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -238,9 +238,6 @@ struct e1000_adapter {
238 struct work_struct reset_task; 238 struct work_struct reset_task;
239 u8 fc_autoneg; 239 u8 fc_autoneg;
240 240
241 struct timer_list blink_timer;
242 unsigned long led_status;
243
244 /* TX */ 241 /* TX */
245 struct e1000_tx_ring *tx_ring; /* One per active queue */ 242 struct e1000_tx_ring *tx_ring; /* One per active queue */
246 unsigned int restart_queue; 243 unsigned int restart_queue;
@@ -349,7 +346,7 @@ extern int e1000_up(struct e1000_adapter *adapter);
349extern void e1000_down(struct e1000_adapter *adapter); 346extern void e1000_down(struct e1000_adapter *adapter);
350extern void e1000_reinit_locked(struct e1000_adapter *adapter); 347extern void e1000_reinit_locked(struct e1000_adapter *adapter);
351extern void e1000_reset(struct e1000_adapter *adapter); 348extern void e1000_reset(struct e1000_adapter *adapter);
352extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx); 349extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
353extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 350extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
354extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 351extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
355extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 352extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index dd70738eb2f4..ec0fa426cce2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -158,7 +158,7 @@ static int e1000_get_settings(struct net_device *netdev,
158 158
159 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 159 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
160 &adapter->link_duplex); 160 &adapter->link_duplex);
161 ecmd->speed = adapter->link_speed; 161 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
162 162
163 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 163 /* unfortunately FULL_DUPLEX != DUPLEX_FULL
164 * and HALF_DUPLEX != DUPLEX_HALF */ 164 * and HALF_DUPLEX != DUPLEX_HALF */
@@ -168,7 +168,7 @@ static int e1000_get_settings(struct net_device *netdev,
168 else 168 else
169 ecmd->duplex = DUPLEX_HALF; 169 ecmd->duplex = DUPLEX_HALF;
170 } else { 170 } else {
171 ecmd->speed = -1; 171 ethtool_cmd_speed_set(ecmd, -1);
172 ecmd->duplex = -1; 172 ecmd->duplex = -1;
173 } 173 }
174 174
@@ -197,11 +197,13 @@ static int e1000_set_settings(struct net_device *netdev,
197 ADVERTISED_TP | 197 ADVERTISED_TP |
198 ADVERTISED_Autoneg; 198 ADVERTISED_Autoneg;
199 ecmd->advertising = hw->autoneg_advertised; 199 ecmd->advertising = hw->autoneg_advertised;
200 } else 200 } else {
201 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { 201 u32 speed = ethtool_cmd_speed(ecmd);
202 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
202 clear_bit(__E1000_RESETTING, &adapter->flags); 203 clear_bit(__E1000_RESETTING, &adapter->flags);
203 return -EINVAL; 204 return -EINVAL;
204 } 205 }
206 }
205 207
206 /* reset the link */ 208 /* reset the link */
207 209
@@ -1753,46 +1755,28 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1753 return 0; 1755 return 0;
1754} 1756}
1755 1757
1756/* toggle LED 4 times per second = 2 "blinks" per second */ 1758static int e1000_set_phys_id(struct net_device *netdev,
1757#define E1000_ID_INTERVAL (HZ/4) 1759 enum ethtool_phys_id_state state)
1758
1759/* bit defines for adapter->led_status */
1760#define E1000_LED_ON 0
1761
1762static void e1000_led_blink_callback(unsigned long data)
1763{ 1760{
1764 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 1761 struct e1000_adapter *adapter = netdev_priv(netdev);
1765 struct e1000_hw *hw = &adapter->hw; 1762 struct e1000_hw *hw = &adapter->hw;
1766 1763
1767 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) 1764 switch (state) {
1768 e1000_led_off(hw); 1765 case ETHTOOL_ID_ACTIVE:
1769 else 1766 e1000_setup_led(hw);
1770 e1000_led_on(hw); 1767 return 2;
1771 1768
1772 mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); 1769 case ETHTOOL_ID_ON:
1773} 1770 e1000_led_on(hw);
1774 1771 break;
1775static int e1000_phys_id(struct net_device *netdev, u32 data)
1776{
1777 struct e1000_adapter *adapter = netdev_priv(netdev);
1778 struct e1000_hw *hw = &adapter->hw;
1779 1772
1780 if (!data) 1773 case ETHTOOL_ID_OFF:
1781 data = INT_MAX; 1774 e1000_led_off(hw);
1775 break;
1782 1776
1783 if (!adapter->blink_timer.function) { 1777 case ETHTOOL_ID_INACTIVE:
1784 init_timer(&adapter->blink_timer); 1778 e1000_cleanup_led(hw);
1785 adapter->blink_timer.function = e1000_led_blink_callback;
1786 adapter->blink_timer.data = (unsigned long)adapter;
1787 } 1779 }
1788 e1000_setup_led(hw);
1789 mod_timer(&adapter->blink_timer, jiffies);
1790 msleep_interruptible(data * 1000);
1791 del_timer_sync(&adapter->blink_timer);
1792
1793 e1000_led_off(hw);
1794 clear_bit(E1000_LED_ON, &adapter->led_status);
1795 e1000_cleanup_led(hw);
1796 1780
1797 return 0; 1781 return 0;
1798} 1782}
@@ -1929,7 +1913,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1929 .set_tso = e1000_set_tso, 1913 .set_tso = e1000_set_tso,
1930 .self_test = e1000_diag_test, 1914 .self_test = e1000_diag_test,
1931 .get_strings = e1000_get_strings, 1915 .get_strings = e1000_get_strings,
1932 .phys_id = e1000_phys_id, 1916 .set_phys_id = e1000_set_phys_id,
1933 .get_ethtool_stats = e1000_get_ethtool_stats, 1917 .get_ethtool_stats = e1000_get_ethtool_stats,
1934 .get_sset_count = e1000_get_sset_count, 1918 .get_sset_count = e1000_get_sset_count,
1935 .get_coalesce = e1000_get_coalesce, 1919 .get_coalesce = e1000_get_coalesce,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 477e066a1cf0..c18cb8e883dd 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -96,7 +96,6 @@ int e1000_up(struct e1000_adapter *adapter);
96void e1000_down(struct e1000_adapter *adapter); 96void e1000_down(struct e1000_adapter *adapter);
97void e1000_reinit_locked(struct e1000_adapter *adapter); 97void e1000_reinit_locked(struct e1000_adapter *adapter);
98void e1000_reset(struct e1000_adapter *adapter); 98void e1000_reset(struct e1000_adapter *adapter);
99int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
100int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 99int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
101int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 100int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
102void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 101void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
@@ -4385,7 +4384,6 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4385 struct mii_ioctl_data *data = if_mii(ifr); 4384 struct mii_ioctl_data *data = if_mii(ifr);
4386 int retval; 4385 int retval;
4387 u16 mii_reg; 4386 u16 mii_reg;
4388 u16 spddplx;
4389 unsigned long flags; 4387 unsigned long flags;
4390 4388
4391 if (hw->media_type != e1000_media_type_copper) 4389 if (hw->media_type != e1000_media_type_copper)
@@ -4424,17 +4422,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4424 hw->autoneg = 1; 4422 hw->autoneg = 1;
4425 hw->autoneg_advertised = 0x2F; 4423 hw->autoneg_advertised = 0x2F;
4426 } else { 4424 } else {
4425 u32 speed;
4427 if (mii_reg & 0x40) 4426 if (mii_reg & 0x40)
4428 spddplx = SPEED_1000; 4427 speed = SPEED_1000;
4429 else if (mii_reg & 0x2000) 4428 else if (mii_reg & 0x2000)
4430 spddplx = SPEED_100; 4429 speed = SPEED_100;
4431 else 4430 else
4432 spddplx = SPEED_10; 4431 speed = SPEED_10;
4433 spddplx += (mii_reg & 0x100) 4432 retval = e1000_set_spd_dplx(
4434 ? DUPLEX_FULL : 4433 adapter, speed,
4435 DUPLEX_HALF; 4434 ((mii_reg & 0x100)
4436 retval = e1000_set_spd_dplx(adapter, 4435 ? DUPLEX_FULL :
4437 spddplx); 4436 DUPLEX_HALF));
4438 if (retval) 4437 if (retval)
4439 return retval; 4438 return retval;
4440 } 4439 }
@@ -4596,20 +4595,24 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
4596 } 4595 }
4597} 4596}
4598 4597
4599int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 4598int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4600{ 4599{
4601 struct e1000_hw *hw = &adapter->hw; 4600 struct e1000_hw *hw = &adapter->hw;
4602 4601
4603 hw->autoneg = 0; 4602 hw->autoneg = 0;
4604 4603
4604 /* Make sure dplx is at most 1 bit and lsb of speed is not set
4605 * for the switch() below to work */
4606 if ((spd & 1) || (dplx & ~1))
4607 goto err_inval;
4608
4605 /* Fiber NICs only allow 1000 gbps Full duplex */ 4609 /* Fiber NICs only allow 1000 gbps Full duplex */
4606 if ((hw->media_type == e1000_media_type_fiber) && 4610 if ((hw->media_type == e1000_media_type_fiber) &&
4607 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4611 spd != SPEED_1000 &&
4608 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 4612 dplx != DUPLEX_FULL)
4609 return -EINVAL; 4613 goto err_inval;
4610 }
4611 4614
4612 switch (spddplx) { 4615 switch (spd + dplx) {
4613 case SPEED_10 + DUPLEX_HALF: 4616 case SPEED_10 + DUPLEX_HALF:
4614 hw->forced_speed_duplex = e1000_10_half; 4617 hw->forced_speed_duplex = e1000_10_half;
4615 break; 4618 break;
@@ -4628,10 +4631,13 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4628 break; 4631 break;
4629 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 4632 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4630 default: 4633 default:
4631 e_err(probe, "Unsupported Speed/Duplex configuration\n"); 4634 goto err_inval;
4632 return -EINVAL;
4633 } 4635 }
4634 return 0; 4636 return 0;
4637
4638err_inval:
4639 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4640 return -EINVAL;
4635} 4641}
4636 4642
4637static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 4643static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
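
e1000_set_spd_dplx() now takes speed and duplex as separate arguments but still dispatches on their sum, so the new "(spd & 1) || (dplx & ~1)" guard is what keeps that sum unambiguous: DUPLEX_HALF/DUPLEX_FULL are 0/1 and the speeds of interest (10, 100, 1000) are all even, so once the speed's low bit is clear and the duplex fits in one bit, a sum such as SPEED_100 + DUPLEX_FULL == 101 decodes to exactly one pair. The same guard reappears in the e1000e copy of this helper further down. A small compile-time restatement of that assumption:

#include <linux/ethtool.h>
#include <linux/kernel.h>

/* Restates the encoding assumption behind "switch (spd + dplx)":
 * duplex occupies only bit 0 and the ethtool speeds leave bit 0 clear,
 * so the sums cannot collide once malformed input is rejected. */
static inline void spd_dplx_encoding_check(void)
{
	BUILD_BUG_ON(DUPLEX_HALF != 0 || DUPLEX_FULL != 1);
	BUILD_BUG_ON((SPEED_10 | SPEED_100 | SPEED_1000) & 1);
}
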
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 89a69035e538..8295f2192439 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -300,6 +300,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
300 func->set_lan_id = e1000_set_lan_id_single_port; 300 func->set_lan_id = e1000_set_lan_id_single_port;
301 func->check_mng_mode = e1000e_check_mng_mode_generic; 301 func->check_mng_mode = e1000e_check_mng_mode_generic;
302 func->led_on = e1000e_led_on_generic; 302 func->led_on = e1000e_led_on_generic;
303 func->blink_led = e1000e_blink_led_generic;
303 304
304 /* FWSM register */ 305 /* FWSM register */
305 mac->has_fwsm = true; 306 mac->has_fwsm = true;
@@ -320,6 +321,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
320 default: 321 default:
321 func->check_mng_mode = e1000e_check_mng_mode_generic; 322 func->check_mng_mode = e1000e_check_mng_mode_generic;
322 func->led_on = e1000e_led_on_generic; 323 func->led_on = e1000e_led_on_generic;
324 func->blink_led = e1000e_blink_led_generic;
323 325
324 /* FWSM register */ 326 /* FWSM register */
325 mac->has_fwsm = true; 327 mac->has_fwsm = true;
@@ -431,9 +433,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
431 case e1000_82573: 433 case e1000_82573:
432 case e1000_82574: 434 case e1000_82574:
433 case e1000_82583: 435 case e1000_82583:
434 /* Disable ASPM L0s due to hardware errata */
435 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
436
437 if (pdev->device == E1000_DEV_ID_82573L) { 436 if (pdev->device == E1000_DEV_ID_82573L) {
438 adapter->flags |= FLAG_HAS_JUMBO_FRAMES; 437 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
439 adapter->max_hw_frame_size = DEFAULT_JUMBO; 438 adapter->max_hw_frame_size = DEFAULT_JUMBO;
@@ -594,7 +593,7 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
594 593
595 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 594 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
596 595
597 msleep(2); 596 usleep_range(2000, 4000);
598 i++; 597 i++;
599 } while (i < MDIO_OWNERSHIP_TIMEOUT); 598 } while (i < MDIO_OWNERSHIP_TIMEOUT);
600 599
@@ -816,7 +815,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
816 815
817 /* Check for pending operations. */ 816 /* Check for pending operations. */
818 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 817 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
819 msleep(1); 818 usleep_range(1000, 2000);
820 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 819 if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
821 break; 820 break;
822 } 821 }
@@ -840,7 +839,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
840 ew32(EECD, eecd); 839 ew32(EECD, eecd);
841 840
842 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 841 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
843 msleep(1); 842 usleep_range(1000, 2000);
844 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 843 if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
845 break; 844 break;
846 } 845 }
@@ -930,7 +929,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
930 if (er32(EEMNGCTL) & 929 if (er32(EEMNGCTL) &
931 E1000_NVM_CFG_DONE_PORT_0) 930 E1000_NVM_CFG_DONE_PORT_0)
932 break; 931 break;
933 msleep(1); 932 usleep_range(1000, 2000);
934 timeout--; 933 timeout--;
935 } 934 }
936 if (!timeout) { 935 if (!timeout) {
@@ -1037,7 +1036,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1037 ew32(TCTL, E1000_TCTL_PSP); 1036 ew32(TCTL, E1000_TCTL_PSP);
1038 e1e_flush(); 1037 e1e_flush();
1039 1038
1040 msleep(10); 1039 usleep_range(10000, 20000);
1041 1040
1042 /* 1041 /*
1043 * Must acquire the MDIO ownership before MAC reset. 1042 * Must acquire the MDIO ownership before MAC reset.
@@ -2066,7 +2065,8 @@ struct e1000_info e1000_82573_info = {
2066 | FLAG_HAS_SMART_POWER_DOWN 2065 | FLAG_HAS_SMART_POWER_DOWN
2067 | FLAG_HAS_AMT 2066 | FLAG_HAS_AMT
2068 | FLAG_HAS_SWSM_ON_LOAD, 2067 | FLAG_HAS_SWSM_ON_LOAD,
2069 .flags2 = FLAG2_DISABLE_ASPM_L1, 2068 .flags2 = FLAG2_DISABLE_ASPM_L1
2069 | FLAG2_DISABLE_ASPM_L0S,
2070 .pba = 20, 2070 .pba = 20,
2071 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 2071 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
2072 .get_variants = e1000_get_variants_82571, 2072 .get_variants = e1000_get_variants_82571,
@@ -2086,7 +2086,8 @@ struct e1000_info e1000_82574_info = {
2086 | FLAG_HAS_SMART_POWER_DOWN 2086 | FLAG_HAS_SMART_POWER_DOWN
2087 | FLAG_HAS_AMT 2087 | FLAG_HAS_AMT
2088 | FLAG_HAS_CTRLEXT_ON_LOAD, 2088 | FLAG_HAS_CTRLEXT_ON_LOAD,
2089 .flags2 = FLAG2_CHECK_PHY_HANG, 2089 .flags2 = FLAG2_CHECK_PHY_HANG
2090 | FLAG2_DISABLE_ASPM_L0S,
2090 .pba = 32, 2091 .pba = 32,
2091 .max_hw_frame_size = DEFAULT_JUMBO, 2092 .max_hw_frame_size = DEFAULT_JUMBO,
2092 .get_variants = e1000_get_variants_82571, 2093 .get_variants = e1000_get_variants_82571,
@@ -2104,6 +2105,7 @@ struct e1000_info e1000_82583_info = {
2104 | FLAG_HAS_SMART_POWER_DOWN 2105 | FLAG_HAS_SMART_POWER_DOWN
2105 | FLAG_HAS_AMT 2106 | FLAG_HAS_AMT
2106 | FLAG_HAS_CTRLEXT_ON_LOAD, 2107 | FLAG_HAS_CTRLEXT_ON_LOAD,
2108 .flags2 = FLAG2_DISABLE_ASPM_L0S,
2107 .pba = 32, 2109 .pba = 32,
2108 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 2110 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
2109 .get_variants = e1000_get_variants_82571, 2111 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 00bf595ebd67..9549879e66a0 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -31,6 +31,7 @@
31#ifndef _E1000_H_ 31#ifndef _E1000_H_
32#define _E1000_H_ 32#define _E1000_H_
33 33
34#include <linux/bitops.h>
34#include <linux/types.h> 35#include <linux/types.h>
35#include <linux/timer.h> 36#include <linux/timer.h>
36#include <linux/workqueue.h> 37#include <linux/workqueue.h>
@@ -39,6 +40,7 @@
39#include <linux/pci.h> 40#include <linux/pci.h>
40#include <linux/pci-aspm.h> 41#include <linux/pci-aspm.h>
41#include <linux/crc32.h> 42#include <linux/crc32.h>
43#include <linux/if_vlan.h>
42 44
43#include "hw.h" 45#include "hw.h"
44 46
@@ -280,7 +282,7 @@ struct e1000_adapter {
280 282
281 const struct e1000_info *ei; 283 const struct e1000_info *ei;
282 284
283 struct vlan_group *vlgrp; 285 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
284 u32 bd_number; 286 u32 bd_number;
285 u32 rx_buffer_len; 287 u32 rx_buffer_len;
286 u16 mng_vlan_id; 288 u16 mng_vlan_id;
@@ -389,13 +391,10 @@ struct e1000_adapter {
389 391
390 bool fc_autoneg; 392 bool fc_autoneg;
391 393
392 unsigned long led_status;
393
394 unsigned int flags; 394 unsigned int flags;
395 unsigned int flags2; 395 unsigned int flags2;
396 struct work_struct downshift_task; 396 struct work_struct downshift_task;
397 struct work_struct update_phy_task; 397 struct work_struct update_phy_task;
398 struct work_struct led_blink_task;
399 struct work_struct print_hang_task; 398 struct work_struct print_hang_task;
400 399
401 bool idle_check; 400 bool idle_check;
@@ -456,6 +455,7 @@ struct e1000_info {
456#define FLAG2_HAS_PHY_STATS (1 << 4) 455#define FLAG2_HAS_PHY_STATS (1 << 4)
457#define FLAG2_HAS_EEE (1 << 5) 456#define FLAG2_HAS_EEE (1 << 5)
458#define FLAG2_DMA_BURST (1 << 6) 457#define FLAG2_DMA_BURST (1 << 6)
458#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
459#define FLAG2_DISABLE_AIM (1 << 8) 459#define FLAG2_DISABLE_AIM (1 << 8)
460#define FLAG2_CHECK_PHY_HANG (1 << 9) 460#define FLAG2_CHECK_PHY_HANG (1 << 9)
461 461
@@ -484,7 +484,6 @@ extern const char e1000e_driver_version[];
484 484
485extern void e1000e_check_options(struct e1000_adapter *adapter); 485extern void e1000e_check_options(struct e1000_adapter *adapter);
486extern void e1000e_set_ethtool_ops(struct net_device *netdev); 486extern void e1000e_set_ethtool_ops(struct net_device *netdev);
487extern void e1000e_led_blink_task(struct work_struct *work);
488 487
489extern int e1000e_up(struct e1000_adapter *adapter); 488extern int e1000e_up(struct e1000_adapter *adapter);
490extern void e1000e_down(struct e1000_adapter *adapter); 489extern void e1000e_down(struct e1000_adapter *adapter);
@@ -502,7 +501,6 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
502extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 501extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
503extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 502extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
504extern void e1000e_release_hw_control(struct e1000_adapter *adapter); 503extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
505extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
506 504
507extern unsigned int copybreak; 505extern unsigned int copybreak;
508 506
@@ -573,7 +571,7 @@ extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
573extern void e1000e_config_collision_dist(struct e1000_hw *hw); 571extern void e1000e_config_collision_dist(struct e1000_hw *hw);
574extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); 572extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
575extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); 573extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
576extern s32 e1000e_blink_led(struct e1000_hw *hw); 574extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
577extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); 575extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
578extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); 576extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
579extern void e1000e_reset_adaptive(struct e1000_hw *hw); 577extern void e1000e_reset_adaptive(struct e1000_hw *hw);
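
The e1000e header also swaps the vlan_group pointer for a plain bitmap, active_vlans[], with one bit per possible VLAN ID. The add/kill and restore paths are not part of this hunk, but with such a bitmap they would reduce to set_bit()/clear_bit() plus a for_each_set_bit() walk, along these lines (the foo_* names and the hardware-filter comments are placeholders):

#include <linux/bitops.h>
#include <linux/if_vlan.h>

struct foo_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static void foo_vlan_rx_add_vid(struct foo_adapter *adapter, u16 vid)
{
	/* program the hardware VLAN filter for vid here ... */
	set_bit(vid, adapter->active_vlans);
}

static void foo_vlan_rx_kill_vid(struct foo_adapter *adapter, u16 vid)
{
	/* ... and remove it here */
	clear_bit(vid, adapter->active_vlans);
}

static void foo_restore_vlans(struct foo_adapter *adapter)
{
	u16 vid;

	/* replay every configured VLAN after a reset */
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		foo_vlan_rx_add_vid(adapter, vid);
}
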
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 2fefa820302b..f4bbeb22f51f 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -612,7 +612,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
612 while (timeout) { 612 while (timeout) {
613 if (er32(EEMNGCTL) & mask) 613 if (er32(EEMNGCTL) & mask)
614 break; 614 break;
615 msleep(1); 615 usleep_range(1000, 2000);
616 timeout--; 616 timeout--;
617 } 617 }
618 if (!timeout) { 618 if (!timeout) {
@@ -802,7 +802,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
802 ew32(TCTL, E1000_TCTL_PSP); 802 ew32(TCTL, E1000_TCTL_PSP);
803 e1e_flush(); 803 e1e_flush();
804 804
805 msleep(10); 805 usleep_range(10000, 20000);
806 806
807 ctrl = er32(CTRL); 807 ctrl = er32(CTRL);
808 808
@@ -1434,6 +1434,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1434static struct e1000_mac_operations es2_mac_ops = { 1434static struct e1000_mac_operations es2_mac_ops = {
1435 .read_mac_addr = e1000_read_mac_addr_80003es2lan, 1435 .read_mac_addr = e1000_read_mac_addr_80003es2lan,
1436 .id_led_init = e1000e_id_led_init, 1436 .id_led_init = e1000e_id_led_init,
1437 .blink_led = e1000e_blink_led_generic,
1437 .check_mng_mode = e1000e_check_mng_mode_generic, 1438 .check_mng_mode = e1000e_check_mng_mode_generic,
1438 /* check_for_link dependent on media type */ 1439 /* check_for_link dependent on media type */
1439 .cleanup_led = e1000e_cleanup_led_generic, 1440 .cleanup_led = e1000e_cleanup_led_generic,
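
The scattered msleep(1)/msleep(10) -> usleep_range() changes in the e1000e files are about sleep granularity: msleep() rounds up to jiffies and can oversleep a 1 ms request many times over, while usleep_range() takes an explicit window in microseconds and lets the scheduler place (and coalesce) the wakeup anywhere inside it. The substitution used throughout is simply:

#include <linux/delay.h>

/* The conversion applied across the e1000e hunks: give the scheduler
 * an explicit microsecond window instead of a jiffy-granular sleep. */
static inline void example_short_waits(void)
{
	usleep_range(1000, 2000);	/* was msleep(1):  1-2 ms */
	usleep_range(10000, 20000);	/* was msleep(10): 10-20 ms */
}
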
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 07f09e96e453..859d0d3af6c9 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -122,6 +122,7 @@ static int e1000_get_settings(struct net_device *netdev,
122{ 122{
123 struct e1000_adapter *adapter = netdev_priv(netdev); 123 struct e1000_adapter *adapter = netdev_priv(netdev);
124 struct e1000_hw *hw = &adapter->hw; 124 struct e1000_hw *hw = &adapter->hw;
125 u32 speed;
125 126
126 if (hw->phy.media_type == e1000_media_type_copper) { 127 if (hw->phy.media_type == e1000_media_type_copper) {
127 128
@@ -159,23 +160,23 @@ static int e1000_get_settings(struct net_device *netdev,
159 ecmd->transceiver = XCVR_EXTERNAL; 160 ecmd->transceiver = XCVR_EXTERNAL;
160 } 161 }
161 162
162 ecmd->speed = -1; 163 speed = -1;
163 ecmd->duplex = -1; 164 ecmd->duplex = -1;
164 165
165 if (netif_running(netdev)) { 166 if (netif_running(netdev)) {
166 if (netif_carrier_ok(netdev)) { 167 if (netif_carrier_ok(netdev)) {
167 ecmd->speed = adapter->link_speed; 168 speed = adapter->link_speed;
168 ecmd->duplex = adapter->link_duplex - 1; 169 ecmd->duplex = adapter->link_duplex - 1;
169 } 170 }
170 } else { 171 } else {
171 u32 status = er32(STATUS); 172 u32 status = er32(STATUS);
172 if (status & E1000_STATUS_LU) { 173 if (status & E1000_STATUS_LU) {
173 if (status & E1000_STATUS_SPEED_1000) 174 if (status & E1000_STATUS_SPEED_1000)
174 ecmd->speed = 1000; 175 speed = SPEED_1000;
175 else if (status & E1000_STATUS_SPEED_100) 176 else if (status & E1000_STATUS_SPEED_100)
176 ecmd->speed = 100; 177 speed = SPEED_100;
177 else 178 else
178 ecmd->speed = 10; 179 speed = SPEED_10;
179 180
180 if (status & E1000_STATUS_FD) 181 if (status & E1000_STATUS_FD)
181 ecmd->duplex = DUPLEX_FULL; 182 ecmd->duplex = DUPLEX_FULL;
@@ -184,6 +185,7 @@ static int e1000_get_settings(struct net_device *netdev,
184 } 185 }
185 } 186 }
186 187
188 ethtool_cmd_speed_set(ecmd, speed);
187 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || 189 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
188 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 190 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
189 191
@@ -198,20 +200,25 @@ static int e1000_get_settings(struct net_device *netdev,
198 return 0; 200 return 0;
199} 201}
200 202
201static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 203static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
202{ 204{
203 struct e1000_mac_info *mac = &adapter->hw.mac; 205 struct e1000_mac_info *mac = &adapter->hw.mac;
204 206
205 mac->autoneg = 0; 207 mac->autoneg = 0;
206 208
209 /* Make sure dplx is at most 1 bit and lsb of speed is not set
210 * for the switch() below to work */
211 if ((spd & 1) || (dplx & ~1))
212 goto err_inval;
213
207 /* Fiber NICs only allow 1000 gbps Full duplex */ 214 /* Fiber NICs only allow 1000 gbps Full duplex */
208 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 215 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
209 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 216 spd != SPEED_1000 &&
210 e_err("Unsupported Speed/Duplex configuration\n"); 217 dplx != DUPLEX_FULL) {
211 return -EINVAL; 218 goto err_inval;
212 } 219 }
213 220
214 switch (spddplx) { 221 switch (spd + dplx) {
215 case SPEED_10 + DUPLEX_HALF: 222 case SPEED_10 + DUPLEX_HALF:
216 mac->forced_speed_duplex = ADVERTISE_10_HALF; 223 mac->forced_speed_duplex = ADVERTISE_10_HALF;
217 break; 224 break;
@@ -230,10 +237,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
230 break; 237 break;
231 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 238 case SPEED_1000 + DUPLEX_HALF: /* not supported */
232 default: 239 default:
233 e_err("Unsupported Speed/Duplex configuration\n"); 240 goto err_inval;
234 return -EINVAL;
235 } 241 }
236 return 0; 242 return 0;
243
244err_inval:
245 e_err("Unsupported Speed/Duplex configuration\n");
246 return -EINVAL;
237} 247}
238 248
239static int e1000_set_settings(struct net_device *netdev, 249static int e1000_set_settings(struct net_device *netdev,
@@ -253,7 +263,7 @@ static int e1000_set_settings(struct net_device *netdev,
253 } 263 }
254 264
255 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 265 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
256 msleep(1); 266 usleep_range(1000, 2000);
257 267
258 if (ecmd->autoneg == AUTONEG_ENABLE) { 268 if (ecmd->autoneg == AUTONEG_ENABLE) {
259 hw->mac.autoneg = 1; 269 hw->mac.autoneg = 1;
@@ -269,7 +279,8 @@ static int e1000_set_settings(struct net_device *netdev,
269 if (adapter->fc_autoneg) 279 if (adapter->fc_autoneg)
270 hw->fc.requested_mode = e1000_fc_default; 280 hw->fc.requested_mode = e1000_fc_default;
271 } else { 281 } else {
272 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { 282 u32 speed = ethtool_cmd_speed(ecmd);
283 if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
273 clear_bit(__E1000_RESETTING, &adapter->state); 284 clear_bit(__E1000_RESETTING, &adapter->state);
274 return -EINVAL; 285 return -EINVAL;
275 } 286 }
@@ -317,7 +328,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
317 adapter->fc_autoneg = pause->autoneg; 328 adapter->fc_autoneg = pause->autoneg;
318 329
319 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 330 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
320 msleep(1); 331 usleep_range(1000, 2000);
321 332
322 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 333 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
323 hw->fc.requested_mode = e1000_fc_default; 334 hw->fc.requested_mode = e1000_fc_default;
@@ -673,7 +684,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
673 return -EINVAL; 684 return -EINVAL;
674 685
675 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 686 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
676 msleep(1); 687 usleep_range(1000, 2000);
677 688
678 if (netif_running(adapter->netdev)) 689 if (netif_running(adapter->netdev))
679 e1000e_down(adapter); 690 e1000e_down(adapter);
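The msleep() to usleep_range() conversions in this and the following hunks apply the usual rule for short waits; an illustrative sketch (not driver code):

	#include <linux/delay.h>

	/* msleep() rounds up to whole jiffies, so msleep(1) can sleep roughly
	 * 10-20 ms with HZ=100; usleep_range() is hrtimer based and gives the
	 * scheduler an explicit window in which to coalesce the wakeup. */
	static void example_delays(void)
	{
		usleep_range(1000, 2000);	/* preferred for waits of ~10 us - 20 ms */
		msleep(50);			/* still fine for sleeps well above a jiffy */
	}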
@@ -952,7 +963,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
952 963
953 /* Disable all the interrupts */ 964 /* Disable all the interrupts */
954 ew32(IMC, 0xFFFFFFFF); 965 ew32(IMC, 0xFFFFFFFF);
955 msleep(10); 966 usleep_range(10000, 20000);
956 967
957 /* Test each interrupt */ 968 /* Test each interrupt */
958 for (i = 0; i < 10; i++) { 969 for (i = 0; i < 10; i++) {
@@ -984,7 +995,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
984 adapter->test_icr = 0; 995 adapter->test_icr = 0;
985 ew32(IMC, mask); 996 ew32(IMC, mask);
986 ew32(ICS, mask); 997 ew32(ICS, mask);
987 msleep(10); 998 usleep_range(10000, 20000);
988 999
989 if (adapter->test_icr & mask) { 1000 if (adapter->test_icr & mask) {
990 *data = 3; 1001 *data = 3;
@@ -1002,7 +1013,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1002 adapter->test_icr = 0; 1013 adapter->test_icr = 0;
1003 ew32(IMS, mask); 1014 ew32(IMS, mask);
1004 ew32(ICS, mask); 1015 ew32(ICS, mask);
1005 msleep(10); 1016 usleep_range(10000, 20000);
1006 1017
1007 if (!(adapter->test_icr & mask)) { 1018 if (!(adapter->test_icr & mask)) {
1008 *data = 4; 1019 *data = 4;
@@ -1020,7 +1031,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1020 adapter->test_icr = 0; 1031 adapter->test_icr = 0;
1021 ew32(IMC, ~mask & 0x00007FFF); 1032 ew32(IMC, ~mask & 0x00007FFF);
1022 ew32(ICS, ~mask & 0x00007FFF); 1033 ew32(ICS, ~mask & 0x00007FFF);
1023 msleep(10); 1034 usleep_range(10000, 20000);
1024 1035
1025 if (adapter->test_icr) { 1036 if (adapter->test_icr) {
1026 *data = 5; 1037 *data = 5;
@@ -1031,7 +1042,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1031 1042
1032 /* Disable all the interrupts */ 1043 /* Disable all the interrupts */
1033 ew32(IMC, 0xFFFFFFFF); 1044 ew32(IMC, 0xFFFFFFFF);
1034 msleep(10); 1045 usleep_range(10000, 20000);
1035 1046
1036 /* Unhook test interrupt handler */ 1047 /* Unhook test interrupt handler */
1037 free_irq(irq, netdev); 1048 free_irq(irq, netdev);
@@ -1406,7 +1417,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1406 */ 1417 */
1407#define E1000_SERDES_LB_ON 0x410 1418#define E1000_SERDES_LB_ON 0x410
1408 ew32(SCTL, E1000_SERDES_LB_ON); 1419 ew32(SCTL, E1000_SERDES_LB_ON);
1409 msleep(10); 1420 usleep_range(10000, 20000);
1410 1421
1411 return 0; 1422 return 0;
1412} 1423}
@@ -1501,7 +1512,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1501 hw->phy.media_type == e1000_media_type_internal_serdes) { 1512 hw->phy.media_type == e1000_media_type_internal_serdes) {
1502#define E1000_SERDES_LB_OFF 0x400 1513#define E1000_SERDES_LB_OFF 0x400
1503 ew32(SCTL, E1000_SERDES_LB_OFF); 1514 ew32(SCTL, E1000_SERDES_LB_OFF);
1504 msleep(10); 1515 usleep_range(10000, 20000);
1505 break; 1516 break;
1506 } 1517 }
1507 /* Fall Through */ 1518 /* Fall Through */
@@ -1851,64 +1862,35 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1851 return 0; 1862 return 0;
1852} 1863}
1853 1864
1854/* toggle LED 4 times per second = 2 "blinks" per second */ 1865static int e1000_set_phys_id(struct net_device *netdev,
1855#define E1000_ID_INTERVAL (HZ/4) 1866 enum ethtool_phys_id_state state)
1856
1857/* bit defines for adapter->led_status */
1858#define E1000_LED_ON 0
1859
1860void e1000e_led_blink_task(struct work_struct *work)
1861{
1862 struct e1000_adapter *adapter = container_of(work,
1863 struct e1000_adapter, led_blink_task);
1864
1865 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1866 adapter->hw.mac.ops.led_off(&adapter->hw);
1867 else
1868 adapter->hw.mac.ops.led_on(&adapter->hw);
1869}
1870
1871static void e1000_led_blink_callback(unsigned long data)
1872{
1873 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1874
1875 schedule_work(&adapter->led_blink_task);
1876 mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
1877}
1878
1879static int e1000_phys_id(struct net_device *netdev, u32 data)
1880{ 1867{
1881 struct e1000_adapter *adapter = netdev_priv(netdev); 1868 struct e1000_adapter *adapter = netdev_priv(netdev);
1882 struct e1000_hw *hw = &adapter->hw; 1869 struct e1000_hw *hw = &adapter->hw;
1883 1870
1884 if (!data) 1871 switch (state) {
1885 data = INT_MAX; 1872 case ETHTOOL_ID_ACTIVE:
1873 if (!hw->mac.ops.blink_led)
1874 return 2; /* cycle on/off twice per second */
1886 1875
1887 if ((hw->phy.type == e1000_phy_ife) || 1876 hw->mac.ops.blink_led(hw);
1888 (hw->mac.type == e1000_pchlan) || 1877 break;
1889 (hw->mac.type == e1000_pch2lan) || 1878
1890 (hw->mac.type == e1000_82583) || 1879 case ETHTOOL_ID_INACTIVE:
1891 (hw->mac.type == e1000_82574)) {
1892 if (!adapter->blink_timer.function) {
1893 init_timer(&adapter->blink_timer);
1894 adapter->blink_timer.function =
1895 e1000_led_blink_callback;
1896 adapter->blink_timer.data = (unsigned long) adapter;
1897 }
1898 mod_timer(&adapter->blink_timer, jiffies);
1899 msleep_interruptible(data * 1000);
1900 del_timer_sync(&adapter->blink_timer);
1901 if (hw->phy.type == e1000_phy_ife) 1880 if (hw->phy.type == e1000_phy_ife)
1902 e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); 1881 e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
1903 } else { 1882 hw->mac.ops.led_off(hw);
1904 e1000e_blink_led(hw); 1883 hw->mac.ops.cleanup_led(hw);
1905 msleep_interruptible(data * 1000); 1884 break;
1906 }
1907 1885
1908 hw->mac.ops.led_off(hw); 1886 case ETHTOOL_ID_ON:
1909 clear_bit(E1000_LED_ON, &adapter->led_status); 1887 adapter->hw.mac.ops.led_on(&adapter->hw);
1910 hw->mac.ops.cleanup_led(hw); 1888 break;
1911 1889
1890 case ETHTOOL_ID_OFF:
1891 adapter->hw.mac.ops.led_off(&adapter->hw);
1892 break;
1893 }
1912 return 0; 1894 return 0;
1913} 1895}
1914 1896
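The conversion above drops the driver-private blink timer and work item in favour of the generic set_phys_id contract; a minimal skeleton with hypothetical foo_* helpers:

	static int foo_set_phys_id(struct net_device *netdev,
				   enum ethtool_phys_id_state state)
	{
		struct foo_adapter *adapter = netdev_priv(netdev);

		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			if (foo_hw_can_blink(adapter)) {
				foo_hw_start_blink(adapter);	/* hardware blinks by itself */
				return 0;
			}
			return 2;	/* no hw blink: core alternates ON/OFF twice per second */
		case ETHTOOL_ID_ON:
			foo_led_on(adapter);
			break;
		case ETHTOOL_ID_OFF:
			foo_led_off(adapter);
			break;
		case ETHTOOL_ID_INACTIVE:
			foo_led_restore(adapter);	/* put the LED back to normal operation */
			break;
		}
		return 0;
	}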
@@ -2020,6 +2002,31 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
2020 } 2002 }
2021} 2003}
2022 2004
2005static int e1000e_set_flags(struct net_device *netdev, u32 data)
2006{
2007 struct e1000_adapter *adapter = netdev_priv(netdev);
2008 bool need_reset = false;
2009 int rc;
2010
2011 need_reset = (data & ETH_FLAG_RXVLAN) !=
2012 (netdev->features & NETIF_F_HW_VLAN_RX);
2013
2014 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
2015 ETH_FLAG_TXVLAN);
2016
2017 if (rc)
2018 return rc;
2019
2020 if (need_reset) {
2021 if (netif_running(netdev))
2022 e1000e_reinit_locked(adapter);
2023 else
2024 e1000e_reset(adapter);
2025 }
2026
2027 return 0;
2028}
2029
2023static const struct ethtool_ops e1000_ethtool_ops = { 2030static const struct ethtool_ops e1000_ethtool_ops = {
2024 .get_settings = e1000_get_settings, 2031 .get_settings = e1000_get_settings,
2025 .set_settings = e1000_set_settings, 2032 .set_settings = e1000_set_settings,
@@ -2049,12 +2056,13 @@ static const struct ethtool_ops e1000_ethtool_ops = {
2049 .set_tso = e1000_set_tso, 2056 .set_tso = e1000_set_tso,
2050 .self_test = e1000_diag_test, 2057 .self_test = e1000_diag_test,
2051 .get_strings = e1000_get_strings, 2058 .get_strings = e1000_get_strings,
2052 .phys_id = e1000_phys_id, 2059 .set_phys_id = e1000_set_phys_id,
2053 .get_ethtool_stats = e1000_get_ethtool_stats, 2060 .get_ethtool_stats = e1000_get_ethtool_stats,
2054 .get_sset_count = e1000e_get_sset_count, 2061 .get_sset_count = e1000e_get_sset_count,
2055 .get_coalesce = e1000_get_coalesce, 2062 .get_coalesce = e1000_get_coalesce,
2056 .set_coalesce = e1000_set_coalesce, 2063 .set_coalesce = e1000_set_coalesce,
2057 .get_flags = ethtool_op_get_flags, 2064 .get_flags = ethtool_op_get_flags,
2065 .set_flags = e1000e_set_flags,
2058}; 2066};
2059 2067
2060void e1000e_set_ethtool_ops(struct net_device *netdev) 2068void e1000e_set_ethtool_ops(struct net_device *netdev)
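The generic shape of the set_flags handler added in this file, with a hypothetical foo_* helper standing in for the hardware side; ETH_FLAG_RXVLAN/TXVLAN intentionally share values with the NETIF_F_HW_VLAN_RX/TX feature bits, which is what makes the comparison below work:

	static int foo_set_flags(struct net_device *netdev, u32 data)
	{
		bool rxvlan_changed = (data & ETH_FLAG_RXVLAN) !=
				      (netdev->features & NETIF_F_HW_VLAN_RX);
		int rc;

		/* validate and apply the requested bits against the supported mask */
		rc = ethtool_op_set_flags(netdev, data,
					  ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
		if (rc)
			return rc;

		if (rxvlan_changed)
			foo_reprogram_vlan_stripping(netdev);	/* hypothetical */

		return 0;
	}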
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 307e1ec22417..6c2fa8327f5c 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -756,6 +756,7 @@ struct e1000_host_mng_command_info {
756/* Function pointers and static data for the MAC. */ 756/* Function pointers and static data for the MAC. */
757struct e1000_mac_operations { 757struct e1000_mac_operations {
758 s32 (*id_led_init)(struct e1000_hw *); 758 s32 (*id_led_init)(struct e1000_hw *);
759 s32 (*blink_led)(struct e1000_hw *);
759 bool (*check_mng_mode)(struct e1000_hw *); 760 bool (*check_mng_mode)(struct e1000_hw *);
760 s32 (*check_for_link)(struct e1000_hw *); 761 s32 (*check_for_link)(struct e1000_hw *);
761 s32 (*cleanup_led)(struct e1000_hw *); 762 s32 (*cleanup_led)(struct e1000_hw *);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index ce1dbfdca112..3369d1f6a39c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,7 +338,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
338 /* Ungate automatic PHY configuration on non-managed 82579 */ 338 /* Ungate automatic PHY configuration on non-managed 82579 */
339 if ((hw->mac.type == e1000_pch2lan) && 339 if ((hw->mac.type == e1000_pch2lan) &&
340 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 340 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
341 msleep(10); 341 usleep_range(10000, 20000);
342 e1000_gate_hw_phy_config_ich8lan(hw, false); 342 e1000_gate_hw_phy_config_ich8lan(hw, false);
343 } 343 }
344 344
@@ -427,7 +427,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
427 phy->id = 0; 427 phy->id = 0;
428 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && 428 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
429 (i++ < 100)) { 429 (i++ < 100)) {
430 msleep(1); 430 usleep_range(1000, 2000);
431 ret_val = e1000e_get_phy_id(hw); 431 ret_val = e1000e_get_phy_id(hw);
432 if (ret_val) 432 if (ret_val)
433 return ret_val; 433 return ret_val;
@@ -564,6 +564,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
564 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; 564 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
565 /* ID LED init */ 565 /* ID LED init */
566 mac->ops.id_led_init = e1000e_id_led_init; 566 mac->ops.id_led_init = e1000e_id_led_init;
567 /* blink LED */
568 mac->ops.blink_led = e1000e_blink_led_generic;
567 /* setup LED */ 569 /* setup LED */
568 mac->ops.setup_led = e1000e_setup_led_generic; 570 mac->ops.setup_led = e1000e_setup_led_generic;
569 /* cleanup LED */ 571 /* cleanup LED */
@@ -767,6 +769,8 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
767 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { 769 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
768 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; 770 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
769 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; 771 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
772
773 hw->mac.ops.blink_led = NULL;
770 } 774 }
771 775
772 if ((adapter->hw.mac.type == e1000_ich8lan) && 776 if ((adapter->hw.mac.type == e1000_ich8lan) &&
@@ -1704,7 +1708,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1704 goto out; 1708 goto out;
1705 1709
1706 /* Allow time for h/w to get to quiescent state after reset */ 1710 /* Allow time for h/w to get to quiescent state after reset */
1707 msleep(10); 1711 usleep_range(10000, 20000);
1708 1712
1709 /* Perform any necessary post-reset workarounds */ 1713 /* Perform any necessary post-reset workarounds */
1710 switch (hw->mac.type) { 1714 switch (hw->mac.type) {
@@ -1737,7 +1741,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1737 if (hw->mac.type == e1000_pch2lan) { 1741 if (hw->mac.type == e1000_pch2lan) {
1738 /* Ungate automatic PHY configuration on non-managed 82579 */ 1742 /* Ungate automatic PHY configuration on non-managed 82579 */
1739 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 1743 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1740 msleep(10); 1744 usleep_range(10000, 20000);
1741 e1000_gate_hw_phy_config_ich8lan(hw, false); 1745 e1000_gate_hw_phy_config_ich8lan(hw, false);
1742 } 1746 }
1743 1747
@@ -2532,7 +2536,7 @@ release:
2532 */ 2536 */
2533 if (!ret_val) { 2537 if (!ret_val) {
2534 e1000e_reload_nvm(hw); 2538 e1000e_reload_nvm(hw);
2535 msleep(10); 2539 usleep_range(10000, 20000);
2536 } 2540 }
2537 2541
2538out: 2542out:
@@ -3009,7 +3013,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3009 ew32(TCTL, E1000_TCTL_PSP); 3013 ew32(TCTL, E1000_TCTL_PSP);
3010 e1e_flush(); 3014 e1e_flush();
3011 3015
3012 msleep(10); 3016 usleep_range(10000, 20000);
3013 3017
3014 /* Workaround for ICH8 bit corruption issue in FIFO memory */ 3018 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3015 if (hw->mac.type == e1000_ich8lan) { 3019 if (hw->mac.type == e1000_ich8lan) {
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 96921de5df2e..dd8ab05b5590 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -144,7 +144,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
144 * @hw: pointer to the HW structure 144 * @hw: pointer to the HW structure
145 * @rar_count: receive address registers 145 * @rar_count: receive address registers
146 * 146 *
147 * Setups the receive address registers by setting the base receive address 147 * Setup the receive address registers by setting the base receive address
148 * register to the devices MAC address and clearing all the other receive 148 * register to the devices MAC address and clearing all the other receive
149 * address registers to 0. 149 * address registers to 0.
150 **/ 150 **/
@@ -868,7 +868,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
868 * milliseconds even if the other end is doing it in SW). 868 * milliseconds even if the other end is doing it in SW).
869 */ 869 */
870 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { 870 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
871 msleep(10); 871 usleep_range(10000, 20000);
872 status = er32(STATUS); 872 status = er32(STATUS);
873 if (status & E1000_STATUS_LU) 873 if (status & E1000_STATUS_LU)
874 break; 874 break;
@@ -930,7 +930,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
930 930
931 ew32(CTRL, ctrl); 931 ew32(CTRL, ctrl);
932 e1e_flush(); 932 e1e_flush();
933 msleep(1); 933 usleep_range(1000, 2000);
934 934
935 /* 935 /*
936 * For these adapters, the SW definable pin 1 is set when the optics 936 * For these adapters, the SW definable pin 1 is set when the optics
@@ -1181,7 +1181,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1181 * of pause frames. In this case, we had to advertise 1181 * of pause frames. In this case, we had to advertise
1182 * FULL flow control because we could not advertise Rx 1182 * FULL flow control because we could not advertise Rx
1183 * ONLY. Hence, we must now check to see if we need to 1183 * ONLY. Hence, we must now check to see if we need to
1184 * turn OFF the TRANSMISSION of PAUSE frames. 1184 * turn OFF the TRANSMISSION of PAUSE frames.
1185 */ 1185 */
1186 if (hw->fc.requested_mode == e1000_fc_full) { 1186 if (hw->fc.requested_mode == e1000_fc_full) {
1187 hw->fc.current_mode = e1000_fc_full; 1187 hw->fc.current_mode = e1000_fc_full;
@@ -1385,7 +1385,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1385 while (i < AUTO_READ_DONE_TIMEOUT) { 1385 while (i < AUTO_READ_DONE_TIMEOUT) {
1386 if (er32(EECD) & E1000_EECD_AUTO_RD) 1386 if (er32(EECD) & E1000_EECD_AUTO_RD)
1387 break; 1387 break;
1388 msleep(1); 1388 usleep_range(1000, 2000);
1389 i++; 1389 i++;
1390 } 1390 }
1391 1391
@@ -1530,12 +1530,12 @@ s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1530} 1530}
1531 1531
1532/** 1532/**
1533 * e1000e_blink_led - Blink LED 1533 * e1000e_blink_led_generic - Blink LED
1534 * @hw: pointer to the HW structure 1534 * @hw: pointer to the HW structure
1535 * 1535 *
1536 * Blink the LEDs which are set to be on. 1536 * Blink the LEDs which are set to be on.
1537 **/ 1537 **/
1538s32 e1000e_blink_led(struct e1000_hw *hw) 1538s32 e1000e_blink_led_generic(struct e1000_hw *hw)
1539{ 1539{
1540 u32 ledctl_blink = 0; 1540 u32 ledctl_blink = 0;
1541 u32 i; 1541 u32 i;
@@ -2087,8 +2087,6 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2087 if (ret_val) 2087 if (ret_val)
2088 return ret_val; 2088 return ret_val;
2089 2089
2090 msleep(10);
2091
2092 while (widx < words) { 2090 while (widx < words) {
2093 u8 write_opcode = NVM_WRITE_OPCODE_SPI; 2091 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
2094 2092
@@ -2132,7 +2130,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2132 } 2130 }
2133 } 2131 }
2134 2132
2135 msleep(10); 2133 usleep_range(10000, 20000);
2136 nvm->ops.release(hw); 2134 nvm->ops.release(hw);
2137 return 0; 2135 return 0;
2138} 2136}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 506a0a0043b3..0939040305fa 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -58,6 +58,8 @@
58char e1000e_driver_name[] = "e1000e"; 58char e1000e_driver_name[] = "e1000e";
59const char e1000e_driver_version[] = DRV_VERSION; 59const char e1000e_driver_version[] = DRV_VERSION;
60 60
61static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
62
61static const struct e1000_info *e1000_info_tbl[] = { 63static const struct e1000_info *e1000_info_tbl[] = {
62 [board_82571] = &e1000_82571_info, 64 [board_82571] = &e1000_82571_info,
63 [board_82572] = &e1000_82572_info, 65 [board_82572] = &e1000_82572_info,
@@ -459,13 +461,13 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
459 struct net_device *netdev, struct sk_buff *skb, 461 struct net_device *netdev, struct sk_buff *skb,
460 u8 status, __le16 vlan) 462 u8 status, __le16 vlan)
461{ 463{
464 u16 tag = le16_to_cpu(vlan);
462 skb->protocol = eth_type_trans(skb, netdev); 465 skb->protocol = eth_type_trans(skb, netdev);
463 466
464 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) 467 if (status & E1000_RXD_STAT_VP)
465 vlan_gro_receive(&adapter->napi, adapter->vlgrp, 468 __vlan_hwaccel_put_tag(skb, tag);
466 le16_to_cpu(vlan), skb); 469
467 else 470 napi_gro_receive(&adapter->napi, skb);
468 napi_gro_receive(&adapter->napi, skb);
469} 471}
470 472
471/** 473/**
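The receive path now relies on hardware-accelerated VLAN tagging instead of a vlan_group; a self-contained sketch with hypothetical foo_* names, using the two-argument __vlan_hwaccel_put_tag() of this era:

	#include <linux/if_vlan.h>

	static void foo_receive_skb(struct napi_struct *napi, struct net_device *netdev,
				    struct sk_buff *skb, bool vlan_valid, __le16 vlan)
	{
		skb->protocol = eth_type_trans(skb, netdev);

		/* record the tag; the stack strips and demuxes it when
		 * NETIF_F_HW_VLAN_RX is enabled */
		if (vlan_valid)
			__vlan_hwaccel_put_tag(skb, le16_to_cpu(vlan));

		napi_gro_receive(napi, skb);
	}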
@@ -2433,6 +2435,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2433 vfta |= (1 << (vid & 0x1F)); 2435 vfta |= (1 << (vid & 0x1F));
2434 hw->mac.ops.write_vfta(hw, index, vfta); 2436 hw->mac.ops.write_vfta(hw, index, vfta);
2435 } 2437 }
2438
2439 set_bit(vid, adapter->active_vlans);
2436} 2440}
2437 2441
2438static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2442static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2441,13 +2445,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2441 struct e1000_hw *hw = &adapter->hw; 2445 struct e1000_hw *hw = &adapter->hw;
2442 u32 vfta, index; 2446 u32 vfta, index;
2443 2447
2444 if (!test_bit(__E1000_DOWN, &adapter->state))
2445 e1000_irq_disable(adapter);
2446 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2447
2448 if (!test_bit(__E1000_DOWN, &adapter->state))
2449 e1000_irq_enable(adapter);
2450
2451 if ((adapter->hw.mng_cookie.status & 2448 if ((adapter->hw.mng_cookie.status &
2452 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2449 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2453 (vid == adapter->mng_vlan_id)) { 2450 (vid == adapter->mng_vlan_id)) {
@@ -2463,93 +2460,105 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2463 vfta &= ~(1 << (vid & 0x1F)); 2460 vfta &= ~(1 << (vid & 0x1F));
2464 hw->mac.ops.write_vfta(hw, index, vfta); 2461 hw->mac.ops.write_vfta(hw, index, vfta);
2465 } 2462 }
2463
2464 clear_bit(vid, adapter->active_vlans);
2466} 2465}
2467 2466
2468static void e1000_update_mng_vlan(struct e1000_adapter *adapter) 2467/**
2468 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2469 * @adapter: board private structure to initialize
2470 **/
2471static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2469{ 2472{
2470 struct net_device *netdev = adapter->netdev; 2473 struct net_device *netdev = adapter->netdev;
2471 u16 vid = adapter->hw.mng_cookie.vlan_id; 2474 struct e1000_hw *hw = &adapter->hw;
2472 u16 old_vid = adapter->mng_vlan_id; 2475 u32 rctl;
2473
2474 if (!adapter->vlgrp)
2475 return;
2476 2476
2477 if (!vlan_group_get_device(adapter->vlgrp, vid)) { 2477 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2478 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2478 /* disable VLAN receive filtering */
2479 if (adapter->hw.mng_cookie.status & 2479 rctl = er32(RCTL);
2480 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { 2480 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2481 e1000_vlan_rx_add_vid(netdev, vid); 2481 ew32(RCTL, rctl);
2482 adapter->mng_vlan_id = vid; 2482
2483 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2484 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2485 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2483 } 2486 }
2484
2485 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
2486 (vid != old_vid) &&
2487 !vlan_group_get_device(adapter->vlgrp, old_vid))
2488 e1000_vlan_rx_kill_vid(netdev, old_vid);
2489 } else {
2490 adapter->mng_vlan_id = vid;
2491 } 2487 }
2492} 2488}
2493 2489
2490/**
2491 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2492 * @adapter: board private structure to initialize
2493 **/
2494static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2495{
2496 struct e1000_hw *hw = &adapter->hw;
2497 u32 rctl;
2494 2498
2495static void e1000_vlan_rx_register(struct net_device *netdev, 2499 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2496 struct vlan_group *grp) 2500 /* enable VLAN receive filtering */
2501 rctl = er32(RCTL);
2502 rctl |= E1000_RCTL_VFE;
2503 rctl &= ~E1000_RCTL_CFIEN;
2504 ew32(RCTL, rctl);
2505 }
2506}
2507
2508/**
2509 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2510 * @adapter: board private structure to initialize
2511 **/
2512static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2497{ 2513{
2498 struct e1000_adapter *adapter = netdev_priv(netdev);
2499 struct e1000_hw *hw = &adapter->hw; 2514 struct e1000_hw *hw = &adapter->hw;
2500 u32 ctrl, rctl; 2515 u32 ctrl;
2501 2516
2502 if (!test_bit(__E1000_DOWN, &adapter->state)) 2517 /* disable VLAN tag insert/strip */
2503 e1000_irq_disable(adapter); 2518 ctrl = er32(CTRL);
2504 adapter->vlgrp = grp; 2519 ctrl &= ~E1000_CTRL_VME;
2520 ew32(CTRL, ctrl);
2521}
2505 2522
2506 if (grp) { 2523/**
2507 /* enable VLAN tag insert/strip */ 2524 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2508 ctrl = er32(CTRL); 2525 * @adapter: board private structure to initialize
2509 ctrl |= E1000_CTRL_VME; 2526 **/
2510 ew32(CTRL, ctrl); 2527static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2528{
2529 struct e1000_hw *hw = &adapter->hw;
2530 u32 ctrl;
2511 2531
2512 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2532 /* enable VLAN tag insert/strip */
2513 /* enable VLAN receive filtering */ 2533 ctrl = er32(CTRL);
2514 rctl = er32(RCTL); 2534 ctrl |= E1000_CTRL_VME;
2515 rctl &= ~E1000_RCTL_CFIEN; 2535 ew32(CTRL, ctrl);
2516 ew32(RCTL, rctl); 2536}
2517 e1000_update_mng_vlan(adapter);
2518 }
2519 } else {
2520 /* disable VLAN tag insert/strip */
2521 ctrl = er32(CTRL);
2522 ctrl &= ~E1000_CTRL_VME;
2523 ew32(CTRL, ctrl);
2524 2537
2525 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { 2538static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2526 if (adapter->mng_vlan_id != 2539{
2527 (u16)E1000_MNG_VLAN_NONE) { 2540 struct net_device *netdev = adapter->netdev;
2528 e1000_vlan_rx_kill_vid(netdev, 2541 u16 vid = adapter->hw.mng_cookie.vlan_id;
2529 adapter->mng_vlan_id); 2542 u16 old_vid = adapter->mng_vlan_id;
2530 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2543
2531 } 2544 if (adapter->hw.mng_cookie.status &
2532 } 2545 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2546 e1000_vlan_rx_add_vid(netdev, vid);
2547 adapter->mng_vlan_id = vid;
2533 } 2548 }
2534 2549
2535 if (!test_bit(__E1000_DOWN, &adapter->state)) 2550 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2536 e1000_irq_enable(adapter); 2551 e1000_vlan_rx_kill_vid(netdev, old_vid);
2537} 2552}
2538 2553
2539static void e1000_restore_vlan(struct e1000_adapter *adapter) 2554static void e1000_restore_vlan(struct e1000_adapter *adapter)
2540{ 2555{
2541 u16 vid; 2556 u16 vid;
2542 2557
2543 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2558 e1000_vlan_rx_add_vid(adapter->netdev, 0);
2544
2545 if (!adapter->vlgrp)
2546 return;
2547 2559
2548 for (vid = 0; vid < VLAN_N_VID; vid++) { 2560 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2549 if (!vlan_group_get_device(adapter->vlgrp, vid))
2550 continue;
2551 e1000_vlan_rx_add_vid(adapter->netdev, vid); 2561 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2552 }
2553} 2562}
2554 2563
2555static void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2564static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
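The vlan_group pointer is replaced by a per-adapter VLAN-ID bitmap that can be replayed after a reset; a sketch of the pattern with hypothetical foo_* names:

	struct foo_adapter {
		struct net_device *netdev;
		unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
		/* ... */
	};

	static void foo_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
	{
		struct foo_adapter *adapter = netdev_priv(netdev);

		foo_write_hw_vlan_filter(adapter, vid, true);	/* hypothetical hw filter write */
		set_bit(vid, adapter->active_vlans);
	}

	static void foo_restore_vlan(struct foo_adapter *adapter)
	{
		u16 vid;

		/* replay every VLAN the stack registered before the reset */
		for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
			foo_vlan_rx_add_vid(adapter->netdev, vid);
	}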
@@ -2902,7 +2911,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2902 rctl = er32(RCTL); 2911 rctl = er32(RCTL);
2903 ew32(RCTL, rctl & ~E1000_RCTL_EN); 2912 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2904 e1e_flush(); 2913 e1e_flush();
2905 msleep(10); 2914 usleep_range(10000, 20000);
2906 2915
2907 if (adapter->flags2 & FLAG2_DMA_BURST) { 2916 if (adapter->flags2 & FLAG2_DMA_BURST) {
2908 /* 2917 /*
@@ -3039,6 +3048,8 @@ static void e1000_set_multi(struct net_device *netdev)
3039 if (netdev->flags & IFF_PROMISC) { 3048 if (netdev->flags & IFF_PROMISC) {
3040 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 3049 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3041 rctl &= ~E1000_RCTL_VFE; 3050 rctl &= ~E1000_RCTL_VFE;
3051 /* Do not hardware filter VLANs in promisc mode */
3052 e1000e_vlan_filter_disable(adapter);
3042 } else { 3053 } else {
3043 if (netdev->flags & IFF_ALLMULTI) { 3054 if (netdev->flags & IFF_ALLMULTI) {
3044 rctl |= E1000_RCTL_MPE; 3055 rctl |= E1000_RCTL_MPE;
@@ -3046,8 +3057,7 @@ static void e1000_set_multi(struct net_device *netdev)
3046 } else { 3057 } else {
3047 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); 3058 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3048 } 3059 }
3049 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) 3060 e1000e_vlan_filter_enable(adapter);
3050 rctl |= E1000_RCTL_VFE;
3051 } 3061 }
3052 3062
3053 ew32(RCTL, rctl); 3063 ew32(RCTL, rctl);
@@ -3072,6 +3082,11 @@ static void e1000_set_multi(struct net_device *netdev)
3072 */ 3082 */
3073 e1000_update_mc_addr_list(hw, NULL, 0); 3083 e1000_update_mc_addr_list(hw, NULL, 0);
3074 } 3084 }
3085
3086 if (netdev->features & NETIF_F_HW_VLAN_RX)
3087 e1000e_vlan_strip_enable(adapter);
3088 else
3089 e1000e_vlan_strip_disable(adapter);
3075} 3090}
3076 3091
3077/** 3092/**
@@ -3383,7 +3398,7 @@ void e1000e_down(struct e1000_adapter *adapter)
3383 ew32(TCTL, tctl); 3398 ew32(TCTL, tctl);
3384 /* flush both disables and wait for them to finish */ 3399 /* flush both disables and wait for them to finish */
3385 e1e_flush(); 3400 e1e_flush();
3386 msleep(10); 3401 usleep_range(10000, 20000);
3387 3402
3388 napi_disable(&adapter->napi); 3403 napi_disable(&adapter->napi);
3389 e1000_irq_disable(adapter); 3404 e1000_irq_disable(adapter);
@@ -3418,7 +3433,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
3418{ 3433{
3419 might_sleep(); 3434 might_sleep();
3420 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 3435 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3421 msleep(1); 3436 usleep_range(1000, 2000);
3422 e1000e_down(adapter); 3437 e1000e_down(adapter);
3423 e1000e_up(adapter); 3438 e1000e_up(adapter);
3424 clear_bit(__E1000_RESETTING, &adapter->state); 3439 clear_bit(__E1000_RESETTING, &adapter->state);
@@ -3721,10 +3736,8 @@ static int e1000_close(struct net_device *netdev)
3721 * kill manageability vlan ID if supported, but not if a vlan with 3736 * kill manageability vlan ID if supported, but not if a vlan with
3722 * the same ID is registered on the host OS (let 8021q kill it) 3737 * the same ID is registered on the host OS (let 8021q kill it)
3723 */ 3738 */
3724 if ((adapter->hw.mng_cookie.status & 3739 if (adapter->hw.mng_cookie.status &
3725 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 3740 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
3726 !(adapter->vlgrp &&
3727 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
3728 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 3741 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3729 3742
3730 /* 3743 /*
@@ -4328,7 +4341,6 @@ static void e1000_watchdog_task(struct work_struct *work)
4328link_up: 4341link_up:
4329 spin_lock(&adapter->stats64_lock); 4342 spin_lock(&adapter->stats64_lock);
4330 e1000e_update_stats(adapter); 4343 e1000e_update_stats(adapter);
4331 spin_unlock(&adapter->stats64_lock);
4332 4344
4333 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4345 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4334 adapter->tpt_old = adapter->stats.tpt; 4346 adapter->tpt_old = adapter->stats.tpt;
@@ -4339,6 +4351,7 @@ link_up:
4339 adapter->gorc_old = adapter->stats.gorc; 4351 adapter->gorc_old = adapter->stats.gorc;
4340 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; 4352 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4341 adapter->gotc_old = adapter->stats.gotc; 4353 adapter->gotc_old = adapter->stats.gotc;
4354 spin_unlock(&adapter->stats64_lock);
4342 4355
4343 e1000e_update_adaptive(&adapter->hw); 4356 e1000e_update_adaptive(&adapter->hw);
4344 4357
@@ -5028,7 +5041,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5028 } 5041 }
5029 5042
5030 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5043 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5031 msleep(1); 5044 usleep_range(1000, 2000);
5032 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5045 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5033 adapter->max_frame_size = max_frame; 5046 adapter->max_frame_size = max_frame;
5034 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5047 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5373,7 +5386,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5373 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); 5386 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5374} 5387}
5375#endif 5388#endif
5376void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5389static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5377{ 5390{
5378 dev_info(&pdev->dev, "Disabling ASPM %s %s\n", 5391 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5379 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", 5392 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
@@ -5393,13 +5406,19 @@ static int __e1000_resume(struct pci_dev *pdev)
5393 struct net_device *netdev = pci_get_drvdata(pdev); 5406 struct net_device *netdev = pci_get_drvdata(pdev);
5394 struct e1000_adapter *adapter = netdev_priv(netdev); 5407 struct e1000_adapter *adapter = netdev_priv(netdev);
5395 struct e1000_hw *hw = &adapter->hw; 5408 struct e1000_hw *hw = &adapter->hw;
5409 u16 aspm_disable_flag = 0;
5396 u32 err; 5410 u32 err;
5397 5411
5412 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5413 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5414 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5415 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5416 if (aspm_disable_flag)
5417 e1000e_disable_aspm(pdev, aspm_disable_flag);
5418
5398 pci_set_power_state(pdev, PCI_D0); 5419 pci_set_power_state(pdev, PCI_D0);
5399 pci_restore_state(pdev); 5420 pci_restore_state(pdev);
5400 pci_save_state(pdev); 5421 pci_save_state(pdev);
5401 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5402 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
5403 5422
5404 e1000e_set_interrupt_capability(adapter); 5423 e1000e_set_interrupt_capability(adapter);
5405 if (netif_running(netdev)) { 5424 if (netif_running(netdev)) {
@@ -5643,11 +5662,17 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5643 struct net_device *netdev = pci_get_drvdata(pdev); 5662 struct net_device *netdev = pci_get_drvdata(pdev);
5644 struct e1000_adapter *adapter = netdev_priv(netdev); 5663 struct e1000_adapter *adapter = netdev_priv(netdev);
5645 struct e1000_hw *hw = &adapter->hw; 5664 struct e1000_hw *hw = &adapter->hw;
5665 u16 aspm_disable_flag = 0;
5646 int err; 5666 int err;
5647 pci_ers_result_t result; 5667 pci_ers_result_t result;
5648 5668
5669 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5670 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5649 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 5671 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5650 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); 5672 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5673 if (aspm_disable_flag)
5674 e1000e_disable_aspm(pdev, aspm_disable_flag);
5675
5651 err = pci_enable_device_mem(pdev); 5676 err = pci_enable_device_mem(pdev);
5652 if (err) { 5677 if (err) {
5653 dev_err(&pdev->dev, 5678 dev_err(&pdev->dev,
@@ -5714,7 +5739,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
5714 u8 pba_str[E1000_PBANUM_LENGTH]; 5739 u8 pba_str[E1000_PBANUM_LENGTH];
5715 5740
5716 /* print bus type/speed/width info */ 5741 /* print bus type/speed/width info */
5717 e_info("(PCI Express:2.5GB/s:%s) %pM\n", 5742 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5718 /* bus width */ 5743 /* bus width */
5719 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 5744 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5720 "Width x1"), 5745 "Width x1"),
@@ -5759,7 +5784,6 @@ static const struct net_device_ops e1000e_netdev_ops = {
5759 .ndo_tx_timeout = e1000_tx_timeout, 5784 .ndo_tx_timeout = e1000_tx_timeout,
5760 .ndo_validate_addr = eth_validate_addr, 5785 .ndo_validate_addr = eth_validate_addr,
5761 5786
5762 .ndo_vlan_rx_register = e1000_vlan_rx_register,
5763 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, 5787 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
5764 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, 5788 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
5765#ifdef CONFIG_NET_POLL_CONTROLLER 5789#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -5789,12 +5813,17 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5789 resource_size_t flash_start, flash_len; 5813 resource_size_t flash_start, flash_len;
5790 5814
5791 static int cards_found; 5815 static int cards_found;
5816 u16 aspm_disable_flag = 0;
5792 int i, err, pci_using_dac; 5817 int i, err, pci_using_dac;
5793 u16 eeprom_data = 0; 5818 u16 eeprom_data = 0;
5794 u16 eeprom_apme_mask = E1000_EEPROM_APME; 5819 u16 eeprom_apme_mask = E1000_EEPROM_APME;
5795 5820
5821 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
5822 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5796 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) 5823 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
5797 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1); 5824 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5825 if (aspm_disable_flag)
5826 e1000e_disable_aspm(pdev, aspm_disable_flag);
5798 5827
5799 err = pci_enable_device_mem(pdev); 5828 err = pci_enable_device_mem(pdev);
5800 if (err) 5829 if (err)
@@ -5991,7 +6020,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5991 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 6020 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
5992 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 6021 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
5993 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); 6022 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
5994 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
5995 6023
5996 /* Initialize link parameters. User can change them with ethtool */ 6024 /* Initialize link parameters. User can change them with ethtool */
5997 adapter->hw.mac.autoneg = 1; 6025 adapter->hw.mac.autoneg = 1;
@@ -6124,7 +6152,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
6124 cancel_work_sync(&adapter->watchdog_task); 6152 cancel_work_sync(&adapter->watchdog_task);
6125 cancel_work_sync(&adapter->downshift_task); 6153 cancel_work_sync(&adapter->downshift_task);
6126 cancel_work_sync(&adapter->update_phy_task); 6154 cancel_work_sync(&adapter->update_phy_task);
6127 cancel_work_sync(&adapter->led_blink_task);
6128 cancel_work_sync(&adapter->print_hang_task); 6155 cancel_work_sync(&adapter->print_hang_task);
6129 6156
6130 if (!(netdev->flags & IFF_UP)) 6157 if (!(netdev->flags & IFF_UP))
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6ae31fcfb629..484774c13c21 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2372,7 +2372,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
2372 ret_val = 0; 2372 ret_val = 0;
2373 goto out; 2373 goto out;
2374 } 2374 }
2375 msleep(1); 2375 usleep_range(1000, 2000);
2376 i++; 2376 i++;
2377 } while (i < 10); 2377 } while (i < 10);
2378 } 2378 }
@@ -2740,7 +2740,7 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
2740 e1e_rphy(hw, PHY_CONTROL, &mii_reg); 2740 e1e_rphy(hw, PHY_CONTROL, &mii_reg);
2741 mii_reg |= MII_CR_POWER_DOWN; 2741 mii_reg |= MII_CR_POWER_DOWN;
2742 e1e_wphy(hw, PHY_CONTROL, mii_reg); 2742 e1e_wphy(hw, PHY_CONTROL, mii_reg);
2743 msleep(1); 2743 usleep_range(1000, 2000);
2744} 2744}
2745 2745
2746/** 2746/**
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index eb35951a2442..dfeb006035df 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1703,7 +1703,7 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
1703 cmd->advertising |= ADVERTISED_AUI; 1703 cmd->advertising |= ADVERTISED_AUI;
1704 } 1704 }
1705 1705
1706 cmd->speed = SPEED_10; 1706 ethtool_cmd_speed_set(cmd, SPEED_10);
1707 1707
1708 if (dev->if_port == TPE && lp->word[1] & ee_Duplex) { 1708 if (dev->if_port == TPE && lp->word[1] & ee_Duplex) {
1709 cmd->duplex = DUPLEX_FULL; 1709 cmd->duplex = DUPLEX_FULL;
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index f3bbdcef338c..7f642aef5e82 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -34,6 +34,7 @@
34static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 34static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
35{ 35{
36 struct ehea_port *port = netdev_priv(dev); 36 struct ehea_port *port = netdev_priv(dev);
37 u32 speed;
37 int ret; 38 int ret;
38 39
39 ret = ehea_sense_port_attr(port); 40 ret = ehea_sense_port_attr(port);
@@ -43,17 +44,29 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
43 44
44 if (netif_carrier_ok(dev)) { 45 if (netif_carrier_ok(dev)) {
45 switch (port->port_speed) { 46 switch (port->port_speed) {
46 case EHEA_SPEED_10M: cmd->speed = SPEED_10; break; 47 case EHEA_SPEED_10M:
47 case EHEA_SPEED_100M: cmd->speed = SPEED_100; break; 48 speed = SPEED_10;
48 case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break; 49 break;
49 case EHEA_SPEED_10G: cmd->speed = SPEED_10000; break; 50 case EHEA_SPEED_100M:
51 speed = SPEED_100;
52 break;
53 case EHEA_SPEED_1G:
54 speed = SPEED_1000;
55 break;
56 case EHEA_SPEED_10G:
57 speed = SPEED_10000;
58 break;
59 default:
60 speed = -1;
61 break; /* BUG */
50 } 62 }
51 cmd->duplex = port->full_duplex == 1 ? 63 cmd->duplex = port->full_duplex == 1 ?
52 DUPLEX_FULL : DUPLEX_HALF; 64 DUPLEX_FULL : DUPLEX_HALF;
53 } else { 65 } else {
54 cmd->speed = -1; 66 speed = ~0;
55 cmd->duplex = -1; 67 cmd->duplex = -1;
56 } 68 }
69 ethtool_cmd_speed_set(cmd, speed);
57 70
58 if (cmd->speed == SPEED_10000) { 71 if (cmd->speed == SPEED_10000) {
59 cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 72 cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
@@ -167,11 +180,6 @@ static void ehea_set_msglevel(struct net_device *dev, u32 value)
167 port->msg_enable = value; 180 port->msg_enable = value;
168} 181}
169 182
170static u32 ehea_get_rx_csum(struct net_device *dev)
171{
172 return 1;
173}
174
175static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = { 183static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
176 {"sig_comp_iv"}, 184 {"sig_comp_iv"},
177 {"swqe_refill_th"}, 185 {"swqe_refill_th"},
@@ -268,34 +276,16 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
268 276
269} 277}
270 278
271static int ehea_set_flags(struct net_device *dev, u32 data)
272{
273 /* Avoid changing the VLAN flags */
274 if ((data & (ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN)) !=
275 (ethtool_op_get_flags(dev) & (ETH_FLAG_RXVLAN |
276 ETH_FLAG_TXVLAN))){
277 return -EINVAL;
278 }
279
280 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
281 | ETH_FLAG_TXVLAN
282 | ETH_FLAG_RXVLAN);
283}
284
285const struct ethtool_ops ehea_ethtool_ops = { 279const struct ethtool_ops ehea_ethtool_ops = {
286 .get_settings = ehea_get_settings, 280 .get_settings = ehea_get_settings,
287 .get_drvinfo = ehea_get_drvinfo, 281 .get_drvinfo = ehea_get_drvinfo,
288 .get_msglevel = ehea_get_msglevel, 282 .get_msglevel = ehea_get_msglevel,
289 .set_msglevel = ehea_set_msglevel, 283 .set_msglevel = ehea_set_msglevel,
290 .get_link = ethtool_op_get_link, 284 .get_link = ethtool_op_get_link,
291 .set_tso = ethtool_op_set_tso,
292 .get_strings = ehea_get_strings, 285 .get_strings = ehea_get_strings,
293 .get_sset_count = ehea_get_sset_count, 286 .get_sset_count = ehea_get_sset_count,
294 .get_ethtool_stats = ehea_get_ethtool_stats, 287 .get_ethtool_stats = ehea_get_ethtool_stats,
295 .get_rx_csum = ehea_get_rx_csum,
296 .set_settings = ehea_set_settings, 288 .set_settings = ehea_set_settings,
297 .get_flags = ethtool_op_get_flags,
298 .set_flags = ehea_set_flags,
299 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ 289 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */
300}; 290};
301 291
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index cf79cf759e13..ba763e0481e3 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3262,10 +3262,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3262 dev->netdev_ops = &ehea_netdev_ops; 3262 dev->netdev_ops = &ehea_netdev_ops;
3263 ehea_set_ethtool_ops(dev); 3263 ehea_set_ethtool_ops(dev);
3264 3264
3265 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3266 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
3265 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO 3267 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3266 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3268 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3267 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3269 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3268 | NETIF_F_LLTX; 3270 | NETIF_F_LLTX | NETIF_F_RXCSUM;
3269 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3271 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3270 3272
3271 if (use_lro) 3273 if (use_lro)
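The ehea hunk above adopts the hw_features/features split; an illustrative sketch (the flag sets are examples, not ehea's exact ones):

	static void example_setup_features(struct net_device *dev)
	{
		/* offloads the user may toggle at runtime via ethtool */
		dev->hw_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_IP_CSUM |
				   NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
		/* offloads currently enabled; NETIF_F_RXCSUM replaces the old
		 * get_rx_csum/set_rx_csum ethtool hooks */
		dev->features = dev->hw_features | NETIF_F_HIGHDMA |
				NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER |
				NETIF_F_RXCSUM;
	}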
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 907b05a1c659..2837ce209cd7 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1488,7 +1488,7 @@ enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1488 cmd->supported = SUPPORTED_10baseT_Half 1488 cmd->supported = SUPPORTED_10baseT_Half
1489 | SUPPORTED_10baseT_Full 1489 | SUPPORTED_10baseT_Full
1490 | SUPPORTED_TP; 1490 | SUPPORTED_TP;
1491 cmd->speed = SPEED_10; 1491 ethtool_cmd_speed_set(cmd, SPEED_10);
1492 cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 1492 cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1493 cmd->port = PORT_TP; 1493 cmd->port = PORT_TP;
1494 cmd->autoneg = AUTONEG_DISABLE; 1494 cmd->autoneg = AUTONEG_DISABLE;
@@ -1499,7 +1499,8 @@ enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1499static int 1499static int
1500enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1500enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1501{ 1501{
1502 return enc28j60_setlink(dev, cmd->autoneg, cmd->speed, cmd->duplex); 1502 return enc28j60_setlink(dev, cmd->autoneg,
1503 ethtool_cmd_speed(cmd), cmd->duplex);
1503} 1504}
1504 1505
1505static u32 enc28j60_get_msglevel(struct net_device *dev) 1506static u32 enc28j60_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 2e573be16c13..9d4974bba247 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o 4 enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
5 5
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 3a3c3c8a3a9b..38b351c7b979 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "2.1.1.12" 35#define DRV_VERSION "2.1.1.13"
36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
@@ -84,7 +84,6 @@ struct enic {
84 unsigned int flags; 84 unsigned int flags;
85 unsigned int mc_count; 85 unsigned int mc_count;
86 unsigned int uc_count; 86 unsigned int uc_count;
87 int csum_rx_enabled;
88 u32 port_mtu; 87 u32 port_mtu;
89 u32 rx_coalesce_usecs; 88 u32 rx_coalesce_usecs;
90 u32 tx_coalesce_usecs; 89 u32 tx_coalesce_usecs;
@@ -120,4 +119,6 @@ static inline struct device *enic_get_dev(struct enic *enic)
120 return &(enic->pdev->dev); 119 return &(enic->pdev->dev);
121} 120}
122 121
122void enic_reset_addr_lists(struct enic *enic);
123
123#endif /* _ENIC_H_ */ 124#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
index 37ad3a1c82ee..90687b14e60f 100644
--- a/drivers/net/enic/enic_dev.c
+++ b/drivers/net/enic/enic_dev.c
@@ -177,24 +177,24 @@ int enic_vnic_dev_deinit(struct enic *enic)
177 return err; 177 return err;
178} 178}
179 179
180int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp) 180int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
181{ 181{
182 int err; 182 int err;
183 183
184 spin_lock(&enic->devcmd_lock); 184 spin_lock(&enic->devcmd_lock);
185 err = vnic_dev_init_prov(enic->vdev, 185 err = vnic_dev_init_prov2(enic->vdev,
186 (u8 *)vp, vic_provinfo_size(vp)); 186 (u8 *)vp, vic_provinfo_size(vp));
187 spin_unlock(&enic->devcmd_lock); 187 spin_unlock(&enic->devcmd_lock);
188 188
189 return err; 189 return err;
190} 190}
191 191
192int enic_dev_init_done(struct enic *enic, int *done, int *error) 192int enic_dev_deinit_done(struct enic *enic, int *status)
193{ 193{
194 int err; 194 int err;
195 195
196 spin_lock(&enic->devcmd_lock); 196 spin_lock(&enic->devcmd_lock);
197 err = vnic_dev_init_done(enic->vdev, done, error); 197 err = vnic_dev_deinit_done(enic->vdev, status);
198 spin_unlock(&enic->devcmd_lock); 198 spin_unlock(&enic->devcmd_lock);
199 199
200 return err; 200 return err;
@@ -219,3 +219,57 @@ void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
219 enic_del_vlan(enic, vid); 219 enic_del_vlan(enic, vid);
220 spin_unlock(&enic->devcmd_lock); 220 spin_unlock(&enic->devcmd_lock);
221} 221}
222
223int enic_dev_enable2(struct enic *enic, int active)
224{
225 int err;
226
227 spin_lock(&enic->devcmd_lock);
228 err = vnic_dev_enable2(enic->vdev, active);
229 spin_unlock(&enic->devcmd_lock);
230
231 return err;
232}
233
234int enic_dev_enable2_done(struct enic *enic, int *status)
235{
236 int err;
237
238 spin_lock(&enic->devcmd_lock);
239 err = vnic_dev_enable2_done(enic->vdev, status);
240 spin_unlock(&enic->devcmd_lock);
241
242 return err;
243}
244
245int enic_dev_status_to_errno(int devcmd_status)
246{
247 switch (devcmd_status) {
248 case ERR_SUCCESS:
249 return 0;
250 case ERR_EINVAL:
251 return -EINVAL;
252 case ERR_EFAULT:
253 return -EFAULT;
254 case ERR_EPERM:
255 return -EPERM;
256 case ERR_EBUSY:
257 return -EBUSY;
258 case ERR_ECMDUNKNOWN:
259 case ERR_ENOTSUPPORTED:
260 return -EOPNOTSUPP;
261 case ERR_EBADSTATE:
262 return -EINVAL;
263 case ERR_ENOMEM:
264 return -ENOMEM;
265 case ERR_ETIMEDOUT:
266 return -ETIMEDOUT;
267 case ERR_ELINKDOWN:
268 return -ENETDOWN;
269 case ERR_EINPROGRESS:
270 return -EINPROGRESS;
271 case ERR_EMAXRES:
272 default:
273 return (devcmd_status < 0) ? devcmd_status : -1;
274 }
275}
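A hypothetical usage sketch for the helpers added above; the real caller is presumably the new enic_pp.c that the Makefile change pulls in, which is not part of this excerpt:

	static int example_enable_port(struct enic *enic)
	{
		int status = 0;
		int err;

		err = enic_dev_enable2(enic, 1);		/* kick off the enable devcmd */
		if (err)
			return err;

		err = enic_dev_enable2_done(enic, &status);	/* fetch completion status */
		if (err)
			return err;

		return enic_dev_status_to_errno(status);	/* firmware status -> kernel errno */
	}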
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
index 495f57fcb887..d5f681337626 100644
--- a/drivers/net/enic/enic_dev.h
+++ b/drivers/net/enic/enic_dev.h
@@ -35,7 +35,10 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
35int enic_dev_enable(struct enic *enic); 35int enic_dev_enable(struct enic *enic);
36int enic_dev_disable(struct enic *enic); 36int enic_dev_disable(struct enic *enic);
37int enic_vnic_dev_deinit(struct enic *enic); 37int enic_vnic_dev_deinit(struct enic *enic);
38int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp); 38int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
39int enic_dev_init_done(struct enic *enic, int *done, int *error); 39int enic_dev_deinit_done(struct enic *enic, int *status);
40int enic_dev_enable2(struct enic *enic, int arg);
41int enic_dev_enable2_done(struct enic *enic, int *status);
42int enic_dev_status_to_errno(int devcmd_status);
40 43
41#endif /* _ENIC_DEV_H_ */ 44#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 8b9cad5e9712..3d99b0f1a236 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -45,6 +45,7 @@
45#include "enic_res.h" 45#include "enic_res.h"
46#include "enic.h" 46#include "enic.h"
47#include "enic_dev.h" 47#include "enic_dev.h"
48#include "enic_pp.h"
48 49
49#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 50#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
50#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 51#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -179,10 +180,10 @@ static int enic_get_settings(struct net_device *netdev,
179 ecmd->transceiver = XCVR_EXTERNAL; 180 ecmd->transceiver = XCVR_EXTERNAL;
180 181
181 if (netif_carrier_ok(netdev)) { 182 if (netif_carrier_ok(netdev)) {
182 ecmd->speed = vnic_dev_port_speed(enic->vdev); 183 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
183 ecmd->duplex = DUPLEX_FULL; 184 ecmd->duplex = DUPLEX_FULL;
184 } else { 185 } else {
185 ecmd->speed = -1; 186 ethtool_cmd_speed_set(ecmd, -1);
186 ecmd->duplex = -1; 187 ecmd->duplex = -1;
187 } 188 }
188 189
@@ -250,56 +251,6 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
250 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; 251 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
251} 252}
252 253
253static u32 enic_get_rx_csum(struct net_device *netdev)
254{
255 struct enic *enic = netdev_priv(netdev);
256 return enic->csum_rx_enabled;
257}
258
259static int enic_set_rx_csum(struct net_device *netdev, u32 data)
260{
261 struct enic *enic = netdev_priv(netdev);
262
263 if (data && !ENIC_SETTING(enic, RXCSUM))
264 return -EINVAL;
265
266 enic->csum_rx_enabled = !!data;
267
268 return 0;
269}
270
271static int enic_set_tx_csum(struct net_device *netdev, u32 data)
272{
273 struct enic *enic = netdev_priv(netdev);
274
275 if (data && !ENIC_SETTING(enic, TXCSUM))
276 return -EINVAL;
277
278 if (data)
279 netdev->features |= NETIF_F_HW_CSUM;
280 else
281 netdev->features &= ~NETIF_F_HW_CSUM;
282
283 return 0;
284}
285
286static int enic_set_tso(struct net_device *netdev, u32 data)
287{
288 struct enic *enic = netdev_priv(netdev);
289
290 if (data && !ENIC_SETTING(enic, TSO))
291 return -EINVAL;
292
293 if (data)
294 netdev->features |=
295 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
296 else
297 netdev->features &=
298 ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
299
300 return 0;
301}
302
303static u32 enic_get_msglevel(struct net_device *netdev) 254static u32 enic_get_msglevel(struct net_device *netdev)
304{ 255{
305 struct enic *enic = netdev_priv(netdev); 256 struct enic *enic = netdev_priv(netdev);
@@ -387,17 +338,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
387 .get_strings = enic_get_strings, 338 .get_strings = enic_get_strings,
388 .get_sset_count = enic_get_sset_count, 339 .get_sset_count = enic_get_sset_count,
389 .get_ethtool_stats = enic_get_ethtool_stats, 340 .get_ethtool_stats = enic_get_ethtool_stats,
390 .get_rx_csum = enic_get_rx_csum,
391 .set_rx_csum = enic_set_rx_csum,
392 .get_tx_csum = ethtool_op_get_tx_csum,
393 .set_tx_csum = enic_set_tx_csum,
394 .get_sg = ethtool_op_get_sg,
395 .set_sg = ethtool_op_set_sg,
396 .get_tso = ethtool_op_get_tso,
397 .set_tso = enic_set_tso,
398 .get_coalesce = enic_get_coalesce, 341 .get_coalesce = enic_get_coalesce,
399 .set_coalesce = enic_set_coalesce, 342 .set_coalesce = enic_set_coalesce,
400 .get_flags = ethtool_op_get_flags,
401}; 343};
402 344
403static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) 345static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -874,7 +816,7 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
874 return net_stats; 816 return net_stats;
875} 817}
876 818
877static void enic_reset_addr_lists(struct enic *enic) 819void enic_reset_addr_lists(struct enic *enic)
878{ 820{
879 enic->mc_count = 0; 821 enic->mc_count = 0;
880 enic->uc_count = 0; 822 enic->uc_count = 0;
@@ -1112,157 +1054,77 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1112 return -EINVAL; 1054 return -EINVAL;
1113} 1055}
1114 1056
1115static int enic_set_port_profile(struct enic *enic, u8 *mac)
1116{
1117 struct vic_provinfo *vp;
1118 u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
1119 u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
1120 char uuid_str[38];
1121 char client_mac_str[18];
1122 u8 *client_mac;
1123 int err;
1124
1125 err = enic_vnic_dev_deinit(enic);
1126 if (err)
1127 return err;
1128
1129 enic_reset_addr_lists(enic);
1130
1131 switch (enic->pp.request) {
1132
1133 case PORT_REQUEST_ASSOCIATE:
1134
1135 if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
1136 return -EINVAL;
1137
1138 if (!is_valid_ether_addr(mac))
1139 return -EADDRNOTAVAIL;
1140
1141 vp = vic_provinfo_alloc(GFP_KERNEL, oui,
1142 VIC_PROVINFO_GENERIC_TYPE);
1143 if (!vp)
1144 return -ENOMEM;
1145
1146 vic_provinfo_add_tlv(vp,
1147 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
1148 strlen(enic->pp.name) + 1, enic->pp.name);
1149
1150 if (!is_zero_ether_addr(enic->pp.mac_addr))
1151 client_mac = enic->pp.mac_addr;
1152 else
1153 client_mac = mac;
1154
1155 vic_provinfo_add_tlv(vp,
1156 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
1157 ETH_ALEN, client_mac);
1158
1159 sprintf(client_mac_str, "%pM", client_mac);
1160 vic_provinfo_add_tlv(vp,
1161 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
1162 sizeof(client_mac_str), client_mac_str);
1163
1164 if (enic->pp.set & ENIC_SET_INSTANCE) {
1165 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
1166 vic_provinfo_add_tlv(vp,
1167 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
1168 sizeof(uuid_str), uuid_str);
1169 }
1170
1171 if (enic->pp.set & ENIC_SET_HOST) {
1172 sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
1173 vic_provinfo_add_tlv(vp,
1174 VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
1175 sizeof(uuid_str), uuid_str);
1176 }
1177
1178 os_type = htons(os_type);
1179 vic_provinfo_add_tlv(vp,
1180 VIC_GENERIC_PROV_TLV_OS_TYPE,
1181 sizeof(os_type), &os_type);
1182
1183 err = enic_dev_init_prov(enic, vp);
1184 vic_provinfo_free(vp);
1185 if (err)
1186 return err;
1187 break;
1188
1189 case PORT_REQUEST_DISASSOCIATE:
1190 break;
1191
1192 default:
1193 return -EINVAL;
1194 }
1195
1196 /* Set flag to indicate that the port assoc/disassoc
1197 * request has been sent out to fw
1198 */
1199 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1200
1201 return 0;
1202}
1203
1204static int enic_set_vf_port(struct net_device *netdev, int vf, 1057static int enic_set_vf_port(struct net_device *netdev, int vf,
1205 struct nlattr *port[]) 1058 struct nlattr *port[])
1206{ 1059{
1207 struct enic *enic = netdev_priv(netdev); 1060 struct enic *enic = netdev_priv(netdev);
1208 struct enic_port_profile new_pp; 1061 struct enic_port_profile prev_pp;
1209 int err = 0; 1062 int err = 0, restore_pp = 1;
1210 1063
1211 memset(&new_pp, 0, sizeof(new_pp)); 1064 /* don't support VFs, yet */
1065 if (vf != PORT_SELF_VF)
1066 return -EOPNOTSUPP;
1212 1067
1213 if (port[IFLA_PORT_REQUEST]) { 1068 if (!port[IFLA_PORT_REQUEST])
1214 new_pp.set |= ENIC_SET_REQUEST; 1069 return -EOPNOTSUPP;
1215 new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1070
1216 } 1071 memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
1072 memset(&enic->pp, 0, sizeof(enic->pp));
1073
1074 enic->pp.set |= ENIC_SET_REQUEST;
1075 enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1217 1076
1218 if (port[IFLA_PORT_PROFILE]) { 1077 if (port[IFLA_PORT_PROFILE]) {
1219 new_pp.set |= ENIC_SET_NAME; 1078 enic->pp.set |= ENIC_SET_NAME;
1220 memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]), 1079 memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
1221 PORT_PROFILE_MAX); 1080 PORT_PROFILE_MAX);
1222 } 1081 }
1223 1082
1224 if (port[IFLA_PORT_INSTANCE_UUID]) { 1083 if (port[IFLA_PORT_INSTANCE_UUID]) {
1225 new_pp.set |= ENIC_SET_INSTANCE; 1084 enic->pp.set |= ENIC_SET_INSTANCE;
1226 memcpy(new_pp.instance_uuid, 1085 memcpy(enic->pp.instance_uuid,
1227 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); 1086 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
1228 } 1087 }
1229 1088
1230 if (port[IFLA_PORT_HOST_UUID]) { 1089 if (port[IFLA_PORT_HOST_UUID]) {
1231 new_pp.set |= ENIC_SET_HOST; 1090 enic->pp.set |= ENIC_SET_HOST;
1232 memcpy(new_pp.host_uuid, 1091 memcpy(enic->pp.host_uuid,
1233 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); 1092 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
1234 } 1093 }
1235 1094
1236 /* don't support VFs, yet */ 1095 /* Special case handling: mac came from IFLA_VF_MAC */
1237 if (vf != PORT_SELF_VF) 1096 if (!is_zero_ether_addr(prev_pp.vf_mac))
1238 return -EOPNOTSUPP; 1097 memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN);
1239
1240 if (!(new_pp.set & ENIC_SET_REQUEST))
1241 return -EOPNOTSUPP;
1242
1243 if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
1244 /* Special case handling */
1245 if (!is_zero_ether_addr(enic->pp.vf_mac))
1246 memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
1247 1098
1248 if (is_zero_ether_addr(netdev->dev_addr)) 1099 if (is_zero_ether_addr(netdev->dev_addr))
1249 random_ether_addr(netdev->dev_addr); 1100 random_ether_addr(netdev->dev_addr);
1250 }
1251 1101
1252 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile)); 1102 err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
1103 if (err) {
1104 if (restore_pp) {
1105 /* Things are still the way they were: Implicit
1106 * DISASSOCIATE failed
1107 */
1108 memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
1109 } else {
1110 memset(&enic->pp, 0, sizeof(enic->pp));
1111 memset(netdev->dev_addr, 0, ETH_ALEN);
1112 }
1113 } else {
1114 /* Set flag to indicate that the port assoc/disassoc
1115 * request has been sent out to fw
1116 */
1117 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1253 1118
1254 err = enic_set_port_profile(enic, netdev->dev_addr); 1119 /* If DISASSOCIATE, clean up all assigned/saved macaddresses */
1255 if (err) 1120 if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
1256 goto set_port_profile_cleanup; 1121 memset(enic->pp.mac_addr, 0, ETH_ALEN);
1122 memset(netdev->dev_addr, 0, ETH_ALEN);
1123 }
1124 }
1257 1125
1258set_port_profile_cleanup:
1259 memset(enic->pp.vf_mac, 0, ETH_ALEN); 1126 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1260 1127
1261 if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
1262 memset(netdev->dev_addr, 0, ETH_ALEN);
1263 memset(enic->pp.mac_addr, 0, ETH_ALEN);
1264 }
1265
1266 return err; 1128 return err;
1267} 1129}
1268 1130
@@ -1270,34 +1132,15 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1270 struct sk_buff *skb) 1132 struct sk_buff *skb)
1271{ 1133{
1272 struct enic *enic = netdev_priv(netdev); 1134 struct enic *enic = netdev_priv(netdev);
1273 int err, error, done;
1274 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1135 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1136 int err;
1275 1137
1276 if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED)) 1138 if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
1277 return -ENODATA; 1139 return -ENODATA;
1278 1140
1279 err = enic_dev_init_done(enic, &done, &error); 1141 err = enic_process_get_pp_request(enic, enic->pp.request, &response);
1280 if (err) 1142 if (err)
1281 error = err; 1143 return err;
1282
1283 switch (error) {
1284 case ERR_SUCCESS:
1285 if (!done)
1286 response = PORT_PROFILE_RESPONSE_INPROGRESS;
1287 break;
1288 case ERR_EINVAL:
1289 response = PORT_PROFILE_RESPONSE_INVALID;
1290 break;
1291 case ERR_EBADSTATE:
1292 response = PORT_PROFILE_RESPONSE_BADSTATE;
1293 break;
1294 case ERR_ENOMEM:
1295 response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
1296 break;
1297 default:
1298 response = PORT_PROFILE_RESPONSE_ERROR;
1299 break;
1300 }
1301 1144
1302 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request); 1145 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
1303 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); 1146 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
@@ -1407,7 +1250,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1407 skb_put(skb, bytes_written); 1250 skb_put(skb, bytes_written);
1408 skb->protocol = eth_type_trans(skb, netdev); 1251 skb->protocol = eth_type_trans(skb, netdev);
1409 1252
1410 if (enic->csum_rx_enabled && !csum_not_calc) { 1253 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
1411 skb->csum = htons(checksum); 1254 skb->csum = htons(checksum);
1412 skb->ip_summed = CHECKSUM_COMPLETE; 1255 skb->ip_summed = CHECKSUM_COMPLETE;
1413 } 1256 }
@@ -2536,17 +2379,18 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2536 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); 2379 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
2537 } 2380 }
2538 if (ENIC_SETTING(enic, TXCSUM)) 2381 if (ENIC_SETTING(enic, TXCSUM))
2539 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2382 netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2540 if (ENIC_SETTING(enic, TSO)) 2383 if (ENIC_SETTING(enic, TSO))
2541 netdev->features |= NETIF_F_TSO | 2384 netdev->hw_features |= NETIF_F_TSO |
2542 NETIF_F_TSO6 | NETIF_F_TSO_ECN; 2385 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
2543 if (ENIC_SETTING(enic, LRO)) 2386 if (ENIC_SETTING(enic, RXCSUM))
2544 netdev->features |= NETIF_F_GRO; 2387 netdev->hw_features |= NETIF_F_RXCSUM;
2388
2389 netdev->features |= netdev->hw_features;
2390
2545 if (using_dac) 2391 if (using_dac)
2546 netdev->features |= NETIF_F_HIGHDMA; 2392 netdev->features |= NETIF_F_HIGHDMA;
2547 2393
2548 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
2549
2550 err = register_netdev(netdev); 2394 err = register_netdev(netdev);
2551 if (err) { 2395 if (err) {
2552 dev_err(dev, "Cannot register net device, aborting\n"); 2396 dev_err(dev, "Cannot register net device, aborting\n");
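
The enic changes above follow the conversion pattern used throughout this series: offloads the driver can toggle are advertised in netdev->hw_features, copied into netdev->features once at probe time, and acted on through the features interface instead of per-offload get/set ethtool hooks. A minimal sketch of that pattern for a hypothetical driver (foo_set_features, foo_hw_set_rx_csum and struct foo_priv are illustrative names, not enic code; the u32 features signature matches the interface used in this series):

static int foo_set_features(struct net_device *netdev, u32 features)
{
	struct foo_priv *priv = netdev_priv(netdev);
	u32 changed = netdev->features ^ features;

	/* React only to the offload bits that actually changed */
	if (changed & NETIF_F_RXCSUM)
		foo_hw_set_rx_csum(priv, !!(features & NETIF_F_RXCSUM));

	return 0;
}

/* At probe time: advertise the toggleable offloads, enable them by default */
netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
/* and hook .ndo_set_features = foo_set_features in the net_device_ops */
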
diff --git a/drivers/net/enic/enic_pp.c b/drivers/net/enic/enic_pp.c
new file mode 100644
index 000000000000..ffaa75dd1ded
--- /dev/null
+++ b/drivers/net/enic/enic_pp.c
@@ -0,0 +1,264 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/rtnetlink.h>
26#include <net/ip.h>
27
28#include "vnic_vic.h"
29#include "enic_res.h"
30#include "enic.h"
31#include "enic_dev.h"
32
33static int enic_set_port_profile(struct enic *enic)
34{
35 struct net_device *netdev = enic->netdev;
36 struct vic_provinfo *vp;
37 const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
38 const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
39 char uuid_str[38];
40 char client_mac_str[18];
41 u8 *client_mac;
42 int err;
43
44 if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
45 return -EINVAL;
46
47 vp = vic_provinfo_alloc(GFP_KERNEL, oui,
48 VIC_PROVINFO_GENERIC_TYPE);
49 if (!vp)
50 return -ENOMEM;
51
52 VIC_PROVINFO_ADD_TLV(vp,
53 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
54 strlen(enic->pp.name) + 1, enic->pp.name);
55
56 if (!is_zero_ether_addr(enic->pp.mac_addr))
57 client_mac = enic->pp.mac_addr;
58 else
59 client_mac = netdev->dev_addr;
60
61 VIC_PROVINFO_ADD_TLV(vp,
62 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
63 ETH_ALEN, client_mac);
64
65 snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac);
66 VIC_PROVINFO_ADD_TLV(vp,
67 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
68 sizeof(client_mac_str), client_mac_str);
69
70 if (enic->pp.set & ENIC_SET_INSTANCE) {
71 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
72 VIC_PROVINFO_ADD_TLV(vp,
73 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
74 sizeof(uuid_str), uuid_str);
75 }
76
77 if (enic->pp.set & ENIC_SET_HOST) {
78 sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
79 VIC_PROVINFO_ADD_TLV(vp,
80 VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
81 sizeof(uuid_str), uuid_str);
82 }
83
84 VIC_PROVINFO_ADD_TLV(vp,
85 VIC_GENERIC_PROV_TLV_OS_TYPE,
86 sizeof(os_type), &os_type);
87
88 err = enic_dev_status_to_errno(enic_dev_init_prov2(enic, vp));
89
90add_tlv_failure:
91 vic_provinfo_free(vp);
92
93 return err;
94}
95
96static int enic_unset_port_profile(struct enic *enic)
97{
98 int err;
99
100 err = enic_vnic_dev_deinit(enic);
101 if (err)
102 return enic_dev_status_to_errno(err);
103
104 enic_reset_addr_lists(enic);
105
106 return 0;
107}
108
109static int enic_are_pp_different(struct enic_port_profile *pp1,
110 struct enic_port_profile *pp2)
111{
112 return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
113 pp2->instance_uuid, PORT_UUID_MAX) |
114 !!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
115 !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
116}
117
118static int enic_pp_preassociate(struct enic *enic,
119 struct enic_port_profile *prev_pp, int *restore_pp);
120static int enic_pp_disassociate(struct enic *enic,
121 struct enic_port_profile *prev_pp, int *restore_pp);
122static int enic_pp_preassociate_rr(struct enic *enic,
123 struct enic_port_profile *prev_pp, int *restore_pp);
124static int enic_pp_associate(struct enic *enic,
125 struct enic_port_profile *prev_pp, int *restore_pp);
126
127static int (*enic_pp_handlers[])(struct enic *enic,
128 struct enic_port_profile *prev_state, int *restore_pp) = {
129 [PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate,
130 [PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr,
131 [PORT_REQUEST_ASSOCIATE] = enic_pp_associate,
132 [PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate,
133};
134
135static const int enic_pp_handlers_count =
136 sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
137
138static int enic_pp_preassociate(struct enic *enic,
139 struct enic_port_profile *prev_pp, int *restore_pp)
140{
141 return -EOPNOTSUPP;
142}
143
144static int enic_pp_disassociate(struct enic *enic,
145 struct enic_port_profile *prev_pp, int *restore_pp)
146{
147 return enic_unset_port_profile(enic);
148}
149
150static int enic_pp_preassociate_rr(struct enic *enic,
151 struct enic_port_profile *prev_pp, int *restore_pp)
152{
153 int err;
154 int active = 0;
155
156 if (enic->pp.request != PORT_REQUEST_ASSOCIATE) {
 157 /* If pre-associate is not part of an associate,
 158 * we always disassociate first */
159 err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic,
160 prev_pp, restore_pp);
161 if (err)
162 return err;
163
164 *restore_pp = 0;
165 }
166
167 *restore_pp = 0;
168
169 err = enic_set_port_profile(enic);
170 if (err)
171 return err;
172
173 /* If pre-associate is not part of an associate. */
174 if (enic->pp.request != PORT_REQUEST_ASSOCIATE)
175 err = enic_dev_status_to_errno(enic_dev_enable2(enic, active));
176
177 return err;
178}
179
180static int enic_pp_associate(struct enic *enic,
181 struct enic_port_profile *prev_pp, int *restore_pp)
182{
183 int err;
184 int active = 1;
185
186 /* Check if a pre-associate was called before */
187 if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR ||
188 (prev_pp->request == PORT_REQUEST_PREASSOCIATE_RR &&
189 enic_are_pp_different(prev_pp, &enic->pp))) {
190 err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](
191 enic, prev_pp, restore_pp);
192 if (err)
193 return err;
194
195 *restore_pp = 0;
196 }
197
198 err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR](
199 enic, prev_pp, restore_pp);
200 if (err)
201 return err;
202
203 *restore_pp = 0;
204
205 return enic_dev_status_to_errno(enic_dev_enable2(enic, active));
206}
207
208int enic_process_set_pp_request(struct enic *enic,
209 struct enic_port_profile *prev_pp, int *restore_pp)
210{
211 if (enic->pp.request < enic_pp_handlers_count
212 && enic_pp_handlers[enic->pp.request])
213 return enic_pp_handlers[enic->pp.request](enic,
214 prev_pp, restore_pp);
215 else
216 return -EOPNOTSUPP;
217}
218
219int enic_process_get_pp_request(struct enic *enic, int request,
220 u16 *response)
221{
222 int err, status = ERR_SUCCESS;
223
224 switch (request) {
225
226 case PORT_REQUEST_PREASSOCIATE_RR:
227 case PORT_REQUEST_ASSOCIATE:
228 err = enic_dev_enable2_done(enic, &status);
229 break;
230
231 case PORT_REQUEST_DISASSOCIATE:
232 err = enic_dev_deinit_done(enic, &status);
233 break;
234
235 default:
236 return -EINVAL;
237 }
238
239 if (err)
240 status = err;
241
242 switch (status) {
243 case ERR_SUCCESS:
244 *response = PORT_PROFILE_RESPONSE_SUCCESS;
245 break;
246 case ERR_EINVAL:
247 *response = PORT_PROFILE_RESPONSE_INVALID;
248 break;
249 case ERR_EBADSTATE:
250 *response = PORT_PROFILE_RESPONSE_BADSTATE;
251 break;
252 case ERR_ENOMEM:
253 *response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
254 break;
255 case ERR_EINPROGRESS:
256 *response = PORT_PROFILE_RESPONSE_INPROGRESS;
257 break;
258 default:
259 *response = PORT_PROFILE_RESPONSE_ERROR;
260 break;
261 }
262
263 return 0;
264}
diff --git a/drivers/net/enic/enic_pp.h b/drivers/net/enic/enic_pp.h
new file mode 100644
index 000000000000..699e365a944d
--- /dev/null
+++ b/drivers/net/enic/enic_pp.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef _ENIC_PP_H_
20#define _ENIC_PP_H_
21
22int enic_process_set_pp_request(struct enic *enic,
23 struct enic_port_profile *prev_pp, int *restore_pp);
24int enic_process_get_pp_request(struct enic *enic, int request,
25 u16 *response);
26
27#endif /* _ENIC_PP_H_ */
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index f111a37419ce..6e5c6356e7df 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -98,9 +98,9 @@ int enic_get_vnic_config(struct enic *enic)
98 "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n", 98 "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
99 enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu); 99 enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
100 dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d " 100 dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d "
101 "tso/lro %d/%d intr timer %d usec rss %d\n", 101 "tso %d intr timer %d usec rss %d\n",
102 ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM), 102 ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM),
103 ENIC_SETTING(enic, TSO), ENIC_SETTING(enic, LRO), 103 ENIC_SETTING(enic, TSO),
104 c->intr_timer_usec, ENIC_SETTING(enic, RSS)); 104 c->intr_timer_usec, ENIC_SETTING(enic, RSS));
105 105
106 return 0; 106 return 0;
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index c089b362a36f..68f24ae860ae 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -786,48 +786,6 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
786 return r; 786 return r;
787} 787}
788 788
789int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
790{
791 u64 a0 = 0, a1 = 0;
792 int wait = 1000;
793 int ret;
794
795 *done = 0;
796
797 ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
798 if (ret)
799 return ret;
800
801 *done = (a0 == 0);
802
803 *err = (a0 == 0) ? (int)a1:0;
804
805 return 0;
806}
807
808int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
809{
810 u64 a0, a1 = len;
811 int wait = 1000;
812 dma_addr_t prov_pa;
813 void *prov_buf;
814 int ret;
815
816 prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
817 if (!prov_buf)
818 return -ENOMEM;
819
820 memcpy(prov_buf, buf, len);
821
822 a0 = prov_pa;
823
824 ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
825
826 pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
827
828 return ret;
829}
830
831int vnic_dev_deinit(struct vnic_dev *vdev) 789int vnic_dev_deinit(struct vnic_dev *vdev)
832{ 790{
833 u64 a0 = 0, a1 = 0; 791 u64 a0 = 0, a1 = 0;
@@ -927,4 +885,59 @@ err_out:
927 return NULL; 885 return NULL;
928} 886}
929 887
888int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
889{
890 u64 a0, a1 = len;
891 int wait = 1000;
892 dma_addr_t prov_pa;
893 void *prov_buf;
894 int ret;
895
896 prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
897 if (!prov_buf)
898 return -ENOMEM;
930 899
900 memcpy(prov_buf, buf, len);
901
902 a0 = prov_pa;
903
904 ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
905
906 pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
907
908 return ret;
909}
910
911int vnic_dev_enable2(struct vnic_dev *vdev, int active)
912{
913 u64 a0, a1 = 0;
914 int wait = 1000;
915
916 a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
917
918 return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
919}
920
921static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
922 int *status)
923{
924 u64 a0 = cmd, a1 = 0;
925 int wait = 1000;
926 int ret;
927
928 ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
929 if (!ret)
930 *status = (int)a0;
931
932 return ret;
933}
934
935int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
936{
937 return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
938}
939
940int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
941{
942 return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
943}
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index e837546213a8..cf482a2c9dd9 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -108,8 +108,6 @@ int vnic_dev_disable(struct vnic_dev *vdev);
108int vnic_dev_open(struct vnic_dev *vdev, int arg); 108int vnic_dev_open(struct vnic_dev *vdev, int arg);
109int vnic_dev_open_done(struct vnic_dev *vdev, int *done); 109int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
110int vnic_dev_init(struct vnic_dev *vdev, int arg); 110int vnic_dev_init(struct vnic_dev *vdev, int arg);
111int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
112int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
113int vnic_dev_deinit(struct vnic_dev *vdev); 111int vnic_dev_deinit(struct vnic_dev *vdev);
114int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); 112int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
115int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); 113int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
@@ -122,5 +120,9 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
122struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, 120struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
123 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, 121 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
124 unsigned int num_bars); 122 unsigned int num_bars);
123int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
124int vnic_dev_enable2(struct vnic_dev *vdev, int active);
125int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
126int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
125 127
126#endif /* _VNIC_DEV_H_ */ 128#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index d833a071bac5..c5569bfb47ac 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -267,17 +267,62 @@ enum vnic_devcmd_cmd {
267 267
268 /* 268 /*
269 * As for BY_BDF except a0 is index of hvnlink subordinate vnic 269 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
270 * or SR-IOV virtual vnic */ 270 * or SR-IOV virtual vnic
271 */
271 CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), 272 CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
272 273
273 /* 274 /*
274 * in: (u64)a0=paddr of buffer to put latest VIC VIF-CONFIG-INFO TLV in 275 * For HPP toggle:
275 * (u32)a1=length of buffer in a0 276 * adapter-info-get
276 * out: (u64)a0=paddr of buffer with latest VIC VIF-CONFIG-INFO TLV 277 * in: (u64)a0=phsical address of buffer passed in from caller.
277 * (u32)a1=actual length of latest VIC VIF-CONFIG-INFO TLV */ 278 * (u16)a1=size of buffer specified in a0.
 279 * out: (u64)a0=physical address of buffer passed in from caller.
280 * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
281 * 0 if no VIF-CONFIG-INFO TLV was ever received. */
278 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), 282 CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
283
284 /* init_prov_info2:
285 * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
286 * the vnic until CMD_ENABLE2 is issued.
287 * (u64)a0=paddr of vnic_devcmd_provinfo
288 * (u32)a1=sizeof provision info */
289 CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
290
291 /* enable2:
292 * (u32)a0=0 ==> standby
293 * =CMD_ENABLE2_ACTIVE ==> active
294 */
295 CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
296
297 /*
298 * cmd_status:
299 * Returns the status of the specified command
300 * Input:
301 * a0 = command for which status is being queried.
302 * Possible values are:
303 * CMD_SOFT_RESET
304 * CMD_HANG_RESET
305 * CMD_OPEN
306 * CMD_INIT
307 * CMD_INIT_PROV_INFO
308 * CMD_DEINIT
309 * CMD_INIT_PROV_INFO2
310 * CMD_ENABLE2
311 * Output:
312 * if status == STAT_ERROR
313 * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
314 * not supported
315 * if status == STAT_NONE
316 * a0 = status of the devcmd specified in a0 as follows.
317 * ERR_SUCCESS - command in a0 completed successfully
318 * ERR_EINPROGRESS - command in a0 is still in progress
319 */
320 CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
279}; 321};
280 322
323/* CMD_ENABLE2 flags */
324#define CMD_ENABLE2_ACTIVE 0x1
325
281/* flags for CMD_OPEN */ 326/* flags for CMD_OPEN */
282#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ 327#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
283 328
@@ -315,6 +360,8 @@ enum vnic_devcmd_error {
315 ERR_ETIMEDOUT = 8, 360 ERR_ETIMEDOUT = 8,
316 ERR_ELINKDOWN = 9, 361 ERR_ELINKDOWN = 9,
317 ERR_EMAXRES = 10, 362 ERR_EMAXRES = 10,
363 ERR_ENOTSUPPORTED = 11,
364 ERR_EINPROGRESS = 12,
318}; 365};
319 366
320/* 367/*
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 4725b79de0ef..24ef8cd40545 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -23,7 +23,8 @@
23 23
24#include "vnic_vic.h" 24#include "vnic_vic.h"
25 25
26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type) 26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
27 const u8 type)
27{ 28{
28 struct vic_provinfo *vp; 29 struct vic_provinfo *vp;
29 30
@@ -47,7 +48,7 @@ void vic_provinfo_free(struct vic_provinfo *vp)
47} 48}
48 49
49int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, 50int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
50 void *value) 51 const void *value)
51{ 52{
52 struct vic_provinfo_tlv *tlv; 53 struct vic_provinfo_tlv *tlv;
53 54
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index f700f5d9e81d..9ef81f148351 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -47,6 +47,7 @@ enum vic_generic_prov_os_type {
47 VIC_GENERIC_PROV_OS_TYPE_ESX = 1, 47 VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
48 VIC_GENERIC_PROV_OS_TYPE_LINUX = 2, 48 VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
49 VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3, 49 VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
50 VIC_GENERIC_PROV_OS_TYPE_SOLARIS = 4,
50}; 51};
51 52
52struct vic_provinfo { 53struct vic_provinfo {
@@ -61,14 +62,22 @@ struct vic_provinfo {
61 } tlv[0]; 62 } tlv[0];
62} __packed; 63} __packed;
63 64
65#define VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data) \
66 do { \
67 err = vic_provinfo_add_tlv(vp, tlvtype, tlvlen, data); \
68 if (err) \
69 goto add_tlv_failure; \
70 } while (0)
71
64#define VIC_PROVINFO_MAX_DATA 1385 72#define VIC_PROVINFO_MAX_DATA 1385
65#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ 73#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
66 sizeof(struct vic_provinfo)) 74 sizeof(struct vic_provinfo))
67 75
68struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type); 76struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
77 const u8 type);
69void vic_provinfo_free(struct vic_provinfo *vp); 78void vic_provinfo_free(struct vic_provinfo *vp);
70int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, 79int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
71 void *value); 80 const void *value);
72size_t vic_provinfo_size(struct vic_provinfo *vp); 81size_t vic_provinfo_size(struct vic_provinfo *vp);
73 82
74#endif /* _VNIC_VIC_H_ */ 83#endif /* _VNIC_VIC_H_ */
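
One call-site detail worth noting: the VIC_PROVINFO_ADD_TLV() wrapper introduced above expands to an assignment to a local int err and a goto add_tlv_failure on error, so both identifiers must exist in the calling function, as they do in enic_set_port_profile() in enic_pp.c. A stripped-down sketch of the expected call-site shape (oui, tlvtype, tlvlen, data and hand_provinfo_to_device() are placeholders, not real enic symbols):

	struct vic_provinfo *vp;
	int err;

	vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_GENERIC_TYPE);
	if (!vp)
		return -ENOMEM;

	/* Each use of the macro jumps to add_tlv_failure if the TLV cannot be added */
	VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data);

	err = hand_provinfo_to_device(vp);	/* placeholder for the real consumer */

add_tlv_failure:
	vic_provinfo_free(vp);
	return err;
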
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 380d0614a89a..b5f6173130f4 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1545,7 +1545,7 @@ static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1545 } 1545 }
1546 1546
1547 ecmd->supported |= SUPPORTED_10baseT_Half; 1547 ecmd->supported |= SUPPORTED_10baseT_Half;
1548 ecmd->speed = SPEED_10; 1548 ethtool_cmd_speed_set(ecmd, SPEED_10);
1549 ecmd->duplex = DUPLEX_HALF; 1549 ecmd->duplex = DUPLEX_HALF;
1550 return 0; 1550 return 0;
1551} 1551}
@@ -1604,55 +1604,47 @@ static u32 ewrk3_get_link(struct net_device *dev)
1604 return !(cmr & CMR_LINK); 1604 return !(cmr & CMR_LINK);
1605} 1605}
1606 1606
1607static int ewrk3_phys_id(struct net_device *dev, u32 data) 1607static int ewrk3_set_phys_id(struct net_device *dev,
1608 enum ethtool_phys_id_state state)
1608{ 1609{
1609 struct ewrk3_private *lp = netdev_priv(dev); 1610 struct ewrk3_private *lp = netdev_priv(dev);
1610 unsigned long iobase = dev->base_addr; 1611 unsigned long iobase = dev->base_addr;
1611 unsigned long flags;
1612 u8 cr; 1612 u8 cr;
1613 int count;
1614
1615 /* Toggle LED 4x per second */
1616 count = data << 2;
1617 1613
1618 spin_lock_irqsave(&lp->hw_lock, flags); 1614 spin_lock_irq(&lp->hw_lock);
1619
1620 /* Bail if a PHYS_ID is already in progress */
1621 if (lp->led_mask == 0) {
1622 spin_unlock_irqrestore(&lp->hw_lock, flags);
1623 return -EBUSY;
1624 }
1625 1615
1626 /* Prevent ISR from twiddling the LED */ 1616 switch (state) {
1627 lp->led_mask = 0; 1617 case ETHTOOL_ID_ACTIVE:
1618 /* Prevent ISR from twiddling the LED */
1619 lp->led_mask = 0;
1620 spin_unlock_irq(&lp->hw_lock);
1621 return 2; /* cycle on/off twice per second */
1628 1622
1629 while (count--) { 1623 case ETHTOOL_ID_ON:
1630 /* Toggle the LED */
1631 cr = inb(EWRK3_CR); 1624 cr = inb(EWRK3_CR);
1632 outb(cr ^ CR_LED, EWRK3_CR); 1625 outb(cr | CR_LED, EWRK3_CR);
1626 break;
1633 1627
1634 /* Wait a little while */ 1628 case ETHTOOL_ID_OFF:
1635 spin_unlock_irqrestore(&lp->hw_lock, flags); 1629 cr = inb(EWRK3_CR);
1636 msleep(250); 1630 outb(cr & ~CR_LED, EWRK3_CR);
1637 spin_lock_irqsave(&lp->hw_lock, flags); 1631 break;
1638 1632
1639 /* Exit if we got a signal */ 1633 case ETHTOOL_ID_INACTIVE:
1640 if (signal_pending(current)) 1634 lp->led_mask = CR_LED;
1641 break; 1635 cr = inb(EWRK3_CR);
1636 outb(cr & ~CR_LED, EWRK3_CR);
1642 } 1637 }
1638 spin_unlock_irq(&lp->hw_lock);
1643 1639
1644 lp->led_mask = CR_LED; 1640 return 0;
1645 cr = inb(EWRK3_CR);
1646 outb(cr & ~CR_LED, EWRK3_CR);
1647 spin_unlock_irqrestore(&lp->hw_lock, flags);
1648 return signal_pending(current) ? -ERESTARTSYS : 0;
1649} 1641}
1650 1642
1651static const struct ethtool_ops ethtool_ops_203 = { 1643static const struct ethtool_ops ethtool_ops_203 = {
1652 .get_drvinfo = ewrk3_get_drvinfo, 1644 .get_drvinfo = ewrk3_get_drvinfo,
1653 .get_settings = ewrk3_get_settings, 1645 .get_settings = ewrk3_get_settings,
1654 .set_settings = ewrk3_set_settings, 1646 .set_settings = ewrk3_set_settings,
1655 .phys_id = ewrk3_phys_id, 1647 .set_phys_id = ewrk3_set_phys_id,
1656}; 1648};
1657 1649
1658static const struct ethtool_ops ethtool_ops = { 1650static const struct ethtool_ops ethtool_ops = {
@@ -1660,7 +1652,7 @@ static const struct ethtool_ops ethtool_ops = {
1660 .get_settings = ewrk3_get_settings, 1652 .get_settings = ewrk3_get_settings,
1661 .set_settings = ewrk3_set_settings, 1653 .set_settings = ewrk3_set_settings,
1662 .get_link = ewrk3_get_link, 1654 .get_link = ewrk3_get_link,
1663 .phys_id = ewrk3_phys_id, 1655 .set_phys_id = ewrk3_set_phys_id,
1664}; 1656};
1665 1657
1666/* 1658/*
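
The ewrk3 conversion above also illustrates the set_phys_id contract that replaces the old phys_id hook: a positive return from ETHTOOL_ID_ACTIVE asks the ethtool core to drive the blinking itself, calling back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF at that many on/off cycles per second (a return of 0 would mean the driver blinks the LED on its own), and ETHTOOL_ID_INACTIVE restores normal LED behaviour, which is why the driver no longer needs its msleep() loop. A distilled template for a hypothetical driver (foo_led() and foo_led_restore() are made-up helpers):

static int foo_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;	/* let the core cycle the LED twice per second */

	case ETHTOOL_ID_ON:
		foo_led(dev, true);	/* force the identify LED on */
		break;

	case ETHTOOL_ID_OFF:
		foo_led(dev, false);	/* force the identify LED off */
		break;

	case ETHTOOL_ID_INACTIVE:
		foo_led_restore(dev);	/* back to normal link/activity control */
		break;
	}

	return 0;
}
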
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d5ab4dad5051..d09e8b0add01 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -774,7 +774,6 @@ struct fe_priv {
774 u32 driver_data; 774 u32 driver_data;
775 u32 device_id; 775 u32 device_id;
776 u32 register_size; 776 u32 register_size;
777 int rx_csum;
778 u32 mac_in_use; 777 u32 mac_in_use;
779 int mgmt_version; 778 int mgmt_version;
780 int mgmt_sema; 779 int mgmt_sema;
@@ -3956,6 +3955,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3956static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3955static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3957{ 3956{
3958 struct fe_priv *np = netdev_priv(dev); 3957 struct fe_priv *np = netdev_priv(dev);
3958 u32 speed;
3959 int adv; 3959 int adv;
3960 3960
3961 spin_lock_irq(&np->lock); 3961 spin_lock_irq(&np->lock);
@@ -3975,23 +3975,26 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3975 if (netif_carrier_ok(dev)) { 3975 if (netif_carrier_ok(dev)) {
3976 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3976 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3977 case NVREG_LINKSPEED_10: 3977 case NVREG_LINKSPEED_10:
3978 ecmd->speed = SPEED_10; 3978 speed = SPEED_10;
3979 break; 3979 break;
3980 case NVREG_LINKSPEED_100: 3980 case NVREG_LINKSPEED_100:
3981 ecmd->speed = SPEED_100; 3981 speed = SPEED_100;
3982 break; 3982 break;
3983 case NVREG_LINKSPEED_1000: 3983 case NVREG_LINKSPEED_1000:
3984 ecmd->speed = SPEED_1000; 3984 speed = SPEED_1000;
3985 break;
3986 default:
3987 speed = -1;
3985 break; 3988 break;
3986 } 3989 }
3987 ecmd->duplex = DUPLEX_HALF; 3990 ecmd->duplex = DUPLEX_HALF;
3988 if (np->duplex) 3991 if (np->duplex)
3989 ecmd->duplex = DUPLEX_FULL; 3992 ecmd->duplex = DUPLEX_FULL;
3990 } else { 3993 } else {
3991 ecmd->speed = -1; 3994 speed = -1;
3992 ecmd->duplex = -1; 3995 ecmd->duplex = -1;
3993 } 3996 }
3994 3997 ethtool_cmd_speed_set(ecmd, speed);
3995 ecmd->autoneg = np->autoneg; 3998 ecmd->autoneg = np->autoneg;
3996 3999
3997 ecmd->advertising = ADVERTISED_MII; 4000 ecmd->advertising = ADVERTISED_MII;
@@ -4030,6 +4033,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4030static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 4033static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4031{ 4034{
4032 struct fe_priv *np = netdev_priv(dev); 4035 struct fe_priv *np = netdev_priv(dev);
4036 u32 speed = ethtool_cmd_speed(ecmd);
4033 4037
4034 if (ecmd->port != PORT_MII) 4038 if (ecmd->port != PORT_MII)
4035 return -EINVAL; 4039 return -EINVAL;
@@ -4055,7 +4059,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4055 /* Note: autonegotiation disable, speed 1000 intentionally 4059 /* Note: autonegotiation disable, speed 1000 intentionally
4056 * forbidden - no one should need that. */ 4060 * forbidden - no one should need that. */
4057 4061
4058 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 4062 if (speed != SPEED_10 && speed != SPEED_100)
4059 return -EINVAL; 4063 return -EINVAL;
4060 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 4064 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4061 return -EINVAL; 4065 return -EINVAL;
@@ -4139,13 +4143,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4139 4143
4140 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 4144 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4141 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 4145 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4142 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) 4146 if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4143 adv |= ADVERTISE_10HALF; 4147 adv |= ADVERTISE_10HALF;
4144 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) 4148 if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4145 adv |= ADVERTISE_10FULL; 4149 adv |= ADVERTISE_10FULL;
4146 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) 4150 if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4147 adv |= ADVERTISE_100HALF; 4151 adv |= ADVERTISE_100HALF;
4148 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) 4152 if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4149 adv |= ADVERTISE_100FULL; 4153 adv |= ADVERTISE_100FULL;
4150 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); 4154 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4151 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ 4155 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
@@ -4264,16 +4268,6 @@ static int nv_nway_reset(struct net_device *dev)
4264 return ret; 4268 return ret;
4265} 4269}
4266 4270
4267static int nv_set_tso(struct net_device *dev, u32 value)
4268{
4269 struct fe_priv *np = netdev_priv(dev);
4270
4271 if ((np->driver_data & DEV_HAS_CHECKSUM))
4272 return ethtool_op_set_tso(dev, value);
4273 else
4274 return -EOPNOTSUPP;
4275}
4276
4277static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 4271static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4278{ 4272{
4279 struct fe_priv *np = netdev_priv(dev); 4273 struct fe_priv *np = netdev_priv(dev);
@@ -4480,58 +4474,36 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4480 return 0; 4474 return 0;
4481} 4475}
4482 4476
4483static u32 nv_get_rx_csum(struct net_device *dev) 4477static u32 nv_fix_features(struct net_device *dev, u32 features)
4484{ 4478{
4485 struct fe_priv *np = netdev_priv(dev); 4479 /* vlan is dependent on rx checksum offload */
4486 return np->rx_csum != 0; 4480 if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
4481 features |= NETIF_F_RXCSUM;
4482
4483 return features;
4487} 4484}
4488 4485
4489static int nv_set_rx_csum(struct net_device *dev, u32 data) 4486static int nv_set_features(struct net_device *dev, u32 features)
4490{ 4487{
4491 struct fe_priv *np = netdev_priv(dev); 4488 struct fe_priv *np = netdev_priv(dev);
4492 u8 __iomem *base = get_hwbase(dev); 4489 u8 __iomem *base = get_hwbase(dev);
4493 int retcode = 0; 4490 u32 changed = dev->features ^ features;
4494
4495 if (np->driver_data & DEV_HAS_CHECKSUM) {
4496 if (data) {
4497 np->rx_csum = 1;
4498 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4499 } else {
4500 np->rx_csum = 0;
4501 /* vlan is dependent on rx checksum offload */
4502 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4503 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4504 }
4505 if (netif_running(dev)) {
4506 spin_lock_irq(&np->lock);
4507 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4508 spin_unlock_irq(&np->lock);
4509 }
4510 } else {
4511 return -EINVAL;
4512 }
4513 4491
4514 return retcode; 4492 if (changed & NETIF_F_RXCSUM) {
4515} 4493 spin_lock_irq(&np->lock);
4516 4494
4517static int nv_set_tx_csum(struct net_device *dev, u32 data) 4495 if (features & NETIF_F_RXCSUM)
4518{ 4496 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4519 struct fe_priv *np = netdev_priv(dev); 4497 else
4498 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4520 4499
4521 if (np->driver_data & DEV_HAS_CHECKSUM) 4500 if (netif_running(dev))
4522 return ethtool_op_set_tx_csum(dev, data); 4501 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4523 else
4524 return -EOPNOTSUPP;
4525}
4526 4502
4527static int nv_set_sg(struct net_device *dev, u32 data) 4503 spin_unlock_irq(&np->lock);
4528{ 4504 }
4529 struct fe_priv *np = netdev_priv(dev);
4530 4505
4531 if (np->driver_data & DEV_HAS_CHECKSUM) 4506 return 0;
4532 return ethtool_op_set_sg(dev, data);
4533 else
4534 return -EOPNOTSUPP;
4535} 4507}
4536 4508
4537static int nv_get_sset_count(struct net_device *dev, int sset) 4509static int nv_get_sset_count(struct net_device *dev, int sset)
@@ -4896,15 +4868,10 @@ static const struct ethtool_ops ops = {
4896 .get_regs_len = nv_get_regs_len, 4868 .get_regs_len = nv_get_regs_len,
4897 .get_regs = nv_get_regs, 4869 .get_regs = nv_get_regs,
4898 .nway_reset = nv_nway_reset, 4870 .nway_reset = nv_nway_reset,
4899 .set_tso = nv_set_tso,
4900 .get_ringparam = nv_get_ringparam, 4871 .get_ringparam = nv_get_ringparam,
4901 .set_ringparam = nv_set_ringparam, 4872 .set_ringparam = nv_set_ringparam,
4902 .get_pauseparam = nv_get_pauseparam, 4873 .get_pauseparam = nv_get_pauseparam,
4903 .set_pauseparam = nv_set_pauseparam, 4874 .set_pauseparam = nv_set_pauseparam,
4904 .get_rx_csum = nv_get_rx_csum,
4905 .set_rx_csum = nv_set_rx_csum,
4906 .set_tx_csum = nv_set_tx_csum,
4907 .set_sg = nv_set_sg,
4908 .get_strings = nv_get_strings, 4875 .get_strings = nv_get_strings,
4909 .get_ethtool_stats = nv_get_ethtool_stats, 4876 .get_ethtool_stats = nv_get_ethtool_stats,
4910 .get_sset_count = nv_get_sset_count, 4877 .get_sset_count = nv_get_sset_count,
@@ -5235,6 +5202,8 @@ static const struct net_device_ops nv_netdev_ops = {
5235 .ndo_start_xmit = nv_start_xmit, 5202 .ndo_start_xmit = nv_start_xmit,
5236 .ndo_tx_timeout = nv_tx_timeout, 5203 .ndo_tx_timeout = nv_tx_timeout,
5237 .ndo_change_mtu = nv_change_mtu, 5204 .ndo_change_mtu = nv_change_mtu,
5205 .ndo_fix_features = nv_fix_features,
5206 .ndo_set_features = nv_set_features,
5238 .ndo_validate_addr = eth_validate_addr, 5207 .ndo_validate_addr = eth_validate_addr,
5239 .ndo_set_mac_address = nv_set_mac_address, 5208 .ndo_set_mac_address = nv_set_mac_address,
5240 .ndo_set_multicast_list = nv_set_multicast, 5209 .ndo_set_multicast_list = nv_set_multicast,
@@ -5251,6 +5220,8 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
5251 .ndo_start_xmit = nv_start_xmit_optimized, 5220 .ndo_start_xmit = nv_start_xmit_optimized,
5252 .ndo_tx_timeout = nv_tx_timeout, 5221 .ndo_tx_timeout = nv_tx_timeout,
5253 .ndo_change_mtu = nv_change_mtu, 5222 .ndo_change_mtu = nv_change_mtu,
5223 .ndo_fix_features = nv_fix_features,
5224 .ndo_set_features = nv_set_features,
5254 .ndo_validate_addr = eth_validate_addr, 5225 .ndo_validate_addr = eth_validate_addr,
5255 .ndo_set_mac_address = nv_set_mac_address, 5226 .ndo_set_mac_address = nv_set_mac_address,
5256 .ndo_set_multicast_list = nv_set_multicast, 5227 .ndo_set_multicast_list = nv_set_multicast,
@@ -5364,11 +5335,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5364 np->pkt_limit = NV_PKTLIMIT_2; 5335 np->pkt_limit = NV_PKTLIMIT_2;
5365 5336
5366 if (id->driver_data & DEV_HAS_CHECKSUM) { 5337 if (id->driver_data & DEV_HAS_CHECKSUM) {
5367 np->rx_csum = 1;
5368 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5338 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5369 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 5339 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5370 dev->features |= NETIF_F_TSO; 5340 NETIF_F_TSO | NETIF_F_RXCSUM;
5371 dev->features |= NETIF_F_GRO; 5341 dev->features |= dev->hw_features;
5372 } 5342 }
5373 5343
5374 np->vlanctl_bits = 0; 5344 np->vlanctl_bits = 0;
@@ -5384,7 +5354,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5384 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5354 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5385 } 5355 }
5386 5356
5387
5388 err = -ENOMEM; 5357 err = -ENOMEM;
5389 np->base = ioremap(addr, np->register_size); 5358 np->base = ioremap(addr, np->register_size);
5390 if (!np->base) 5359 if (!np->base)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 24cb953900dd..a9388944f1d3 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -956,8 +956,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
956 .get_link = ethtool_op_get_link, 956 .get_link = ethtool_op_get_link,
957 .get_msglevel = fs_get_msglevel, 957 .get_msglevel = fs_get_msglevel,
958 .set_msglevel = fs_set_msglevel, 958 .set_msglevel = fs_set_msglevel,
959 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
960 .set_sg = ethtool_op_set_sg,
961 .get_regs = fs_get_regs, 959 .get_regs = fs_get_regs,
962}; 960};
963 961
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 2a0ad9a501bb..ff60b23a5b74 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -365,7 +365,7 @@ static void gfar_init_mac(struct net_device *ndev)
365 gfar_write(&regs->rir0, DEFAULT_RIR0); 365 gfar_write(&regs->rir0, DEFAULT_RIR0);
366 } 366 }
367 367
368 if (priv->rx_csum_enable) 368 if (ndev->features & NETIF_F_RXCSUM)
369 rctrl |= RCTRL_CHECKSUMMING; 369 rctrl |= RCTRL_CHECKSUMMING;
370 370
371 if (priv->extended_hash) { 371 if (priv->extended_hash) {
@@ -463,6 +463,7 @@ static const struct net_device_ops gfar_netdev_ops = {
463 .ndo_start_xmit = gfar_start_xmit, 463 .ndo_start_xmit = gfar_start_xmit,
464 .ndo_stop = gfar_close, 464 .ndo_stop = gfar_close,
465 .ndo_change_mtu = gfar_change_mtu, 465 .ndo_change_mtu = gfar_change_mtu,
466 .ndo_set_features = gfar_set_features,
466 .ndo_set_multicast_list = gfar_set_multi, 467 .ndo_set_multicast_list = gfar_set_multi,
467 .ndo_tx_timeout = gfar_timeout, 468 .ndo_tx_timeout = gfar_timeout,
468 .ndo_do_ioctl = gfar_ioctl, 469 .ndo_do_ioctl = gfar_ioctl,
@@ -513,7 +514,7 @@ void unlock_tx_qs(struct gfar_private *priv)
513/* Returns 1 if incoming frames use an FCB */ 514/* Returns 1 if incoming frames use an FCB */
514static inline int gfar_uses_fcb(struct gfar_private *priv) 515static inline int gfar_uses_fcb(struct gfar_private *priv)
515{ 516{
516 return priv->vlgrp || priv->rx_csum_enable || 517 return priv->vlgrp || (priv->ndev->features & NETIF_F_RXCSUM) ||
517 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); 518 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
518} 519}
519 520
@@ -1030,10 +1031,11 @@ static int gfar_probe(struct platform_device *ofdev)
1030 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); 1031 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
1031 1032
1032 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1033 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1033 priv->rx_csum_enable = 1; 1034 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1034 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 1035 NETIF_F_RXCSUM;
1035 } else 1036 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1036 priv->rx_csum_enable = 0; 1037 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1038 }
1037 1039
1038 priv->vlgrp = NULL; 1040 priv->vlgrp = NULL;
1039 1041
@@ -2697,7 +2699,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2697 if (priv->padding) 2699 if (priv->padding)
2698 skb_pull(skb, priv->padding); 2700 skb_pull(skb, priv->padding);
2699 2701
2700 if (priv->rx_csum_enable) 2702 if (dev->features & NETIF_F_RXCSUM)
2701 gfar_rx_checksum(skb, fcb); 2703 gfar_rx_checksum(skb, fcb);
2702 2704
2703 /* Tell the skb what kind of packet this is */ 2705 /* Tell the skb what kind of packet this is */
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index b2fe7edefad9..fc86f5195445 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -382,23 +382,6 @@ extern const char gfar_driver_version[];
382#define BD_LFLAG(flags) ((flags) << 16) 382#define BD_LFLAG(flags) ((flags) << 16)
383#define BD_LENGTH_MASK 0x0000ffff 383#define BD_LENGTH_MASK 0x0000ffff
384 384
385#define CLASS_CODE_UNRECOG 0x00
386#define CLASS_CODE_DUMMY1 0x01
387#define CLASS_CODE_ETHERTYPE1 0x02
388#define CLASS_CODE_ETHERTYPE2 0x03
389#define CLASS_CODE_USER_PROG1 0x04
390#define CLASS_CODE_USER_PROG2 0x05
391#define CLASS_CODE_USER_PROG3 0x06
392#define CLASS_CODE_USER_PROG4 0x07
393#define CLASS_CODE_TCP_IPV4 0x08
394#define CLASS_CODE_UDP_IPV4 0x09
395#define CLASS_CODE_AH_ESP_IPV4 0x0a
396#define CLASS_CODE_SCTP_IPV4 0x0b
397#define CLASS_CODE_TCP_IPV6 0x0c
398#define CLASS_CODE_UDP_IPV6 0x0d
399#define CLASS_CODE_AH_ESP_IPV6 0x0e
400#define CLASS_CODE_SCTP_IPV6 0x0f
401
402#define FPR_FILER_MASK 0xFFFFFFFF 385#define FPR_FILER_MASK 0xFFFFFFFF
403#define MAX_FILER_IDX 0xFF 386#define MAX_FILER_IDX 0xFF
404 387
@@ -1100,7 +1083,7 @@ struct gfar_private {
1100 struct device_node *phy_node; 1083 struct device_node *phy_node;
1101 struct device_node *tbi_node; 1084 struct device_node *tbi_node;
1102 u32 device_flags; 1085 u32 device_flags;
1103 unsigned char rx_csum_enable:1, 1086 unsigned char
1104 extended_hash:1, 1087 extended_hash:1,
1105 bd_stash_en:1, 1088 bd_stash_en:1,
1106 rx_filer_enable:1, 1089 rx_filer_enable:1,
@@ -1170,6 +1153,7 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
1170extern void gfar_configure_coalescing(struct gfar_private *priv, 1153extern void gfar_configure_coalescing(struct gfar_private *priv,
1171 unsigned long tx_mask, unsigned long rx_mask); 1154 unsigned long tx_mask, unsigned long rx_mask);
1172void gfar_init_sysfs(struct net_device *dev); 1155void gfar_init_sysfs(struct net_device *dev);
1156int gfar_set_features(struct net_device *dev, u32 features);
1173 1157
1174extern const struct ethtool_ops gfar_ethtool_ops; 1158extern const struct ethtool_ops gfar_ethtool_ops;
1175 1159
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 3bc8e276ba4d..493d743839d9 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -517,15 +517,15 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
517 return err; 517 return err;
518} 518}
519 519
520static int gfar_set_rx_csum(struct net_device *dev, uint32_t data) 520int gfar_set_features(struct net_device *dev, u32 features)
521{ 521{
522 struct gfar_private *priv = netdev_priv(dev); 522 struct gfar_private *priv = netdev_priv(dev);
523 unsigned long flags; 523 unsigned long flags;
524 int err = 0, i = 0; 524 int err = 0, i = 0;
525 u32 changed = dev->features ^ features;
525 526
526 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 527 if (!(changed & NETIF_F_RXCSUM))
527 return -EOPNOTSUPP; 528 return 0;
528
529 529
530 if (dev->flags & IFF_UP) { 530 if (dev->flags & IFF_UP) {
531 /* Halt TX and RX, and process the frames which 531 /* Halt TX and RX, and process the frames which
@@ -546,58 +546,15 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
546 546
547 /* Now we take down the rings to rebuild them */ 547 /* Now we take down the rings to rebuild them */
548 stop_gfar(dev); 548 stop_gfar(dev);
549 }
550 549
551 spin_lock_irqsave(&priv->bflock, flags); 550 dev->features = features;
552 priv->rx_csum_enable = data;
553 spin_unlock_irqrestore(&priv->bflock, flags);
554 551
555 if (dev->flags & IFF_UP) {
556 err = startup_gfar(dev); 552 err = startup_gfar(dev);
557 netif_tx_wake_all_queues(dev); 553 netif_tx_wake_all_queues(dev);
558 } 554 }
559 return err; 555 return err;
560} 556}
561 557
562static uint32_t gfar_get_rx_csum(struct net_device *dev)
563{
564 struct gfar_private *priv = netdev_priv(dev);
565
566 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
567 return 0;
568
569 return priv->rx_csum_enable;
570}
571
572static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
573{
574 struct gfar_private *priv = netdev_priv(dev);
575
576 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
577 return -EOPNOTSUPP;
578
579 netif_tx_lock_bh(dev);
580
581 if (data)
582 dev->features |= NETIF_F_IP_CSUM;
583 else
584 dev->features &= ~NETIF_F_IP_CSUM;
585
586 netif_tx_unlock_bh(dev);
587
588 return 0;
589}
590
591static uint32_t gfar_get_tx_csum(struct net_device *dev)
592{
593 struct gfar_private *priv = netdev_priv(dev);
594
595 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
596 return 0;
597
598 return (dev->features & NETIF_F_IP_CSUM) != 0;
599}
600
601static uint32_t gfar_get_msglevel(struct net_device *dev) 558static uint32_t gfar_get_msglevel(struct net_device *dev)
602{ 559{
603 struct gfar_private *priv = netdev_priv(dev); 560 struct gfar_private *priv = netdev_priv(dev);
@@ -645,42 +602,6 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
645} 602}
646#endif 603#endif
647 604
648static int gfar_ethflow_to_class(int flow_type, u64 *class)
649{
650 switch (flow_type) {
651 case TCP_V4_FLOW:
652 *class = CLASS_CODE_TCP_IPV4;
653 break;
654 case UDP_V4_FLOW:
655 *class = CLASS_CODE_UDP_IPV4;
656 break;
657 case AH_V4_FLOW:
658 case ESP_V4_FLOW:
659 *class = CLASS_CODE_AH_ESP_IPV4;
660 break;
661 case SCTP_V4_FLOW:
662 *class = CLASS_CODE_SCTP_IPV4;
663 break;
664 case TCP_V6_FLOW:
665 *class = CLASS_CODE_TCP_IPV6;
666 break;
667 case UDP_V6_FLOW:
668 *class = CLASS_CODE_UDP_IPV6;
669 break;
670 case AH_V6_FLOW:
671 case ESP_V6_FLOW:
672 *class = CLASS_CODE_AH_ESP_IPV6;
673 break;
674 case SCTP_V6_FLOW:
675 *class = CLASS_CODE_SCTP_IPV6;
676 break;
677 default:
678 return 0;
679 }
680
681 return 1;
682}
683
684static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) 605static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
685{ 606{
686 u32 fcr = 0x0, fpr = FPR_FILER_MASK; 607 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
@@ -778,11 +699,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
778 case UDP_V6_FLOW: 699 case UDP_V6_FLOW:
779 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP; 700 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
780 break; 701 break;
781 case IPV4_FLOW:
782 cmp_rqfpr = RQFPR_IPV4;
783 case IPV6_FLOW:
784 cmp_rqfpr = RQFPR_IPV6;
785 break;
786 default: 702 default:
787 printk(KERN_ERR "Right now this class is not supported\n"); 703 printk(KERN_ERR "Right now this class is not supported\n");
788 return 0; 704 return 0;
@@ -848,18 +764,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
848 764
849static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 765static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
850{ 766{
851 u64 class;
852
853 if (!gfar_ethflow_to_class(cmd->flow_type, &class))
854 return -EINVAL;
855
856 if (class < CLASS_CODE_USER_PROG1 ||
857 class > CLASS_CODE_SCTP_IPV6)
858 return -EINVAL;
859
860 /* write the filer rules here */ 767 /* write the filer rules here */
861 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) 768 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
862 return -1; 769 return -EINVAL;
863 770
864 return 0; 771 return 0;
865} 772}
@@ -894,11 +801,6 @@ const struct ethtool_ops gfar_ethtool_ops = {
894 .get_strings = gfar_gstrings, 801 .get_strings = gfar_gstrings,
895 .get_sset_count = gfar_sset_count, 802 .get_sset_count = gfar_sset_count,
896 .get_ethtool_stats = gfar_fill_stats, 803 .get_ethtool_stats = gfar_fill_stats,
897 .get_rx_csum = gfar_get_rx_csum,
898 .get_tx_csum = gfar_get_tx_csum,
899 .set_rx_csum = gfar_set_rx_csum,
900 .set_tx_csum = gfar_set_tx_csum,
901 .set_sg = ethtool_op_set_sg,
902 .get_msglevel = gfar_get_msglevel, 804 .get_msglevel = gfar_get_msglevel,
903 .set_msglevel = gfar_set_msglevel, 805 .set_msglevel = gfar_set_msglevel,
904#ifdef CONFIG_PM 806#ifdef CONFIG_PM
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 396ff7d785d1..f181304a7ab6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -901,7 +901,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
901 901
902 skb_put(skb, pkt_len); 902 skb_put(skb, pkt_len);
903 903
904 if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status)) 904 if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
905 skb->ip_summed = CHECKSUM_UNNECESSARY; 905 skb->ip_summed = CHECKSUM_UNNECESSARY;
906 else 906 else
907 skb_checksum_none_assert(skb); 907 skb_checksum_none_assert(skb);
@@ -1142,41 +1142,6 @@ static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, vo
1142 buff[i] = greth_read_bd(&greth_regs[i]); 1142 buff[i] = greth_read_bd(&greth_regs[i]);
1143} 1143}
1144 1144
1145static u32 greth_get_rx_csum(struct net_device *dev)
1146{
1147 struct greth_private *greth = netdev_priv(dev);
1148 return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
1149}
1150
1151static int greth_set_rx_csum(struct net_device *dev, u32 data)
1152{
1153 struct greth_private *greth = netdev_priv(dev);
1154
1155 spin_lock_bh(&greth->devlock);
1156
1157 if (data)
1158 greth->flags |= GRETH_FLAG_RX_CSUM;
1159 else
1160 greth->flags &= ~GRETH_FLAG_RX_CSUM;
1161
1162 spin_unlock_bh(&greth->devlock);
1163
1164 return 0;
1165}
1166
1167static u32 greth_get_tx_csum(struct net_device *dev)
1168{
1169 return (dev->features & NETIF_F_IP_CSUM) != 0;
1170}
1171
1172static int greth_set_tx_csum(struct net_device *dev, u32 data)
1173{
1174 netif_tx_lock_bh(dev);
1175 ethtool_op_set_tx_csum(dev, data);
1176 netif_tx_unlock_bh(dev);
1177 return 0;
1178}
1179
1180static const struct ethtool_ops greth_ethtool_ops = { 1145static const struct ethtool_ops greth_ethtool_ops = {
1181 .get_msglevel = greth_get_msglevel, 1146 .get_msglevel = greth_get_msglevel,
1182 .set_msglevel = greth_set_msglevel, 1147 .set_msglevel = greth_set_msglevel,
@@ -1185,10 +1150,6 @@ static const struct ethtool_ops greth_ethtool_ops = {
1185 .get_drvinfo = greth_get_drvinfo, 1150 .get_drvinfo = greth_get_drvinfo,
1186 .get_regs_len = greth_get_regs_len, 1151 .get_regs_len = greth_get_regs_len,
1187 .get_regs = greth_get_regs, 1152 .get_regs = greth_get_regs,
1188 .get_rx_csum = greth_get_rx_csum,
1189 .set_rx_csum = greth_set_rx_csum,
1190 .get_tx_csum = greth_get_tx_csum,
1191 .set_tx_csum = greth_set_tx_csum,
1192 .get_link = ethtool_op_get_link, 1153 .get_link = ethtool_op_get_link,
1193}; 1154};
1194 1155
@@ -1570,9 +1531,10 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
1570 GRETH_REGSAVE(regs->status, 0xFF); 1531 GRETH_REGSAVE(regs->status, 0xFF);
1571 1532
1572 if (greth->gbit_mac) { 1533 if (greth->gbit_mac) {
1573 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA; 1534 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1535 NETIF_F_RXCSUM;
1536 dev->features = dev->hw_features | NETIF_F_HIGHDMA;
1574 greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit; 1537 greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
1575 greth->flags = GRETH_FLAG_RX_CSUM;
1576 } 1538 }
1577 1539
1578 if (greth->multicast) { 1540 if (greth->multicast) {
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index be0f2062bd14..9a0040dee4da 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -77,9 +77,6 @@
77 */ 77 */
78#define MAX_FRAME_SIZE 1520 78#define MAX_FRAME_SIZE 1520
79 79
80/* Flags */
81#define GRETH_FLAG_RX_CSUM 0x1
82
83/* GRETH APB registers */ 80/* GRETH APB registers */
84struct greth_regs { 81struct greth_regs {
85 u32 control; 82 u32 control;
@@ -133,7 +130,6 @@ struct greth_private {
133 unsigned int duplex; 130 unsigned int duplex;
134 131
135 u32 msg_enable; 132 u32 msg_enable;
136 u32 flags;
137 133
138 u8 phyaddr; 134 u8 phyaddr;
139 u8 multicast; 135 u8 multicast;
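For reference, the gianfar and greth hunks above are instances of the same hw_features conversion: the driver-private checksum flag (GRETH_FLAG_RX_CSUM) and the per-driver get/set_rx_csum, get/set_tx_csum and set_sg ethtool hooks disappear, the toggleable offloads are advertised once through netdev->hw_features, and the receive path simply tests netdev->features. The following is a minimal sketch of that idiom, not code from this patch; the myeth_* names are hypothetical.

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	/* Probe-time setup: advertise the toggleable offloads in hw_features
	 * so the generic ethtool code can flip them; features holds the
	 * currently enabled set. */
	static void myeth_init_features(struct net_device *dev)
	{
		dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
		dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	}

	/* Receive path: no private flag, just test the current feature bit. */
	static void myeth_rx_checksum(struct net_device *dev, struct sk_buff *skb,
				      bool hw_csum_ok)
	{
		if ((dev->features & NETIF_F_RXCSUM) && hw_csum_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
	}

As in greth here, no ndo_set_features callback is needed when flipping the bit requires no hardware access; the core ethtool code updates netdev->features on its own.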
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 80d25ed53344..a09041aa8509 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -132,13 +132,8 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
132/* 132/*
133 * RX_CHECKSUM turns on card-generated receive checksum generation for 133 * RX_CHECKSUM turns on card-generated receive checksum generation for
134 * TCP and UDP packets. Otherwise the upper layers do the calculation. 134 * TCP and UDP packets. Otherwise the upper layers do the calculation.
135 * TX_CHECKSUM won't do anything too useful, even if it works. There's no
136 * easy mechanism by which to tell the TCP/UDP stack that it need not
137 * generate checksums for this device. But if somebody can find a way
138 * to get that to work, most of the card work is in here already.
139 * 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov> 135 * 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov>
140 */ 136 */
141#undef TX_CHECKSUM
142#define RX_CHECKSUM 137#define RX_CHECKSUM
143 138
144/* Operational parameters that usually are not changed. */ 139/* Operational parameters that usually are not changed. */
@@ -630,11 +625,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
630 625
631 SET_NETDEV_DEV(dev, &pdev->dev); 626 SET_NETDEV_DEV(dev, &pdev->dev);
632 627
633#ifdef TX_CHECKSUM
634 printk("check that skbcopy in ip_queue_xmit isn't happening\n");
635 dev->hard_header_len += 8; /* for cksum tag */
636#endif
637
638 for (i = 0; i < 6; i++) 628 for (i = 0; i < 6; i++)
639 dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i) 629 dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
640 : readb(ioaddr + StationAddr + i); 630 : readb(ioaddr + StationAddr + i);
@@ -937,11 +927,7 @@ static int hamachi_open(struct net_device *dev)
937 927
938 /* always 1, takes no more time to do it */ 928 /* always 1, takes no more time to do it */
939 writew(0x0001, ioaddr + RxChecksum); 929 writew(0x0001, ioaddr + RxChecksum);
940#ifdef TX_CHECKSUM
941 writew(0x0001, ioaddr + TxChecksum);
942#else
943 writew(0x0000, ioaddr + TxChecksum); 930 writew(0x0000, ioaddr + TxChecksum);
944#endif
945 writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */ 931 writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
946 writew(0x215F, ioaddr + MACCnfg); 932 writew(0x215F, ioaddr + MACCnfg);
947 writew(0x000C, ioaddr + FrameGap0); 933 writew(0x000C, ioaddr + FrameGap0);
@@ -1226,40 +1212,6 @@ static void hamachi_init_ring(struct net_device *dev)
1226} 1212}
1227 1213
1228 1214
1229#ifdef TX_CHECKSUM
1230#define csum_add(it, val) \
1231do { \
1232 it += (u16) (val); \
1233 if (it & 0xffff0000) { \
1234 it &= 0xffff; \
1235 ++it; \
1236 } \
1237} while (0)
1238 /* printk("add %04x --> %04x\n", val, it); \ */
1239
1240/* uh->len already network format, do not swap */
1241#define pseudo_csum_udp(sum,ih,uh) do { \
1242 sum = 0; \
1243 csum_add(sum, (ih)->saddr >> 16); \
1244 csum_add(sum, (ih)->saddr & 0xffff); \
1245 csum_add(sum, (ih)->daddr >> 16); \
1246 csum_add(sum, (ih)->daddr & 0xffff); \
1247 csum_add(sum, cpu_to_be16(IPPROTO_UDP)); \
1248 csum_add(sum, (uh)->len); \
1249} while (0)
1250
1251/* swap len */
1252#define pseudo_csum_tcp(sum,ih,len) do { \
1253 sum = 0; \
1254 csum_add(sum, (ih)->saddr >> 16); \
1255 csum_add(sum, (ih)->saddr & 0xffff); \
1256 csum_add(sum, (ih)->daddr >> 16); \
1257 csum_add(sum, (ih)->daddr & 0xffff); \
1258 csum_add(sum, cpu_to_be16(IPPROTO_TCP)); \
1259 csum_add(sum, htons(len)); \
1260} while (0)
1261#endif
1262
1263static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, 1215static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
1264 struct net_device *dev) 1216 struct net_device *dev)
1265{ 1217{
@@ -1292,36 +1244,6 @@ static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
1292 1244
1293 hmp->tx_skbuff[entry] = skb; 1245 hmp->tx_skbuff[entry] = skb;
1294 1246
1295#ifdef TX_CHECKSUM
1296 {
1297 /* tack on checksum tag */
1298 u32 tagval = 0;
1299 struct ethhdr *eh = (struct ethhdr *)skb->data;
1300 if (eh->h_proto == cpu_to_be16(ETH_P_IP)) {
1301 struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN);
1302 if (ih->protocol == IPPROTO_UDP) {
1303 struct udphdr *uh
1304 = (struct udphdr *)((char *)ih + ih->ihl*4);
1305 u32 offset = ((unsigned char *)uh + 6) - skb->data;
1306 u32 pseudo;
1307 pseudo_csum_udp(pseudo, ih, uh);
1308 pseudo = htons(pseudo);
1309 printk("udp cksum was %04x, sending pseudo %04x\n",
1310 uh->check, pseudo);
1311 uh->check = 0; /* zero out uh->check before card calc */
1312 /*
1313 * start at 14 (skip ethhdr), store at offset (uh->check),
1314 * use pseudo value given.
1315 */
1316 tagval = (14 << 24) | (offset << 16) | pseudo;
1317 } else if (ih->protocol == IPPROTO_TCP) {
1318 printk("tcp, no auto cksum\n");
1319 }
1320 }
1321 *(u32 *)skb_push(skb, 8) = tagval;
1322 }
1323#endif
1324
1325 hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 1247 hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1326 skb->data, skb->len, PCI_DMA_TODEVICE)); 1248 skb->data, skb->len, PCI_DMA_TODEVICE));
1327 1249
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 8931168d3e74..18d8affecd1b 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -516,10 +516,6 @@ static int bpq_new_device(struct net_device *edev)
516 memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr)); 516 memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
517 memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr)); 517 memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
518 518
519 err = dev_alloc_name(ndev, ndev->name);
520 if (err < 0)
521 goto error;
522
523 err = register_netdevice(ndev); 519 err = register_netdevice(ndev);
524 if (err) 520 if (err)
525 goto error; 521 goto error;
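The dev_alloc_name() call dropped here (and again in ifb further below) is redundant on these kernels: register_netdevice() resolves a printf-style interface name such as "bpq%d" internally via dev_get_valid_name() before the device becomes visible. A rough sketch of the simplified flow, with hypothetical myeth_* names and error handling trimmed to the essentials:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	/* Hypothetical setup helper; real drivers use their own. */
	static void myeth_setup(struct net_device *dev)
	{
		ether_setup(dev);
	}

	static int myeth_create(void)
	{
		struct net_device *dev;
		int err;

		/* The "%d" in the template is filled in by register_netdevice()
		 * itself (via dev_get_valid_name()), so no dev_alloc_name(). */
		dev = alloc_netdev(0, "myeth%d", myeth_setup);
		if (!dev)
			return -ENOMEM;

		err = register_netdevice(dev);	/* caller must hold RTNL */
		if (err) {
			free_netdev(dev);
			return err;
		}
		return 0;
	}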
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8e10d2f6a5ad..c52a1df5d922 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -188,14 +188,14 @@ struct hp100_private {
188 * variables 188 * variables
189 */ 189 */
190#ifdef CONFIG_ISA 190#ifdef CONFIG_ISA
191static const char *hp100_isa_tbl[] = { 191static const char *const hp100_isa_tbl[] __devinitconst = {
192 "HWPF150", /* HP J2573 rev A */ 192 "HWPF150", /* HP J2573 rev A */
193 "HWP1950", /* HP J2573 */ 193 "HWP1950", /* HP J2573 */
194}; 194};
195#endif 195#endif
196 196
197#ifdef CONFIG_EISA 197#ifdef CONFIG_EISA
198static struct eisa_device_id hp100_eisa_tbl[] = { 198static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = {
199 { "HWPF180" }, /* HP J2577 rev A */ 199 { "HWPF180" }, /* HP J2577 rev A */
200 { "HWP1920" }, /* HP 27248B */ 200 { "HWP1920" }, /* HP 27248B */
201 { "HWP1940" }, /* HP J2577 */ 201 { "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
336} 336}
337 337
338#ifdef CONFIG_ISA 338#ifdef CONFIG_ISA
339static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr) 339static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
340{ 340{
341 const char *sig; 341 const char *sig;
342 int i; 342 int i;
@@ -372,7 +372,7 @@ static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
372 * EISA and PCI are handled by device infrastructure. 372 * EISA and PCI are handled by device infrastructure.
373 */ 373 */
374 374
375static int __init hp100_isa_probe(struct net_device *dev, int addr) 375static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
376{ 376{
377 int err = -ENODEV; 377 int err = -ENODEV;
378 378
@@ -396,7 +396,7 @@ static int __init hp100_isa_probe(struct net_device *dev, int addr)
396#endif /* CONFIG_ISA */ 396#endif /* CONFIG_ISA */
397 397
398#if !defined(MODULE) && defined(CONFIG_ISA) 398#if !defined(MODULE) && defined(CONFIG_ISA)
399struct net_device * __init hp100_probe(int unit) 399struct net_device * __devinit hp100_probe(int unit)
400{ 400{
401 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); 401 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
402 int err; 402 int err;
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
2843} 2843}
2844 2844
2845#ifdef CONFIG_EISA 2845#ifdef CONFIG_EISA
2846static int __init hp100_eisa_probe (struct device *gendev) 2846static int __devinit hp100_eisa_probe (struct device *gendev)
2847{ 2847{
2848 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); 2848 struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
2849 struct eisa_device *edev = to_eisa_device(gendev); 2849 struct eisa_device *edev = to_eisa_device(gendev);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3bb990b6651a..079450fe5e96 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2053,13 +2053,6 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2053 mutex_unlock(&dev->link_lock); 2053 mutex_unlock(&dev->link_lock);
2054} 2054}
2055 2055
2056static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2057{
2058 struct emac_instance *dev = netdev_priv(ndev);
2059
2060 return dev->tah_dev != NULL;
2061}
2062
2063static int emac_get_regs_len(struct emac_instance *dev) 2056static int emac_get_regs_len(struct emac_instance *dev)
2064{ 2057{
2065 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) 2058 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
@@ -2203,15 +2196,11 @@ static const struct ethtool_ops emac_ethtool_ops = {
2203 .get_ringparam = emac_ethtool_get_ringparam, 2196 .get_ringparam = emac_ethtool_get_ringparam,
2204 .get_pauseparam = emac_ethtool_get_pauseparam, 2197 .get_pauseparam = emac_ethtool_get_pauseparam,
2205 2198
2206 .get_rx_csum = emac_ethtool_get_rx_csum,
2207
2208 .get_strings = emac_ethtool_get_strings, 2199 .get_strings = emac_ethtool_get_strings,
2209 .get_sset_count = emac_ethtool_get_sset_count, 2200 .get_sset_count = emac_ethtool_get_sset_count,
2210 .get_ethtool_stats = emac_ethtool_get_ethtool_stats, 2201 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2211 2202
2212 .get_link = ethtool_op_get_link, 2203 .get_link = ethtool_op_get_link,
2213 .get_tx_csum = ethtool_op_get_tx_csum,
2214 .get_sg = ethtool_op_get_sg,
2215}; 2204};
2216 2205
2217static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2206static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2859,8 +2848,10 @@ static int __devinit emac_probe(struct platform_device *ofdev)
2859 if (err != 0) 2848 if (err != 0)
2860 goto err_detach_tah; 2849 goto err_detach_tah;
2861 2850
2862 if (dev->tah_dev) 2851 if (dev->tah_dev) {
2863 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 2852 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2853 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2854 }
2864 ndev->watchdog_timeo = 5 * HZ; 2855 ndev->watchdog_timeo = 5 * HZ;
2865 if (emac_phy_supports_gige(dev->phy_mode)) { 2856 if (emac_phy_supports_gige(dev->phy_mode)) {
2866 ndev->netdev_ops = &emac_gige_netdev_ops; 2857 ndev->netdev_ops = &emac_gige_netdev_ops;
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 8ff68ae6b520..136d7544cc33 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -782,7 +782,8 @@ static int ibmlana_open(struct net_device *dev)
782 782
783 /* register resources - only necessary for IRQ */ 783 /* register resources - only necessary for IRQ */
784 784
785 result = request_irq(priv->realirq, irq_handler, IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 785 result = request_irq(priv->realirq, irq_handler, IRQF_SHARED,
786 dev->name, dev);
786 if (result != 0) { 787 if (result != 0) {
787 printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq); 788 printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
788 return result; 789 return result;
@@ -894,12 +895,12 @@ static int ibmlana_irq;
894static int ibmlana_io; 895static int ibmlana_io;
895static int startslot; /* counts through slots when probing multiple devices */ 896static int startslot; /* counts through slots when probing multiple devices */
896 897
897static short ibmlana_adapter_ids[] __initdata = { 898static const short ibmlana_adapter_ids[] __devinitconst = {
898 IBM_LANA_ID, 899 IBM_LANA_ID,
899 0x0000 900 0x0000
900}; 901};
901 902
902static char *ibmlana_adapter_names[] __devinitdata = { 903static const char *const ibmlana_adapter_names[] __devinitconst = {
903 "IBM LAN Adapter/A", 904 "IBM LAN Adapter/A",
904 NULL 905 NULL
905}; 906};
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5522d459654c..b388d782c7c4 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -710,7 +710,7 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
710 SUPPORTED_FIBRE); 710 SUPPORTED_FIBRE);
711 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | 711 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
712 ADVERTISED_FIBRE); 712 ADVERTISED_FIBRE);
713 cmd->speed = SPEED_1000; 713 ethtool_cmd_speed_set(cmd, SPEED_1000);
714 cmd->duplex = DUPLEX_FULL; 714 cmd->duplex = DUPLEX_FULL;
715 cmd->port = PORT_FIBRE; 715 cmd->port = PORT_FIBRE;
716 cmd->phy_address = 0; 716 cmd->phy_address = 0;
@@ -729,45 +729,24 @@ static void netdev_get_drvinfo(struct net_device *dev,
729 sizeof(info->version) - 1); 729 sizeof(info->version) - 1);
730} 730}
731 731
732static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) 732static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
733{ 733{
734 struct ibmveth_adapter *adapter = netdev_priv(dev); 734 /*
735 735 * Since the ibmveth firmware interface does not have the
736 if (data) { 736 * concept of separate tx/rx checksum offload enable, if rx
737 adapter->rx_csum = 1; 737 * checksum is disabled we also have to disable tx checksum
738 } else { 738 * offload. Once we disable rx checksum offload, we are no
739 /* 739 * longer allowed to send tx buffers that are not properly
740 * Since the ibmveth firmware interface does not have the 740 * checksummed.
741 * concept of separate tx/rx checksum offload enable, if rx 741 */
742 * checksum is disabled we also have to disable tx checksum
743 * offload. Once we disable rx checksum offload, we are no
744 * longer allowed to send tx buffers that are not properly
745 * checksummed.
746 */
747 adapter->rx_csum = 0;
748 dev->features &= ~NETIF_F_IP_CSUM;
749 dev->features &= ~NETIF_F_IPV6_CSUM;
750 }
751}
752 742
753static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data) 743 if (!(features & NETIF_F_RXCSUM))
754{ 744 features &= ~NETIF_F_ALL_CSUM;
755 struct ibmveth_adapter *adapter = netdev_priv(dev);
756 745
757 if (data) { 746 return features;
758 if (adapter->fw_ipv4_csum_support)
759 dev->features |= NETIF_F_IP_CSUM;
760 if (adapter->fw_ipv6_csum_support)
761 dev->features |= NETIF_F_IPV6_CSUM;
762 adapter->rx_csum = 1;
763 } else {
764 dev->features &= ~NETIF_F_IP_CSUM;
765 dev->features &= ~NETIF_F_IPV6_CSUM;
766 }
767} 747}
768 748
769static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, 749static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
770 void (*done) (struct net_device *, u32))
771{ 750{
772 struct ibmveth_adapter *adapter = netdev_priv(dev); 751 struct ibmveth_adapter *adapter = netdev_priv(dev);
773 unsigned long set_attr, clr_attr, ret_attr; 752 unsigned long set_attr, clr_attr, ret_attr;
@@ -827,8 +806,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
827 } else 806 } else
828 adapter->fw_ipv6_csum_support = data; 807 adapter->fw_ipv6_csum_support = data;
829 808
830 if (ret == H_SUCCESS || ret6 == H_SUCCESS) 809 if (ret != H_SUCCESS || ret6 != H_SUCCESS)
831 done(dev, data); 810 adapter->rx_csum = data;
832 else 811 else
833 rc1 = -EIO; 812 rc1 = -EIO;
834 } else { 813 } else {
@@ -844,41 +823,22 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
844 return rc1 ? rc1 : rc2; 823 return rc1 ? rc1 : rc2;
845} 824}
846 825
847static int ibmveth_set_rx_csum(struct net_device *dev, u32 data) 826static int ibmveth_set_features(struct net_device *dev, u32 features)
848{ 827{
849 struct ibmveth_adapter *adapter = netdev_priv(dev); 828 struct ibmveth_adapter *adapter = netdev_priv(dev);
829 int rx_csum = !!(features & NETIF_F_RXCSUM);
830 int rc;
850 831
851 if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum)) 832 if (rx_csum == adapter->rx_csum)
852 return 0;
853
854 return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
855}
856
857static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
858{
859 struct ibmveth_adapter *adapter = netdev_priv(dev);
860 int rc = 0;
861
862 if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
863 return 0;
864 if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
865 return 0; 833 return 0;
866 834
867 if (data && !adapter->rx_csum) 835 rc = ibmveth_set_csum_offload(dev, rx_csum);
868 rc = ibmveth_set_csum_offload(dev, data, 836 if (rc && !adapter->rx_csum)
869 ibmveth_set_tx_csum_flags); 837 dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
870 else
871 ibmveth_set_tx_csum_flags(dev, data);
872 838
873 return rc; 839 return rc;
874} 840}
875 841
876static u32 ibmveth_get_rx_csum(struct net_device *dev)
877{
878 struct ibmveth_adapter *adapter = netdev_priv(dev);
879 return adapter->rx_csum;
880}
881
882static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) 842static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
883{ 843{
884 int i; 844 int i;
@@ -914,13 +874,9 @@ static const struct ethtool_ops netdev_ethtool_ops = {
914 .get_drvinfo = netdev_get_drvinfo, 874 .get_drvinfo = netdev_get_drvinfo,
915 .get_settings = netdev_get_settings, 875 .get_settings = netdev_get_settings,
916 .get_link = ethtool_op_get_link, 876 .get_link = ethtool_op_get_link,
917 .set_tx_csum = ibmveth_set_tx_csum,
918 .get_rx_csum = ibmveth_get_rx_csum,
919 .set_rx_csum = ibmveth_set_rx_csum,
920 .get_strings = ibmveth_get_strings, 877 .get_strings = ibmveth_get_strings,
921 .get_sset_count = ibmveth_get_sset_count, 878 .get_sset_count = ibmveth_get_sset_count,
922 .get_ethtool_stats = ibmveth_get_ethtool_stats, 879 .get_ethtool_stats = ibmveth_get_ethtool_stats,
923 .set_sg = ethtool_op_set_sg,
924}; 880};
925 881
926static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 882static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1345,6 +1301,8 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1345 .ndo_set_multicast_list = ibmveth_set_multicast_list, 1301 .ndo_set_multicast_list = ibmveth_set_multicast_list,
1346 .ndo_do_ioctl = ibmveth_ioctl, 1302 .ndo_do_ioctl = ibmveth_ioctl,
1347 .ndo_change_mtu = ibmveth_change_mtu, 1303 .ndo_change_mtu = ibmveth_change_mtu,
1304 .ndo_fix_features = ibmveth_fix_features,
1305 .ndo_set_features = ibmveth_set_features,
1348 .ndo_validate_addr = eth_validate_addr, 1306 .ndo_validate_addr = eth_validate_addr,
1349 .ndo_set_mac_address = eth_mac_addr, 1307 .ndo_set_mac_address = eth_mac_addr,
1350#ifdef CONFIG_NET_POLL_CONTROLLER 1308#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1412,7 +1370,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
1412 netdev->netdev_ops = &ibmveth_netdev_ops; 1370 netdev->netdev_ops = &ibmveth_netdev_ops;
1413 netdev->ethtool_ops = &netdev_ethtool_ops; 1371 netdev->ethtool_ops = &netdev_ethtool_ops;
1414 SET_NETDEV_DEV(netdev, &dev->dev); 1372 SET_NETDEV_DEV(netdev, &dev->dev);
1415 netdev->features |= NETIF_F_SG; 1373 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
1374 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1375 netdev->features |= netdev->hw_features;
1416 1376
1417 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 1377 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
1418 1378
@@ -1437,7 +1397,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
1437 1397
1438 netdev_dbg(netdev, "registering netdev...\n"); 1398 netdev_dbg(netdev, "registering netdev...\n");
1439 1399
1440 ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags); 1400 ibmveth_set_features(netdev, netdev->features);
1441 1401
1442 rc = register_netdev(netdev); 1402 rc = register_netdev(netdev);
1443 1403
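The ibmveth changes above show the general ndo_fix_features/ndo_set_features split: fix_features encodes dependencies between offload bits (here, no TX checksum offload while RX checksum offload is disabled), and set_features performs whatever hardware or firmware call the transition needs. The sketch below is a stripped-down illustration of that contract only; the myeth_* names and the stubbed firmware helper are hypothetical, not part of this patch.

	#include <linux/netdevice.h>

	/* Hypothetical firmware hook standing in for ibmveth's hypervisor call. */
	static int myeth_fw_set_rx_csum(struct net_device *dev, bool on)
	{
		return 0;	/* pretend it always succeeds */
	}

	/* Enforce dependencies between feature bits before the core applies
	 * them: if RX checksum offload is off, TX checksum offload goes too. */
	static u32 myeth_fix_features(struct net_device *dev, u32 features)
	{
		if (!(features & NETIF_F_RXCSUM))
			features &= ~NETIF_F_ALL_CSUM;
		return features;
	}

	/* Apply the change; a negative return tells the core the new state
	 * did not stick, so dev->features is left unchanged. */
	static int myeth_set_features(struct net_device *dev, u32 features)
	{
		bool rx_csum = !!(features & NETIF_F_RXCSUM);

		if (rx_csum == !!(dev->features & NETIF_F_RXCSUM))
			return 0;
		return myeth_fw_set_rx_csum(dev, rx_csum);
	}

Both callbacks are wired up through net_device_ops and the corresponding bits are placed in hw_features, exactly as the ibmveth hunks above do.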
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index e07d487f015a..4fecaed67fc4 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -233,10 +233,6 @@ static int __init ifb_init_one(int index)
233 if (!dev_ifb) 233 if (!dev_ifb)
234 return -ENOMEM; 234 return -ENOMEM;
235 235
236 err = dev_alloc_name(dev_ifb, dev_ifb->name);
237 if (err < 0)
238 goto err;
239
240 dev_ifb->rtnl_link_ops = &ifb_link_ops; 236 dev_ifb->rtnl_link_ops = &ifb_link_ops;
241 err = register_netdevice(dev_ifb); 237 err = register_netdevice(dev_ifb);
242 if (err < 0) 238 if (err < 0)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 6b256c275e10..0f563c8c5ffc 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -244,6 +244,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
244 */ 244 */
245 size += NVM_WORD_SIZE_BASE_SHIFT; 245 size += NVM_WORD_SIZE_BASE_SHIFT;
246 246
247 /*
248 * Check for invalid size
249 */
250 if ((hw->mac.type == e1000_82576) && (size > 15)) {
251 printk("igb: The NVM size is not valid, "
252 "defaulting to 32K.\n");
253 size = 15;
254 }
247 nvm->word_size = 1 << size; 255 nvm->word_size = 1 << size;
248 if (nvm->word_size == (1 << 15)) 256 if (nvm->word_size == (1 << 15))
249 nvm->page_size = 128; 257 nvm->page_size = 128;
@@ -1877,7 +1885,7 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
1877 } 1885 }
1878 1886
1879 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 1887 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
1880 /* if chekcsums compatibility bit is set validate checksums 1888 /* if checksums compatibility bit is set validate checksums
1881 * for all 4 ports. */ 1889 * for all 4 ports. */
1882 eeprom_regions_count = 4; 1890 eeprom_regions_count = 4;
1883 } 1891 }
@@ -1988,6 +1996,7 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
1988out: 1996out:
1989 return ret_val; 1997 return ret_val;
1990} 1998}
1999
1991/** 2000/**
1992 * igb_set_eee_i350 - Enable/disable EEE support 2001 * igb_set_eee_i350 - Enable/disable EEE support
1993 * @hw: pointer to the HW structure 2002 * @hw: pointer to the HW structure
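The size check added above protects the shift right below it: nvm->word_size is computed as 1 << size, and 1 << 15 (32K 16-bit words) appears to be the largest EEPROM size the driver expects for the 82576, so a larger exponent read back from the hardware is treated as bogus and clamped. A tiny standalone illustration of the arithmetic follows; only the threshold of 15 comes from the hunk above, the rest is generic C.

	#include <stdio.h>

	int main(void)
	{
		unsigned int size = 17;			/* bogus exponent from EEPROM */
		const unsigned int max_shift = 15;	/* 1 << 15 = 32768 words */

		if (size > max_shift) {
			printf("NVM size exponent %u invalid, clamping to %u\n",
			       size, max_shift);
			size = max_shift;
		}
		printf("word_size = %u words (%u bytes)\n",
		       1u << size, (1u << size) * 2);	/* 16-bit EEPROM words */
		return 0;
	}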
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 1c687e298d5e..f4fa4b1751cf 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -360,7 +360,7 @@ extern int igb_up(struct igb_adapter *);
360extern void igb_down(struct igb_adapter *); 360extern void igb_down(struct igb_adapter *);
361extern void igb_reinit_locked(struct igb_adapter *); 361extern void igb_reinit_locked(struct igb_adapter *);
362extern void igb_reset(struct igb_adapter *); 362extern void igb_reset(struct igb_adapter *);
363extern int igb_set_spd_dplx(struct igb_adapter *, u16); 363extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
364extern int igb_setup_tx_resources(struct igb_ring *); 364extern int igb_setup_tx_resources(struct igb_ring *);
365extern int igb_setup_rx_resources(struct igb_ring *); 365extern int igb_setup_rx_resources(struct igb_ring *);
366extern void igb_free_tx_resources(struct igb_ring *); 366extern void igb_free_tx_resources(struct igb_ring *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index d976733bbcc2..fdc895e5a3f8 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -178,11 +178,11 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
178 178
179 if ((status & E1000_STATUS_SPEED_1000) || 179 if ((status & E1000_STATUS_SPEED_1000) ||
180 hw->phy.media_type != e1000_media_type_copper) 180 hw->phy.media_type != e1000_media_type_copper)
181 ecmd->speed = SPEED_1000; 181 ethtool_cmd_speed_set(ecmd, SPEED_1000);
182 else if (status & E1000_STATUS_SPEED_100) 182 else if (status & E1000_STATUS_SPEED_100)
183 ecmd->speed = SPEED_100; 183 ethtool_cmd_speed_set(ecmd, SPEED_100);
184 else 184 else
185 ecmd->speed = SPEED_10; 185 ethtool_cmd_speed_set(ecmd, SPEED_10);
186 186
187 if ((status & E1000_STATUS_FD) || 187 if ((status & E1000_STATUS_FD) ||
188 hw->phy.media_type != e1000_media_type_copper) 188 hw->phy.media_type != e1000_media_type_copper)
@@ -190,7 +190,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
190 else 190 else
191 ecmd->duplex = DUPLEX_HALF; 191 ecmd->duplex = DUPLEX_HALF;
192 } else { 192 } else {
193 ecmd->speed = -1; 193 ethtool_cmd_speed_set(ecmd, -1);
194 ecmd->duplex = -1; 194 ecmd->duplex = -1;
195 } 195 }
196 196
@@ -223,7 +223,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
223 if (adapter->fc_autoneg) 223 if (adapter->fc_autoneg)
224 hw->fc.requested_mode = e1000_fc_default; 224 hw->fc.requested_mode = e1000_fc_default;
225 } else { 225 } else {
226 if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { 226 u32 speed = ethtool_cmd_speed(ecmd);
227 if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
227 clear_bit(__IGB_RESETTING, &adapter->state); 228 clear_bit(__IGB_RESETTING, &adapter->state);
228 return -EINVAL; 229 return -EINVAL;
229 } 230 }
@@ -1963,27 +1964,28 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1963/* bit defines for adapter->led_status */ 1964/* bit defines for adapter->led_status */
1964#define IGB_LED_ON 0 1965#define IGB_LED_ON 0
1965 1966
1966static int igb_phys_id(struct net_device *netdev, u32 data) 1967static int igb_set_phys_id(struct net_device *netdev,
1968 enum ethtool_phys_id_state state)
1967{ 1969{
1968 struct igb_adapter *adapter = netdev_priv(netdev); 1970 struct igb_adapter *adapter = netdev_priv(netdev);
1969 struct e1000_hw *hw = &adapter->hw; 1971 struct e1000_hw *hw = &adapter->hw;
1970 unsigned long timeout;
1971 1972
1972 timeout = data * 1000; 1973 switch (state) {
1973 1974 case ETHTOOL_ID_ACTIVE:
1974 /* 1975 igb_blink_led(hw);
1975 * msleep_interruptable only accepts unsigned int so we are limited 1976 return 2;
1976 * in how long a duration we can wait 1977 case ETHTOOL_ID_ON:
1977 */ 1978 igb_blink_led(hw);
1978 if (!timeout || timeout > UINT_MAX) 1979 break;
1979 timeout = UINT_MAX; 1980 case ETHTOOL_ID_OFF:
1980 1981 igb_led_off(hw);
1981 igb_blink_led(hw); 1982 break;
1982 msleep_interruptible(timeout); 1983 case ETHTOOL_ID_INACTIVE:
1983 1984 igb_led_off(hw);
1984 igb_led_off(hw); 1985 clear_bit(IGB_LED_ON, &adapter->led_status);
1985 clear_bit(IGB_LED_ON, &adapter->led_status); 1986 igb_cleanup_led(hw);
1986 igb_cleanup_led(hw); 1987 break;
1988 }
1987 1989
1988 return 0; 1990 return 0;
1989} 1991}
@@ -2215,7 +2217,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
2215 .set_tso = igb_set_tso, 2217 .set_tso = igb_set_tso,
2216 .self_test = igb_diag_test, 2218 .self_test = igb_diag_test,
2217 .get_strings = igb_get_strings, 2219 .get_strings = igb_get_strings,
2218 .phys_id = igb_phys_id, 2220 .set_phys_id = igb_set_phys_id,
2219 .get_sset_count = igb_get_sset_count, 2221 .get_sset_count = igb_get_sset_count,
2220 .get_ethtool_stats = igb_get_ethtool_stats, 2222 .get_ethtool_stats = igb_get_ethtool_stats,
2221 .get_coalesce = igb_get_coalesce, 2223 .get_coalesce = igb_get_coalesce,
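The phys_id → set_phys_id conversion above (repeated for igbvf and ixgb below) follows the state machine the ethtool core now drives: a positive return from ETHTOOL_ID_ACTIVE is the number of on/off cycles per second the core should request, zero means the hardware blinks asynchronously on its own, and ETHTOOL_ID_INACTIVE asks the driver to restore normal LED behaviour. A minimal sketch with hypothetical myeth_* LED helpers:

	#include <linux/netdevice.h>
	#include <linux/ethtool.h>

	/* Stand-ins for igb_blink_led()/igb_led_off(); hypothetical. */
	static void myeth_led_on(struct net_device *dev)  { /* write LED register */ }
	static void myeth_led_off(struct net_device *dev) { /* write LED register */ }

	static int myeth_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
	{
		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			return 2;	/* ask the core for 2 on/off cycles per second */
		case ETHTOOL_ID_ON:
			myeth_led_on(dev);
			break;
		case ETHTOOL_ID_OFF:
			myeth_led_off(dev);
			break;
		case ETHTOOL_ID_INACTIVE:
			myeth_led_off(dev);	/* restore normal LED behaviour */
			break;
		}
		return 0;
	}

	static const struct ethtool_ops myeth_ethtool_ops = {
		.set_phys_id = myeth_set_phys_id,
	};

Because the core owns the timing loop, the old msleep_interruptible() call here, and the blink_timer/led_status plumbing removed from ixgb below, are no longer needed.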
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 0dfd1b93829e..ce7838e55827 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3532,6 +3532,25 @@ bool igb_has_link(struct igb_adapter *adapter)
3532 return link_active; 3532 return link_active;
3533} 3533}
3534 3534
3535static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3536{
3537 bool ret = false;
3538 u32 ctrl_ext, thstat;
3539
3540 /* check for thermal sensor event on i350, copper only */
3541 if (hw->mac.type == e1000_i350) {
3542 thstat = rd32(E1000_THSTAT);
3543 ctrl_ext = rd32(E1000_CTRL_EXT);
3544
3545 if ((hw->phy.media_type == e1000_media_type_copper) &&
3546 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3547 ret = !!(thstat & event);
3548 }
3549 }
3550
3551 return ret;
3552}
3553
3535/** 3554/**
3536 * igb_watchdog - Timer Call-back 3555 * igb_watchdog - Timer Call-back
3537 * @data: pointer to adapter cast into an unsigned long 3556 * @data: pointer to adapter cast into an unsigned long
@@ -3550,7 +3569,7 @@ static void igb_watchdog_task(struct work_struct *work)
3550 watchdog_task); 3569 watchdog_task);
3551 struct e1000_hw *hw = &adapter->hw; 3570 struct e1000_hw *hw = &adapter->hw;
3552 struct net_device *netdev = adapter->netdev; 3571 struct net_device *netdev = adapter->netdev;
3553 u32 link, ctrl_ext, thstat; 3572 u32 link;
3554 int i; 3573 int i;
3555 3574
3556 link = igb_has_link(adapter); 3575 link = igb_has_link(adapter);
@@ -3574,25 +3593,14 @@ static void igb_watchdog_task(struct work_struct *work)
3574 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 3593 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3575 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); 3594 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3576 3595
3577 /* check for thermal sensor event on i350, 3596 /* check for thermal sensor event */
3578 * copper only */ 3597 if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
3579 if (hw->mac.type == e1000_i350) { 3598 printk(KERN_INFO "igb: %s The network adapter "
3580 thstat = rd32(E1000_THSTAT); 3599 "link speed was downshifted "
3581 ctrl_ext = rd32(E1000_CTRL_EXT); 3600 "because it overheated.\n",
3582 if ((hw->phy.media_type == 3601 netdev->name);
3583 e1000_media_type_copper) && !(ctrl_ext &
3584 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3585 if (thstat &
3586 E1000_THSTAT_LINK_THROTTLE) {
3587 printk(KERN_INFO "igb: %s The "
3588 "network adapter link "
3589 "speed was downshifted "
3590 "because it "
3591 "overheated.\n",
3592 netdev->name);
3593 }
3594 }
3595 } 3602 }
3603
3596 /* adjust timeout factor according to speed/duplex */ 3604 /* adjust timeout factor according to speed/duplex */
3597 adapter->tx_timeout_factor = 1; 3605 adapter->tx_timeout_factor = 1;
3598 switch (adapter->link_speed) { 3606 switch (adapter->link_speed) {
@@ -3618,22 +3626,15 @@ static void igb_watchdog_task(struct work_struct *work)
3618 if (netif_carrier_ok(netdev)) { 3626 if (netif_carrier_ok(netdev)) {
3619 adapter->link_speed = 0; 3627 adapter->link_speed = 0;
3620 adapter->link_duplex = 0; 3628 adapter->link_duplex = 0;
3621 /* check for thermal sensor event on i350 3629
3622 * copper only*/ 3630 /* check for thermal sensor event */
3623 if (hw->mac.type == e1000_i350) { 3631 if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
3624 thstat = rd32(E1000_THSTAT); 3632 printk(KERN_ERR "igb: %s The network adapter "
3625 ctrl_ext = rd32(E1000_CTRL_EXT); 3633 "was stopped because it "
3626 if ((hw->phy.media_type == 3634 "overheated.\n",
3627 e1000_media_type_copper) && !(ctrl_ext &
3628 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3629 if (thstat & E1000_THSTAT_PWR_DOWN) {
3630 printk(KERN_ERR "igb: %s The "
3631 "network adapter was stopped "
3632 "because it overheated.\n",
3633 netdev->name); 3635 netdev->name);
3634 }
3635 }
3636 } 3636 }
3637
3637 /* Links status message must follow this format */ 3638 /* Links status message must follow this format */
3638 printk(KERN_INFO "igb: %s NIC Link is Down\n", 3639 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3639 netdev->name); 3640 netdev->name);
@@ -6348,21 +6349,25 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
6348 } 6349 }
6349} 6350}
6350 6351
6351int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) 6352int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
6352{ 6353{
6353 struct pci_dev *pdev = adapter->pdev; 6354 struct pci_dev *pdev = adapter->pdev;
6354 struct e1000_mac_info *mac = &adapter->hw.mac; 6355 struct e1000_mac_info *mac = &adapter->hw.mac;
6355 6356
6356 mac->autoneg = 0; 6357 mac->autoneg = 0;
6357 6358
6359 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6360 * for the switch() below to work */
6361 if ((spd & 1) || (dplx & ~1))
6362 goto err_inval;
6363
6358 /* Fiber NIC's only allow 1000 Gbps Full duplex */ 6364 /* Fiber NIC's only allow 1000 Gbps Full duplex */
6359 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) && 6365 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
6360 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 6366 spd != SPEED_1000 &&
6361 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); 6367 dplx != DUPLEX_FULL)
6362 return -EINVAL; 6368 goto err_inval;
6363 }
6364 6369
6365 switch (spddplx) { 6370 switch (spd + dplx) {
6366 case SPEED_10 + DUPLEX_HALF: 6371 case SPEED_10 + DUPLEX_HALF:
6367 mac->forced_speed_duplex = ADVERTISE_10_HALF; 6372 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6368 break; 6373 break;
@@ -6381,10 +6386,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
6381 break; 6386 break;
6382 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 6387 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6383 default: 6388 default:
6384 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); 6389 goto err_inval;
6385 return -EINVAL;
6386 } 6390 }
6387 return 0; 6391 return 0;
6392
6393err_inval:
6394 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6395 return -EINVAL;
6388} 6396}
6389 6397
6390static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) 6398static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
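The (spd & 1) || (dplx & ~1) guard added to igb_set_spd_dplx() above is what keeps the spd + dplx switch unambiguous: duplex may only contribute the low bit (DUPLEX_HALF = 0, DUPLEX_FULL = 1) and the speed value must have that bit clear, so every accepted combination maps to a distinct sum (10, 11, 100, 101, 1000, 1001). A small standalone check of that property, using the SPEED_*/DUPLEX_* values from linux/ethtool.h; nothing below is driver code.

	#include <stdio.h>

	/* Values as defined in linux/ethtool.h. */
	#define SPEED_10	10
	#define SPEED_100	100
	#define SPEED_1000	1000
	#define DUPLEX_HALF	0
	#define DUPLEX_FULL	1

	int main(void)
	{
		const unsigned int speeds[] = { SPEED_10, SPEED_100, SPEED_1000 };
		const unsigned char duplexes[] = { DUPLEX_HALF, DUPLEX_FULL };
		unsigned int i, j;

		for (i = 0; i < 3; i++)
			for (j = 0; j < 2; j++) {
				unsigned int spd = speeds[i];
				unsigned char dplx = duplexes[j];

				/* same guard as the patch: low bit belongs to duplex */
				if ((spd & 1) || (dplx & ~1))
					printf("%u/%u rejected\n", spd, dplx);
				else
					printf("%u + %u -> case %u\n",
					       spd, dplx, spd + dplx);
			}
		return 0;
	}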
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 1d943aa7c7a6..b0b14d63dfbf 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -90,18 +90,18 @@ static int igbvf_get_settings(struct net_device *netdev,
90 status = er32(STATUS); 90 status = er32(STATUS);
91 if (status & E1000_STATUS_LU) { 91 if (status & E1000_STATUS_LU) {
92 if (status & E1000_STATUS_SPEED_1000) 92 if (status & E1000_STATUS_SPEED_1000)
93 ecmd->speed = 1000; 93 ethtool_cmd_speed_set(ecmd, SPEED_1000);
94 else if (status & E1000_STATUS_SPEED_100) 94 else if (status & E1000_STATUS_SPEED_100)
95 ecmd->speed = 100; 95 ethtool_cmd_speed_set(ecmd, SPEED_100);
96 else 96 else
97 ecmd->speed = 10; 97 ethtool_cmd_speed_set(ecmd, SPEED_10);
98 98
99 if (status & E1000_STATUS_FD) 99 if (status & E1000_STATUS_FD)
100 ecmd->duplex = DUPLEX_FULL; 100 ecmd->duplex = DUPLEX_FULL;
101 else 101 else
102 ecmd->duplex = DUPLEX_HALF; 102 ecmd->duplex = DUPLEX_HALF;
103 } else { 103 } else {
104 ecmd->speed = -1; 104 ethtool_cmd_speed_set(ecmd, -1);
105 ecmd->duplex = -1; 105 ecmd->duplex = -1;
106 } 106 }
107 107
@@ -391,11 +391,6 @@ static int igbvf_set_wol(struct net_device *netdev,
391 return -EOPNOTSUPP; 391 return -EOPNOTSUPP;
392} 392}
393 393
394static int igbvf_phys_id(struct net_device *netdev, u32 data)
395{
396 return 0;
397}
398
399static int igbvf_get_coalesce(struct net_device *netdev, 394static int igbvf_get_coalesce(struct net_device *netdev,
400 struct ethtool_coalesce *ec) 395 struct ethtool_coalesce *ec)
401{ 396{
@@ -527,7 +522,6 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
527 .self_test = igbvf_diag_test, 522 .self_test = igbvf_diag_test,
528 .get_sset_count = igbvf_get_sset_count, 523 .get_sset_count = igbvf_get_sset_count,
529 .get_strings = igbvf_get_strings, 524 .get_strings = igbvf_get_strings,
530 .phys_id = igbvf_phys_id,
531 .get_ethtool_stats = igbvf_get_ethtool_stats, 525 .get_ethtool_stats = igbvf_get_ethtool_stats,
532 .get_coalesce = igbvf_get_coalesce, 526 .get_coalesce = igbvf_get_coalesce,
533 .set_coalesce = igbvf_set_coalesce, 527 .set_coalesce = igbvf_set_coalesce,
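The recurring ecmd->speed = X → ethtool_cmd_speed_set(ecmd, X) edits (ibmveth and igb above, igbvf here, ixgb below) exist because struct ethtool_cmd carries the link speed in two 16-bit halves, speed and speed_hi, to allow rates above 65535 Mb/s; the accessors hide the split so drivers never touch the halves directly. They behave roughly like the sketch below (simplified; the example_* names are illustrative, see include/linux/ethtool.h for the real helpers):

	#include <linux/ethtool.h>
	#include <linux/types.h>

	/* Simplified view of the speed accessors. */
	static inline void example_speed_set(struct ethtool_cmd *ep, __u32 speed)
	{
		ep->speed    = (__u16)speed;		/* low 16 bits */
		ep->speed_hi = (__u16)(speed >> 16);	/* high 16 bits */
	}

	static inline __u32 example_speed_get(const struct ethtool_cmd *ep)
	{
		return (ep->speed_hi << 16) | ep->speed;
	}

Passing -1 through the setter, as the link-down branches above do, fills both halves with ones, which userspace ethtool then reports as an unknown speed.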
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index c8ee8d28767b..96c95617195f 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -90,8 +90,6 @@ struct ioc3_private {
90 u32 emcr, ehar_h, ehar_l; 90 u32 emcr, ehar_h, ehar_l;
91 spinlock_t ioc3_lock; 91 spinlock_t ioc3_lock;
92 struct mii_if_info mii; 92 struct mii_if_info mii;
93 unsigned long flags;
94#define IOC3_FLAG_RX_CHECKSUMS 1
95 93
96 struct pci_dev *pdev; 94 struct pci_dev *pdev;
97 95
@@ -609,7 +607,7 @@ static inline void ioc3_rx(struct net_device *dev)
609 goto next; 607 goto next;
610 } 608 }
611 609
612 if (likely(ip->flags & IOC3_FLAG_RX_CHECKSUMS)) 610 if (likely(dev->features & NETIF_F_RXCSUM))
613 ioc3_tcpudp_checksum(skb, 611 ioc3_tcpudp_checksum(skb,
614 w0 & ERXBUF_IPCKSUM_MASK, len); 612 w0 & ERXBUF_IPCKSUM_MASK, len);
615 613
@@ -1328,6 +1326,7 @@ static int __devinit ioc3_probe(struct pci_dev *pdev,
1328 dev->watchdog_timeo = 5 * HZ; 1326 dev->watchdog_timeo = 5 * HZ;
1329 dev->netdev_ops = &ioc3_netdev_ops; 1327 dev->netdev_ops = &ioc3_netdev_ops;
1330 dev->ethtool_ops = &ioc3_ethtool_ops; 1328 dev->ethtool_ops = &ioc3_ethtool_ops;
1329 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1331 dev->features = NETIF_F_IP_CSUM; 1330 dev->features = NETIF_F_IP_CSUM;
1332 1331
1333 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); 1332 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
@@ -1618,37 +1617,12 @@ static u32 ioc3_get_link(struct net_device *dev)
1618 return rc; 1617 return rc;
1619} 1618}
1620 1619
1621static u32 ioc3_get_rx_csum(struct net_device *dev)
1622{
1623 struct ioc3_private *ip = netdev_priv(dev);
1624
1625 return ip->flags & IOC3_FLAG_RX_CHECKSUMS;
1626}
1627
1628static int ioc3_set_rx_csum(struct net_device *dev, u32 data)
1629{
1630 struct ioc3_private *ip = netdev_priv(dev);
1631
1632 spin_lock_bh(&ip->ioc3_lock);
1633 if (data)
1634 ip->flags |= IOC3_FLAG_RX_CHECKSUMS;
1635 else
1636 ip->flags &= ~IOC3_FLAG_RX_CHECKSUMS;
1637 spin_unlock_bh(&ip->ioc3_lock);
1638
1639 return 0;
1640}
1641
1642static const struct ethtool_ops ioc3_ethtool_ops = { 1620static const struct ethtool_ops ioc3_ethtool_ops = {
1643 .get_drvinfo = ioc3_get_drvinfo, 1621 .get_drvinfo = ioc3_get_drvinfo,
1644 .get_settings = ioc3_get_settings, 1622 .get_settings = ioc3_get_settings,
1645 .set_settings = ioc3_set_settings, 1623 .set_settings = ioc3_set_settings,
1646 .nway_reset = ioc3_nway_reset, 1624 .nway_reset = ioc3_nway_reset,
1647 .get_link = ioc3_get_link, 1625 .get_link = ioc3_get_link,
1648 .get_rx_csum = ioc3_get_rx_csum,
1649 .set_rx_csum = ioc3_set_rx_csum,
1650 .get_tx_csum = ethtool_op_get_tx_csum,
1651 .set_tx_csum = ethtool_op_set_tx_csum
1652}; 1626};
1653 1627
1654static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1628static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8800e1fe4129..69b5707db369 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
222static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self); 222static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
223 223
224/* Probing */ 224/* Probing */
225static int __init smsc_ircc_look_for_chips(void); 225static int smsc_ircc_look_for_chips(void);
226static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type); 226static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
227static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type); 227static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
228static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type); 228static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
229static int __init smsc_superio_fdc(unsigned short cfg_base); 229static int smsc_superio_fdc(unsigned short cfg_base);
230static int __init smsc_superio_lpc(unsigned short cfg_base); 230static int smsc_superio_lpc(unsigned short cfg_base);
231#ifdef CONFIG_PCI 231#ifdef CONFIG_PCI
232static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf); 232static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
233static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); 233static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
234static void __init preconfigure_ali_port(struct pci_dev *dev, 234static void preconfigure_ali_port(struct pci_dev *dev,
235 unsigned short port); 235 unsigned short port);
236static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf); 236static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
237static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, 237static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
238 unsigned short ircc_fir, 238 unsigned short ircc_fir,
239 unsigned short ircc_sir, 239 unsigned short ircc_sir,
240 unsigned char ircc_dma, 240 unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
366} 366}
367 367
368/* PNP hotplug support */ 368/* PNP hotplug support */
369static const struct pnp_device_id smsc_ircc_pnp_table[] = { 369static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = {
370 { .id = "SMCf010", .driver_data = 0 }, 370 { .id = "SMCf010", .driver_data = 0 },
371 /* and presumably others */ 371 /* and presumably others */
372 { } 372 { }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
515 * Try to open driver instance 515 * Try to open driver instance
516 * 516 *
517 */ 517 */
518static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq) 518static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
519{ 519{
520 struct smsc_ircc_cb *self; 520 struct smsc_ircc_cb *self;
521 struct net_device *dev; 521 struct net_device *dev;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
2273} 2273}
2274 2274
2275 2275
2276static int __init smsc_access(unsigned short cfg_base, unsigned char reg) 2276static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
2277{ 2277{
2278 IRDA_DEBUG(1, "%s\n", __func__); 2278 IRDA_DEBUG(1, "%s\n", __func__);
2279 2279
@@ -2281,7 +2281,7 @@ static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
2281 return inb(cfg_base) != reg ? -1 : 0; 2281 return inb(cfg_base) != reg ? -1 : 0;
2282} 2282}
2283 2283
2284static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type) 2284static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
2285{ 2285{
2286 u8 devid, xdevid, rev; 2286 u8 devid, xdevid, rev;
2287 2287
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2406#ifdef CONFIG_PCI 2406#ifdef CONFIG_PCI
2407#define PCIID_VENDOR_INTEL 0x8086 2407#define PCIID_VENDOR_INTEL 0x8086
2408#define PCIID_VENDOR_ALI 0x10b9 2408#define PCIID_VENDOR_ALI 0x10b9
2409static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { 2409static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = {
2410 /* 2410 /*
2411 * Subsystems needing entries: 2411 * Subsystems needing entries:
2412 * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family 2412 * 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2532 * (FIR port, SIR port, FIR DMA, FIR IRQ) 2532 * (FIR port, SIR port, FIR DMA, FIR IRQ)
2533 * through the chip configuration port. 2533 * through the chip configuration port.
2534 */ 2534 */
2535static int __init preconfigure_smsc_chip(struct 2535static int __devinit preconfigure_smsc_chip(struct
2536 smsc_ircc_subsystem_configuration 2536 smsc_ircc_subsystem_configuration
2537 *conf) 2537 *conf)
2538{ 2538{
@@ -2633,7 +2633,7 @@ static int __init preconfigure_smsc_chip(struct
2633 * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge. 2633 * or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
2634 * They all work the same way! 2634 * They all work the same way!
2635 */ 2635 */
2636static int __init preconfigure_through_82801(struct pci_dev *dev, 2636static int __devinit preconfigure_through_82801(struct pci_dev *dev,
2637 struct 2637 struct
2638 smsc_ircc_subsystem_configuration 2638 smsc_ircc_subsystem_configuration
2639 *conf) 2639 *conf)
@@ -2786,7 +2786,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
2786 * This is based on reverse-engineering since ALi does not 2786 * This is based on reverse-engineering since ALi does not
2787 * provide any data sheet for the 1533 chip. 2787 * provide any data sheet for the 1533 chip.
2788 */ 2788 */
2789static void __init preconfigure_ali_port(struct pci_dev *dev, 2789static void __devinit preconfigure_ali_port(struct pci_dev *dev,
2790 unsigned short port) 2790 unsigned short port)
2791{ 2791{
2792 unsigned char reg; 2792 unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
2824 IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port); 2824 IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
2825} 2825}
2826 2826
2827static int __init preconfigure_through_ali(struct pci_dev *dev, 2827static int __devinit preconfigure_through_ali(struct pci_dev *dev,
2828 struct 2828 struct
2829 smsc_ircc_subsystem_configuration 2829 smsc_ircc_subsystem_configuration
2830 *conf) 2830 *conf)
@@ -2837,7 +2837,7 @@ static int __init preconfigure_through_ali(struct pci_dev *dev,
2837 return preconfigure_smsc_chip(conf); 2837 return preconfigure_smsc_chip(conf);
2838} 2838}
2839 2839
2840static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg, 2840static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2841 unsigned short ircc_fir, 2841 unsigned short ircc_fir,
2842 unsigned short ircc_sir, 2842 unsigned short ircc_sir,
2843 unsigned char ircc_dma, 2843 unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
2849 int ret = 0; 2849 int ret = 0;
2850 2850
2851 for_each_pci_dev(dev) { 2851 for_each_pci_dev(dev) {
2852 struct smsc_ircc_subsystem_configuration *conf; 2852 const struct smsc_ircc_subsystem_configuration *conf;
2853 2853
2854 /* 2854 /*
2855 * Cache the subsystem vendor/device: 2855 * Cache the subsystem vendor/device:
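The annotation churn in hp100 and smsc-ircc2 above (__init → __devinit, __initdata and plain arrays → __devinitconst) is about object lifetime rather than behaviour: __init code and data are freed once boot finishes, so anything reachable from a PCI/EISA/PNP probe path, which can run much later for a hot-plugged or late-bound device, has to use the __devinit family (discarded only when hotplug support is compiled out). A minimal sketch of the convention with a hypothetical mydrv PCI driver:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/pci.h>

	/* Referenced only from the probe path: __devinitconst, not __initconst,
	 * because probe may run after boot for a hot-plugged device. */
	static const char *const mydrv_board_names[] __devinitconst = {
		"Example board rev A",
		"Example board rev B",
	};

	static int __devinit mydrv_probe(struct pci_dev *pdev,
					 const struct pci_device_id *id)
	{
		dev_info(&pdev->dev, "probing %s\n", mydrv_board_names[0]);
		return 0;	/* claim the device */
	}

	static struct pci_driver mydrv_driver = {
		.name  = "mydrv",
		.probe = mydrv_probe,	/* reachable long after boot */
	};

	/* Runs exactly once at module load, so __init is still correct here. */
	static int __init mydrv_init(void)
	{
		return pci_register_driver(&mydrv_driver);
	}
	module_init(mydrv_init);

	MODULE_LICENSE("GPL");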
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 8f3df044e81e..49e8408f05fc 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -157,9 +157,6 @@ struct ixgb_adapter {
157 u16 link_duplex; 157 u16 link_duplex;
158 struct work_struct tx_timeout_task; 158 struct work_struct tx_timeout_task;
159 159
160 struct timer_list blink_timer;
161 unsigned long led_status;
162
163 /* TX */ 160 /* TX */
164 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; 161 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
165 unsigned int restart_queue; 162 unsigned int restart_queue;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cc53aa1541ba..6da890b9534c 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -104,10 +104,10 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
104 ecmd->transceiver = XCVR_EXTERNAL; 104 ecmd->transceiver = XCVR_EXTERNAL;
105 105
106 if (netif_carrier_ok(adapter->netdev)) { 106 if (netif_carrier_ok(adapter->netdev)) {
107 ecmd->speed = SPEED_10000; 107 ethtool_cmd_speed_set(ecmd, SPEED_10000);
108 ecmd->duplex = DUPLEX_FULL; 108 ecmd->duplex = DUPLEX_FULL;
109 } else { 109 } else {
110 ecmd->speed = -1; 110 ethtool_cmd_speed_set(ecmd, -1);
111 ecmd->duplex = -1; 111 ecmd->duplex = -1;
112 } 112 }
113 113
@@ -129,9 +129,10 @@ static int
129ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 129ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
130{ 130{
131 struct ixgb_adapter *adapter = netdev_priv(netdev); 131 struct ixgb_adapter *adapter = netdev_priv(netdev);
132 u32 speed = ethtool_cmd_speed(ecmd);
132 133
133 if (ecmd->autoneg == AUTONEG_ENABLE || 134 if (ecmd->autoneg == AUTONEG_ENABLE ||
134 ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) 135 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
135 return -EINVAL; 136 return -EINVAL;
136 137
137 if (netif_running(adapter->netdev)) { 138 if (netif_running(adapter->netdev)) {
@@ -610,45 +611,23 @@ err_setup_rx:
610 return err; 611 return err;
611} 612}
612 613
613/* toggle LED 4 times per second = 2 "blinks" per second */
614#define IXGB_ID_INTERVAL (HZ/4)
615
616/* bit defines for adapter->led_status */
617#define IXGB_LED_ON 0
618
619static void
620ixgb_led_blink_callback(unsigned long data)
621{
622 struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
623
624 if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
625 ixgb_led_off(&adapter->hw);
626 else
627 ixgb_led_on(&adapter->hw);
628
629 mod_timer(&adapter->blink_timer, jiffies + IXGB_ID_INTERVAL);
630}
631
632static int 614static int
633ixgb_phys_id(struct net_device *netdev, u32 data) 615ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
634{ 616{
635 struct ixgb_adapter *adapter = netdev_priv(netdev); 617 struct ixgb_adapter *adapter = netdev_priv(netdev);
636 618
637 if (!data) 619 switch (state) {
638 data = INT_MAX; 620 case ETHTOOL_ID_ACTIVE:
639 621 return 2;
640 if (!adapter->blink_timer.function) {
641 init_timer(&adapter->blink_timer);
642 adapter->blink_timer.function = ixgb_led_blink_callback;
643 adapter->blink_timer.data = (unsigned long)adapter;
644 }
645 622
646 mod_timer(&adapter->blink_timer, jiffies); 623 case ETHTOOL_ID_ON:
624 ixgb_led_on(&adapter->hw);
625 break;
647 626
648 msleep_interruptible(data * 1000); 627 case ETHTOOL_ID_OFF:
649 del_timer_sync(&adapter->blink_timer); 628 case ETHTOOL_ID_INACTIVE:
650 ixgb_led_off(&adapter->hw); 629 ixgb_led_off(&adapter->hw);
651 clear_bit(IXGB_LED_ON, &adapter->led_status); 630 }
652 631
653 return 0; 632 return 0;
654} 633}
@@ -766,7 +745,7 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
766 .set_msglevel = ixgb_set_msglevel, 745 .set_msglevel = ixgb_set_msglevel,
767 .set_tso = ixgb_set_tso, 746 .set_tso = ixgb_set_tso,
768 .get_strings = ixgb_get_strings, 747 .get_strings = ixgb_get_strings,
769 .phys_id = ixgb_phys_id, 748 .set_phys_id = ixgb_set_phys_id,
770 .get_sset_count = ixgb_get_sset_count, 749 .get_sset_count = ixgb_get_sset_count,
771 .get_ethtool_stats = ixgb_get_ethtool_stats, 750 .get_ethtool_stats = ixgb_get_ethtool_stats,
772 .get_flags = ethtool_op_get_flags, 751 .get_flags = ethtool_op_get_flags,
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8d468028bb55..e467b20ed1f0 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -106,6 +106,7 @@
106#define IXGBE_MAX_VF_FUNCTIONS 64 106#define IXGBE_MAX_VF_FUNCTIONS 64
107#define IXGBE_MAX_VFTA_ENTRIES 128 107#define IXGBE_MAX_VFTA_ENTRIES 128
108#define MAX_EMULATION_MAC_ADDRS 16 108#define MAX_EMULATION_MAC_ADDRS 16
109#define IXGBE_MAX_PF_MACVLANS 15
109#define VMDQ_P(p) ((p) + adapter->num_vfs) 110#define VMDQ_P(p) ((p) + adapter->num_vfs)
110 111
111struct vf_data_storage { 112struct vf_data_storage {
@@ -121,6 +122,15 @@ struct vf_data_storage {
121 u16 tx_rate; 122 u16 tx_rate;
122}; 123};
123 124
125struct vf_macvlans {
126 struct list_head l;
127 int vf;
128 int rar_entry;
129 bool free;
130 bool is_macvlan;
131 u8 vf_macvlan[ETH_ALEN];
132};
133
124/* wrapper around a pointer to a socket buffer, 134/* wrapper around a pointer to a socket buffer,
125 * so a DMA handle can be stored along with the buffer */ 135 * so a DMA handle can be stored along with the buffer */
126struct ixgbe_tx_buffer { 136struct ixgbe_tx_buffer {
@@ -331,10 +341,52 @@ struct ixgbe_q_vector {
331 341
332/* board specific private data structure */ 342/* board specific private data structure */
333struct ixgbe_adapter { 343struct ixgbe_adapter {
334 struct timer_list watchdog_timer; 344 unsigned long state;
345
346 /* Some features need tri-state capability,
347 * thus the additional *_CAPABLE flags.
348 */
349 u32 flags;
350#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
351#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
352#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
353#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
354#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
355#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
356#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
357#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
358#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
359#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
360#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
361#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
362#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
363#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
364#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
365#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
366#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
367#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
368#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
369#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
370#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
371#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
372#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
373#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
374#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
375#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
376#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
377
378 u32 flags2;
379#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
380#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
381#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
382#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
383#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
384#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
385#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
386#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
387
335 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 388 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
336 u16 bd_number; 389 u16 bd_number;
337 struct work_struct reset_task;
338 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 390 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
339 391
340 /* DCB parameters */ 392 /* DCB parameters */
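Keeping the *_CAPABLE and *_ENABLED bits side by side gives each feature three states: not supported, supported but off, and on. A small illustrative check of how that is typically used; this is not a helper added by the patch.

        static bool foo_try_enable_rsc(struct ixgbe_adapter *adapter)
        {
                if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
                        return false;                           /* hardware cannot do it */

                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;     /* supported: turn it on */
                return true;
        }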
@@ -377,43 +429,6 @@ struct ixgbe_adapter {
377 u32 alloc_rx_page_failed; 429 u32 alloc_rx_page_failed;
378 u32 alloc_rx_buff_failed; 430 u32 alloc_rx_buff_failed;
379 431
380 /* Some features need tri-state capability,
381 * thus the additional *_CAPABLE flags.
382 */
383 u32 flags;
384#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
385#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
386#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
387#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
388#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
389#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
390#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
391#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
392#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
393#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
394#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
395#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
396#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
397#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
398#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
399#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
400#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
401#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
402#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
403#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
404#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
405#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
406#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
407#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
408#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
409#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
410#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
411#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
412
413 u32 flags2;
414#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
415#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
416#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
417/* default to trying for four seconds */ 432/* default to trying for four seconds */
418#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 433#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
419 434
@@ -434,7 +449,6 @@ struct ixgbe_adapter {
434 u32 rx_eitr_param; 449 u32 rx_eitr_param;
435 u32 tx_eitr_param; 450 u32 tx_eitr_param;
436 451
437 unsigned long state;
438 u64 tx_busy; 452 u64 tx_busy;
439 unsigned int tx_ring_count; 453 unsigned int tx_ring_count;
440 unsigned int rx_ring_count; 454 unsigned int rx_ring_count;
@@ -443,15 +457,12 @@ struct ixgbe_adapter {
443 bool link_up; 457 bool link_up;
444 unsigned long link_check_timeout; 458 unsigned long link_check_timeout;
445 459
446 struct work_struct watchdog_task; 460 struct work_struct service_task;
447 struct work_struct sfp_task; 461 struct timer_list service_timer;
448 struct timer_list sfp_timer;
449 struct work_struct multispeed_fiber_task;
450 struct work_struct sfp_config_module_task;
451 u32 fdir_pballoc; 462 u32 fdir_pballoc;
452 u32 atr_sample_rate; 463 u32 atr_sample_rate;
464 unsigned long fdir_overflow; /* number of times ATR was backed off */
453 spinlock_t fdir_perfect_lock; 465 spinlock_t fdir_perfect_lock;
454 struct work_struct fdir_reinit_task;
455#ifdef IXGBE_FCOE 466#ifdef IXGBE_FCOE
456 struct ixgbe_fcoe fcoe; 467 struct ixgbe_fcoe fcoe;
457#endif /* IXGBE_FCOE */ 468#endif /* IXGBE_FCOE */
@@ -461,7 +472,7 @@ struct ixgbe_adapter {
461 u16 eeprom_version; 472 u16 eeprom_version;
462 473
463 int node; 474 int node;
464 struct work_struct check_overtemp_task; 475 u32 led_reg;
465 u32 interrupt_event; 476 u32 interrupt_event;
466 char lsc_int_name[IFNAMSIZ + 9]; 477 char lsc_int_name[IFNAMSIZ + 9];
467 478
@@ -470,13 +481,17 @@ struct ixgbe_adapter {
470 unsigned int num_vfs; 481 unsigned int num_vfs;
471 struct vf_data_storage *vfinfo; 482 struct vf_data_storage *vfinfo;
472 int vf_rate_link_speed; 483 int vf_rate_link_speed;
484 struct vf_macvlans vf_mvs;
485 struct vf_macvlans *mv_list;
486 bool antispoofing_enabled;
473}; 487};
474 488
475enum ixbge_state_t { 489enum ixbge_state_t {
476 __IXGBE_TESTING, 490 __IXGBE_TESTING,
477 __IXGBE_RESETTING, 491 __IXGBE_RESETTING,
478 __IXGBE_DOWN, 492 __IXGBE_DOWN,
479 __IXGBE_SFP_MODULE_NOT_FOUND 493 __IXGBE_SERVICE_SCHED,
494 __IXGBE_IN_SFP_INIT,
480}; 495};
481 496
482struct ixgbe_rsc_cb { 497struct ixgbe_rsc_cb {
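The replaced fields tell the larger story of this series: the separate watchdog/SFP/fiber/FDIR/overtemp work items collapse into a single service_task kicked by service_timer, with __IXGBE_SERVICE_SCHED preventing double-scheduling and the FLAG2 bits recording which sub-tasks are pending. A hedged sketch of that pattern, using illustrative foo_* names rather than the driver's actual service routines.

        static void foo_service_event_schedule(struct ixgbe_adapter *adapter)
        {
                if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
                    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
                        schedule_work(&adapter->service_task);
        }

        static void foo_service_task(struct work_struct *work)
        {
                struct ixgbe_adapter *adapter = container_of(work,
                                                             struct ixgbe_adapter,
                                                             service_task);

                if (adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) {
                        /* look for a newly inserted SFP module */
                }
                if (adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED) {
                        /* reset the hardware outside interrupt context */
                }
                if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                        /* refresh link state and watchdog output */
                }

                clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
        }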
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 845c679c8b87..8179e5060a18 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -37,6 +37,7 @@
37#define IXGBE_82598_RAR_ENTRIES 16 37#define IXGBE_82598_RAR_ENTRIES 16
38#define IXGBE_82598_MC_TBL_SIZE 128 38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128 39#define IXGBE_82598_VFT_TBL_SIZE 128
40#define IXGBE_82598_RX_PB_SIZE 512
40 41
41static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 42static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed speed, 43 ixgbe_link_speed speed,
@@ -197,14 +198,35 @@ out:
197 * @hw: pointer to hardware structure 198 * @hw: pointer to hardware structure
198 * 199 *
199 * Starts the hardware using the generic start_hw function. 200 * Starts the hardware using the generic start_hw function.
200 * Then set pcie completion timeout 201 * Disables relaxed ordering, then sets the PCIe completion timeout
202 *
201 **/ 203 **/
202static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) 204static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
203{ 205{
206 u32 regval;
207 u32 i;
204 s32 ret_val = 0; 208 s32 ret_val = 0;
205 209
206 ret_val = ixgbe_start_hw_generic(hw); 210 ret_val = ixgbe_start_hw_generic(hw);
207 211
212 /* Disable relaxed ordering */
213 for (i = 0; ((i < hw->mac.max_tx_queues) &&
214 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
215 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
216 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
217 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
218 }
219
220 for (i = 0; ((i < hw->mac.max_rx_queues) &&
221 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
222 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
223 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
224 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
225 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
226 }
227
228 hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
229
208 /* set the completion timeout for interface */ 230 /* set the completion timeout for interface */
209 if (ret_val == 0) 231 if (ret_val == 0)
210 ixgbe_set_pcie_completion_timeout(hw); 232 ixgbe_set_pcie_completion_timeout(hw);
@@ -1064,7 +1086,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1064 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; 1086 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1065 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) 1087 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1066 break; 1088 break;
1067 msleep(10); 1089 usleep_range(10000, 20000);
1068 } 1090 }
1069 1091
1070 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { 1092 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
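The msleep(10) to usleep_range(10000, 20000) change above is the conversion applied throughout the rest of this patch: for sleeps in the 1-20 ms range, msleep() rounds up to whole jiffies and can oversleep badly on low-HZ kernels, while usleep_range() is hrtimer based and gives the scheduler a window in which to coalesce wakeups. The rule used in this series appears to be min = ms * 1000, max = ms * 2000; a trivial illustrative wrapper, not a helper added by the patch:

        #include <linux/delay.h>

        /* Illustration only: the ms -> usleep_range() mapping used in this patch. */
        static inline void foo_sleep_ms(unsigned int ms)
        {
                usleep_range(ms * 1000, ms * 2000);
        }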
@@ -1188,6 +1210,38 @@ out:
1188 return physical_layer; 1210 return physical_layer;
1189} 1211}
1190 1212
1213/**
1214 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1215 * port devices.
1216 * @hw: pointer to the HW structure
1217 *
 1218 * Calls the common function and corrects an issue with some single-port devices
1219 * that enable LAN1 but not LAN0.
1220 **/
1221static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1222{
1223 struct ixgbe_bus_info *bus = &hw->bus;
1224 u16 pci_gen = 0;
1225 u16 pci_ctrl2 = 0;
1226
1227 ixgbe_set_lan_id_multi_port_pcie(hw);
1228
1229 /* check if LAN0 is disabled */
1230 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1231 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1232
1233 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1234
1235 /* if LAN0 is completely disabled force function to 0 */
1236 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1237 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1238 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1239
1240 bus->func = 0;
1241 }
1242 }
1243}
1244
1191static struct ixgbe_mac_operations mac_ops_82598 = { 1245static struct ixgbe_mac_operations mac_ops_82598 = {
1192 .init_hw = &ixgbe_init_hw_generic, 1246 .init_hw = &ixgbe_init_hw_generic,
1193 .reset_hw = &ixgbe_reset_hw_82598, 1247 .reset_hw = &ixgbe_reset_hw_82598,
@@ -1199,7 +1253,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1199 .get_mac_addr = &ixgbe_get_mac_addr_generic, 1253 .get_mac_addr = &ixgbe_get_mac_addr_generic,
1200 .stop_adapter = &ixgbe_stop_adapter_generic, 1254 .stop_adapter = &ixgbe_stop_adapter_generic,
1201 .get_bus_info = &ixgbe_get_bus_info_generic, 1255 .get_bus_info = &ixgbe_get_bus_info_generic,
1202 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 1256 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
1203 .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, 1257 .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
1204 .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, 1258 .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
1205 .setup_link = &ixgbe_setup_mac_link_82598, 1259 .setup_link = &ixgbe_setup_mac_link_82598,
@@ -1227,6 +1281,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1227static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1281static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1228 .init_params = &ixgbe_init_eeprom_params_generic, 1282 .init_params = &ixgbe_init_eeprom_params_generic,
1229 .read = &ixgbe_read_eerd_generic, 1283 .read = &ixgbe_read_eerd_generic,
1284 .read_buffer = &ixgbe_read_eerd_buffer_generic,
1230 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, 1285 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
1231 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 1286 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
1232 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 1287 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 00aeba385a2f..8ee661245af3 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -38,6 +38,7 @@
38#define IXGBE_82599_RAR_ENTRIES 128 38#define IXGBE_82599_RAR_ENTRIES 128
39#define IXGBE_82599_MC_TBL_SIZE 128 39#define IXGBE_82599_MC_TBL_SIZE 128
40#define IXGBE_82599_VFT_TBL_SIZE 128 40#define IXGBE_82599_VFT_TBL_SIZE 128
41#define IXGBE_82599_RX_PB_SIZE 512
41 42
42static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 43static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 44static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
@@ -61,6 +62,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
61 bool autoneg, 62 bool autoneg,
62 bool autoneg_wait_to_complete); 63 bool autoneg_wait_to_complete);
63static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 64static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
65static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
64 66
65static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 67static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
66{ 68{
@@ -86,7 +88,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
86 if ((mac->ops.get_media_type(hw) == 88 if ((mac->ops.get_media_type(hw) ==
87 ixgbe_media_type_backplane) && 89 ixgbe_media_type_backplane) &&
88 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 90 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
89 hw->phy.smart_speed == ixgbe_smart_speed_on)) 91 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
92 !ixgbe_verify_lesm_fw_enabled_82599(hw))
90 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; 93 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
91 else 94 else
92 mac->ops.setup_link = &ixgbe_setup_mac_link_82599; 95 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
@@ -107,7 +110,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
107 110
108 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 111 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
109 &data_offset); 112 &data_offset);
110
111 if (ret_val != 0) 113 if (ret_val != 0)
112 goto setup_sfp_out; 114 goto setup_sfp_out;
113 115
@@ -127,9 +129,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
127 } 129 }
128 130
129 /* Release the semaphore */ 131 /* Release the semaphore */
130 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 132 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
131 /* Delay obtaining semaphore again to allow FW access */ 133 /*
132 msleep(hw->eeprom.semaphore_delay); 134 * Delay obtaining semaphore again to allow FW access,
 135 * semaphore_delay is in ms; usleep_range needs us.
136 */
137 usleep_range(hw->eeprom.semaphore_delay * 1000,
138 hw->eeprom.semaphore_delay * 2000);
133 139
134 /* Now restart DSP by setting Restart_AN and clearing LMS */ 140 /* Now restart DSP by setting Restart_AN and clearing LMS */
135 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, 141 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
@@ -138,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
138 144
139 /* Wait for AN to leave state 0 */ 145 /* Wait for AN to leave state 0 */
140 for (i = 0; i < 10; i++) { 146 for (i = 0; i < 10; i++) {
141 msleep(4); 147 usleep_range(4000, 8000);
142 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); 148 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
143 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) 149 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
144 break; 150 break;
@@ -353,6 +359,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
353 case IXGBE_DEV_ID_82599_SFP: 359 case IXGBE_DEV_ID_82599_SFP:
354 case IXGBE_DEV_ID_82599_SFP_FCOE: 360 case IXGBE_DEV_ID_82599_SFP_FCOE:
355 case IXGBE_DEV_ID_82599_SFP_EM: 361 case IXGBE_DEV_ID_82599_SFP_EM:
362 case IXGBE_DEV_ID_82599_SFP_SF2:
356 media_type = ixgbe_media_type_fiber; 363 media_type = ixgbe_media_type_fiber;
357 break; 364 break;
358 case IXGBE_DEV_ID_82599_CX4: 365 case IXGBE_DEV_ID_82599_CX4:
@@ -361,6 +368,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
361 case IXGBE_DEV_ID_82599_T3_LOM: 368 case IXGBE_DEV_ID_82599_T3_LOM:
362 media_type = ixgbe_media_type_copper; 369 media_type = ixgbe_media_type_copper;
363 break; 370 break;
371 case IXGBE_DEV_ID_82599_LS:
372 media_type = ixgbe_media_type_fiber_lco;
373 break;
364 default: 374 default:
365 media_type = ixgbe_media_type_unknown; 375 media_type = ixgbe_media_type_unknown;
366 break; 376 break;
@@ -486,7 +496,7 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
486 * 496 *
487 * Set the link speed in the AUTOC register and restarts link. 497 * Set the link speed in the AUTOC register and restarts link.
488 **/ 498 **/
489s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 499static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
490 ixgbe_link_speed speed, 500 ixgbe_link_speed speed,
491 bool autoneg, 501 bool autoneg,
492 bool autoneg_wait_to_complete) 502 bool autoneg_wait_to_complete)
@@ -1176,7 +1186,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1176 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1186 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1177 IXGBE_FDIRCTRL_INIT_DONE) 1187 IXGBE_FDIRCTRL_INIT_DONE)
1178 break; 1188 break;
1179 msleep(1); 1189 usleep_range(1000, 2000);
1180 } 1190 }
1181 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1191 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1182 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); 1192 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
@@ -1271,7 +1281,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1271 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1281 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1272 IXGBE_FDIRCTRL_INIT_DONE) 1282 IXGBE_FDIRCTRL_INIT_DONE)
1273 break; 1283 break;
1274 msleep(1); 1284 usleep_range(1000, 2000);
1275 } 1285 }
1276 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1286 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1277 hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n"); 1287 hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
@@ -1740,30 +1750,29 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1740 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx 1750 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1741 * @hw: pointer to hardware structure 1751 * @hw: pointer to hardware structure
1742 * 1752 *
1743 * Starts the hardware using the generic start_hw function. 1753 * Starts the hardware using the generic start_hw function
1744 * Then performs device-specific: 1754 * and the generation-specific (gen2) start_hw function.
1745 * Clears the rate limiter registers. 1755 * Then performs revision-specific operations, if any.
1746 **/ 1756 **/
1747static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) 1757static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1748{ 1758{
1749 u32 q_num; 1759 s32 ret_val = 0;
1750 s32 ret_val;
1751 1760
1752 ret_val = ixgbe_start_hw_generic(hw); 1761 ret_val = ixgbe_start_hw_generic(hw);
1762 if (ret_val != 0)
1763 goto out;
1753 1764
1754 /* Clear the rate limiters */ 1765 ret_val = ixgbe_start_hw_gen2(hw);
1755 for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) { 1766 if (ret_val != 0)
1756 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num); 1767 goto out;
1757 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
1758 }
1759 IXGBE_WRITE_FLUSH(hw);
1760 1768
1761 /* We need to run link autotry after the driver loads */ 1769 /* We need to run link autotry after the driver loads */
1762 hw->mac.autotry_restart = true; 1770 hw->mac.autotry_restart = true;
1771 hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
1763 1772
1764 if (ret_val == 0) 1773 if (ret_val == 0)
1765 ret_val = ixgbe_verify_fw_version_82599(hw); 1774 ret_val = ixgbe_verify_fw_version_82599(hw);
1766 1775out:
1767 return ret_val; 1776 return ret_val;
1768} 1777}
1769 1778
@@ -1775,7 +1784,7 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1775 * If PHY already detected, maintains current PHY type in hw struct, 1784 * If PHY already detected, maintains current PHY type in hw struct,
1776 * otherwise executes the PHY detection routine. 1785 * otherwise executes the PHY detection routine.
1777 **/ 1786 **/
1778s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1787static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1779{ 1788{
1780 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1789 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1781 1790
@@ -1968,21 +1977,6 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1968} 1977}
1969 1978
1970/** 1979/**
1971 * ixgbe_get_device_caps_82599 - Get additional device capabilities
1972 * @hw: pointer to hardware structure
1973 * @device_caps: the EEPROM word with the extra device capabilities
1974 *
1975 * This function will read the EEPROM location for the device capabilities,
1976 * and return the word through device_caps.
1977 **/
1978static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
1979{
1980 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
1981
1982 return 0;
1983}
1984
1985/**
1986 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 1980 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
1987 * @hw: pointer to hardware structure 1981 * @hw: pointer to hardware structure
1988 * 1982 *
@@ -2030,6 +2024,110 @@ fw_version_out:
2030 return status; 2024 return status;
2031} 2025}
2032 2026
2027/**
2028 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2029 * @hw: pointer to hardware structure
2030 *
2031 * Returns true if the LESM FW module is present and enabled. Otherwise
2032 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2033 **/
2034static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2035{
2036 bool lesm_enabled = false;
2037 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2038 s32 status;
2039
2040 /* get the offset to the Firmware Module block */
2041 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2042
2043 if ((status != 0) ||
2044 (fw_offset == 0) || (fw_offset == 0xFFFF))
2045 goto out;
2046
2047 /* get the offset to the LESM Parameters block */
2048 status = hw->eeprom.ops.read(hw, (fw_offset +
2049 IXGBE_FW_LESM_PARAMETERS_PTR),
2050 &fw_lesm_param_offset);
2051
2052 if ((status != 0) ||
2053 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2054 goto out;
2055
2056 /* get the lesm state word */
2057 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2058 IXGBE_FW_LESM_STATE_1),
2059 &fw_lesm_state);
2060
2061 if ((status == 0) &&
2062 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2063 lesm_enabled = true;
2064
2065out:
2066 return lesm_enabled;
2067}
2068
2069/**
2070 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2071 * fastest available method
2072 *
2073 * @hw: pointer to hardware structure
2074 * @offset: offset of word in EEPROM to read
2075 * @words: number of words
2076 * @data: word(s) read from the EEPROM
2077 *
2078 * Retrieves 16 bit word(s) read from EEPROM
2079 **/
2080static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2081 u16 words, u16 *data)
2082{
2083 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2084 s32 ret_val = IXGBE_ERR_CONFIG;
2085
2086 /*
2087 * If EEPROM is detected and can be addressed using 14 bits,
2088 * use EERD otherwise use bit bang
2089 */
2090 if ((eeprom->type == ixgbe_eeprom_spi) &&
2091 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2092 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2093 data);
2094 else
2095 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2096 words,
2097 data);
2098
2099 return ret_val;
2100}
2101
2102/**
2103 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2104 * fastest available method
2105 *
2106 * @hw: pointer to hardware structure
2107 * @offset: offset of word in the EEPROM to read
2108 * @data: word read from the EEPROM
2109 *
2110 * Reads a 16 bit word from the EEPROM
2111 **/
2112static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2113 u16 offset, u16 *data)
2114{
2115 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2116 s32 ret_val = IXGBE_ERR_CONFIG;
2117
2118 /*
2119 * If EEPROM is detected and can be addressed using 14 bits,
2120 * use EERD otherwise use bit bang
2121 */
2122 if ((eeprom->type == ixgbe_eeprom_spi) &&
2123 (offset <= IXGBE_EERD_MAX_ADDR))
2124 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2125 else
2126 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2127
2128 return ret_val;
2129}
2130
2033static struct ixgbe_mac_operations mac_ops_82599 = { 2131static struct ixgbe_mac_operations mac_ops_82599 = {
2034 .init_hw = &ixgbe_init_hw_generic, 2132 .init_hw = &ixgbe_init_hw_generic,
2035 .reset_hw = &ixgbe_reset_hw_82599, 2133 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2040,7 +2138,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2040 .enable_rx_dma = &ixgbe_enable_rx_dma_82599, 2138 .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
2041 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2139 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2042 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 2140 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
2043 .get_device_caps = &ixgbe_get_device_caps_82599, 2141 .get_device_caps = &ixgbe_get_device_caps_generic,
2044 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, 2142 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
2045 .stop_adapter = &ixgbe_stop_adapter_generic, 2143 .stop_adapter = &ixgbe_stop_adapter_generic,
2046 .get_bus_info = &ixgbe_get_bus_info_generic, 2144 .get_bus_info = &ixgbe_get_bus_info_generic,
@@ -2076,8 +2174,10 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2076 2174
2077static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2175static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2078 .init_params = &ixgbe_init_eeprom_params_generic, 2176 .init_params = &ixgbe_init_eeprom_params_generic,
2079 .read = &ixgbe_read_eerd_generic, 2177 .read = &ixgbe_read_eeprom_82599,
2178 .read_buffer = &ixgbe_read_eeprom_buffer_82599,
2080 .write = &ixgbe_write_eeprom_generic, 2179 .write = &ixgbe_write_eeprom_generic,
2180 .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
2081 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, 2181 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2082 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2182 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2083 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2183 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
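With read_buffer and write_buffer wired into eeprom_ops_82599, callers can move several EEPROM words per call and the 82599 wrappers above pick EERD or bit-bang depending on whether the offset range is EERD-addressable. A hedged caller-side sketch; the function name and the four-word block are made up for illustration.

        static s32 foo_read_eeprom_block(struct ixgbe_hw *hw, u16 offset)
        {
                u16 words[4];
                s32 ret;

                /* One call covers the block; the op chooses EERD or bit-bang. */
                ret = hw->eeprom.ops.read_buffer(hw, offset, ARRAY_SIZE(words), words);
                if (ret != 0)
                        hw_dbg(hw, "EEPROM buffer read failed at offset 0x%04x\n",
                               offset);

                return ret;
        }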
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index bcd952916eb2..b894b42a741c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -54,6 +54,13 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); 55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
57static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
58static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data);
60static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
61 u16 words, u16 *data);
62static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 u16 offset);
57 64
58/** 65/**
59 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 66 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -96,6 +103,45 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
96} 103}
97 104
98/** 105/**
106 * ixgbe_start_hw_gen2 - Init sequence for common device family
107 * @hw: pointer to hw structure
108 *
109 * Performs the init sequence common to the second generation
110 * of 10 GbE devices.
111 * Devices in the second generation:
112 * 82599
113 * X540
114 **/
115s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
116{
117 u32 i;
118 u32 regval;
119
120 /* Clear the rate limiters */
121 for (i = 0; i < hw->mac.max_tx_queues; i++) {
122 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
123 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
124 }
125 IXGBE_WRITE_FLUSH(hw);
126
127 /* Disable relaxed ordering */
128 for (i = 0; i < hw->mac.max_tx_queues; i++) {
129 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
130 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
131 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
132 }
133
134 for (i = 0; i < hw->mac.max_rx_queues; i++) {
135 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
136 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
137 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
138 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
139 }
140
141 return 0;
142}
143
144/**
99 * ixgbe_init_hw_generic - Generic hardware initialization 145 * ixgbe_init_hw_generic - Generic hardware initialization
100 * @hw: pointer to hardware structure 146 * @hw: pointer to hardware structure
101 * 147 *
@@ -464,7 +510,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
464 reg_val &= ~(IXGBE_RXCTRL_RXEN); 510 reg_val &= ~(IXGBE_RXCTRL_RXEN);
465 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 511 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
466 IXGBE_WRITE_FLUSH(hw); 512 IXGBE_WRITE_FLUSH(hw);
467 msleep(2); 513 usleep_range(2000, 4000);
468 514
469 /* Clear interrupt mask to stop from interrupts being generated */ 515 /* Clear interrupt mask to stop from interrupts being generated */
470 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 516 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -545,6 +591,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
545 /* Set default semaphore delay to 10ms which is a well 591 /* Set default semaphore delay to 10ms which is a well
546 * tested value */ 592 * tested value */
547 eeprom->semaphore_delay = 10; 593 eeprom->semaphore_delay = 10;
594 /* Clear EEPROM page size, it will be initialized as needed */
595 eeprom->word_page_size = 0;
548 596
549 /* 597 /*
550 * Check for EEPROM present first. 598 * Check for EEPROM present first.
@@ -577,26 +625,78 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
577} 625}
578 626
579/** 627/**
580 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM 628 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
581 * @hw: pointer to hardware structure 629 * @hw: pointer to hardware structure
582 * @offset: offset within the EEPROM to be written to 630 * @offset: offset within the EEPROM to write
583 * @data: 16 bit word to be written to the EEPROM 631 * @words: number of words
632 * @data: 16 bit word(s) to write to EEPROM
584 * 633 *
585 * If ixgbe_eeprom_update_checksum is not called after this function, the 634 * Writes 16 bit word(s) to EEPROM through the bit-bang method
586 * EEPROM will most likely contain an invalid checksum.
587 **/ 635 **/
588s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) 636s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
637 u16 words, u16 *data)
589{ 638{
590 s32 status; 639 s32 status = 0;
591 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; 640 u16 i, count;
592 641
593 hw->eeprom.ops.init_params(hw); 642 hw->eeprom.ops.init_params(hw);
594 643
595 if (offset >= hw->eeprom.word_size) { 644 if (words == 0) {
645 status = IXGBE_ERR_INVALID_ARGUMENT;
646 goto out;
647 }
648
649 if (offset + words > hw->eeprom.word_size) {
596 status = IXGBE_ERR_EEPROM; 650 status = IXGBE_ERR_EEPROM;
597 goto out; 651 goto out;
598 } 652 }
599 653
654 /*
655 * The EEPROM page size cannot be queried from the chip. We do lazy
 656 * initialization; it is worth doing when writing a large buffer.
657 */
658 if ((hw->eeprom.word_page_size == 0) &&
659 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
660 ixgbe_detect_eeprom_page_size_generic(hw, offset);
661
662 /*
663 * We cannot hold synchronization semaphores for too long
 664 * to avoid starving other entities. However, it is more efficient
 665 * to write in bursts than to synchronize access for each word.
666 */
667 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
668 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
669 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
670 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
671 count, &data[i]);
672
673 if (status != 0)
674 break;
675 }
676
677out:
678 return status;
679}
680
681/**
682 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
683 * @hw: pointer to hardware structure
684 * @offset: offset within the EEPROM to be written to
685 * @words: number of word(s)
686 * @data: 16 bit word(s) to be written to the EEPROM
687 *
688 * If ixgbe_eeprom_update_checksum is not called after this function, the
689 * EEPROM will most likely contain an invalid checksum.
690 **/
691static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
692 u16 words, u16 *data)
693{
694 s32 status;
695 u16 word;
696 u16 page_size;
697 u16 i;
698 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
699
600 /* Prepare the EEPROM for writing */ 700 /* Prepare the EEPROM for writing */
601 status = ixgbe_acquire_eeprom(hw); 701 status = ixgbe_acquire_eeprom(hw);
602 702
@@ -608,62 +708,147 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
608 } 708 }
609 709
610 if (status == 0) { 710 if (status == 0) {
611 ixgbe_standby_eeprom(hw); 711 for (i = 0; i < words; i++) {
712 ixgbe_standby_eeprom(hw);
612 713
613 /* Send the WRITE ENABLE command (8 bit opcode ) */ 714 /* Send the WRITE ENABLE command (8 bit opcode ) */
614 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, 715 ixgbe_shift_out_eeprom_bits(hw,
615 IXGBE_EEPROM_OPCODE_BITS); 716 IXGBE_EEPROM_WREN_OPCODE_SPI,
717 IXGBE_EEPROM_OPCODE_BITS);
616 718
617 ixgbe_standby_eeprom(hw); 719 ixgbe_standby_eeprom(hw);
618 720
619 /* 721 /*
620 * Some SPI eeproms use the 8th address bit embedded in the 722 * Some SPI eeproms use the 8th address bit embedded
621 * opcode 723 * in the opcode
622 */ 724 */
623 if ((hw->eeprom.address_bits == 8) && (offset >= 128)) 725 if ((hw->eeprom.address_bits == 8) &&
624 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 726 ((offset + i) >= 128))
727 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
728
729 /* Send the Write command (8-bit opcode + addr) */
730 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
731 IXGBE_EEPROM_OPCODE_BITS);
732 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
733 hw->eeprom.address_bits);
734
735 page_size = hw->eeprom.word_page_size;
736
737 /* Send the data in burst via SPI*/
738 do {
739 word = data[i];
740 word = (word >> 8) | (word << 8);
741 ixgbe_shift_out_eeprom_bits(hw, word, 16);
742
743 if (page_size == 0)
744 break;
745
746 /* do not wrap around page */
747 if (((offset + i) & (page_size - 1)) ==
748 (page_size - 1))
749 break;
750 } while (++i < words);
751
752 ixgbe_standby_eeprom(hw);
753 usleep_range(10000, 20000);
754 }
755 /* Done with writing - release the EEPROM */
756 ixgbe_release_eeprom(hw);
757 }
625 758
626 /* Send the Write command (8-bit opcode + addr) */ 759 return status;
627 ixgbe_shift_out_eeprom_bits(hw, write_opcode, 760}
628 IXGBE_EEPROM_OPCODE_BITS);
629 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
630 hw->eeprom.address_bits);
631 761
632 /* Send the data */ 762/**
633 data = (data >> 8) | (data << 8); 763 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
634 ixgbe_shift_out_eeprom_bits(hw, data, 16); 764 * @hw: pointer to hardware structure
635 ixgbe_standby_eeprom(hw); 765 * @offset: offset within the EEPROM to be written to
766 * @data: 16 bit word to be written to the EEPROM
767 *
768 * If ixgbe_eeprom_update_checksum is not called after this function, the
769 * EEPROM will most likely contain an invalid checksum.
770 **/
771s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
772{
773 s32 status;
636 774
637 /* Done with writing - release the EEPROM */ 775 hw->eeprom.ops.init_params(hw);
638 ixgbe_release_eeprom(hw); 776
777 if (offset >= hw->eeprom.word_size) {
778 status = IXGBE_ERR_EEPROM;
779 goto out;
639 } 780 }
640 781
782 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
783
641out: 784out:
642 return status; 785 return status;
643} 786}
644 787
645/** 788/**
646 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang 789 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
647 * @hw: pointer to hardware structure 790 * @hw: pointer to hardware structure
648 * @offset: offset within the EEPROM to be read 791 * @offset: offset within the EEPROM to be read
649 * @data: read 16 bit value from EEPROM 792 * @words: number of word(s)
793 * @data: read 16 bit words(s) from EEPROM
650 * 794 *
651 * Reads 16 bit value from EEPROM through bit-bang method 795 * Reads 16 bit word(s) from EEPROM through bit-bang method
652 **/ 796 **/
653s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 797s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
654 u16 *data) 798 u16 words, u16 *data)
655{ 799{
656 s32 status; 800 s32 status = 0;
657 u16 word_in; 801 u16 i, count;
658 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
659 802
660 hw->eeprom.ops.init_params(hw); 803 hw->eeprom.ops.init_params(hw);
661 804
662 if (offset >= hw->eeprom.word_size) { 805 if (words == 0) {
806 status = IXGBE_ERR_INVALID_ARGUMENT;
807 goto out;
808 }
809
810 if (offset + words > hw->eeprom.word_size) {
663 status = IXGBE_ERR_EEPROM; 811 status = IXGBE_ERR_EEPROM;
664 goto out; 812 goto out;
665 } 813 }
666 814
815 /*
816 * We cannot hold synchronization semaphores for too long
 817 * to avoid starving other entities. However, it is more efficient
 818 * to read in bursts than to synchronize access for each word.
819 */
820 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
821 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
822 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
823
824 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
825 count, &data[i]);
826
827 if (status != 0)
828 break;
829 }
830
831out:
832 return status;
833}
834
835/**
836 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
837 * @hw: pointer to hardware structure
838 * @offset: offset within the EEPROM to be read
839 * @words: number of word(s)
840 * @data: read 16 bit word(s) from EEPROM
841 *
842 * Reads 16 bit word(s) from EEPROM through bit-bang method
843 **/
844static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
845 u16 words, u16 *data)
846{
847 s32 status;
848 u16 word_in;
849 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
850 u16 i;
851
667 /* Prepare the EEPROM for reading */ 852 /* Prepare the EEPROM for reading */
668 status = ixgbe_acquire_eeprom(hw); 853 status = ixgbe_acquire_eeprom(hw);
669 854
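The burst loop in ixgbe_write_eeprom_buffer_bit_bang above streams data words after a single WRITE command but must stop at the last word of an SPI page, because the device's internal address counter wraps within a page. A standalone arithmetic sketch of that boundary rule; the offset and the 32-word page are example values, not driver constants.

        #include <stdio.h>

        /*
         * How many words the burst can stream starting at "offset" before the
         * write must be re-issued on the next page (page_size in words, power
         * of two; 0 means the page size is unknown, so one word at a time).
         */
        static unsigned int words_until_page_end(unsigned int offset,
                                                 unsigned int page_size)
        {
                if (page_size == 0)
                        return 1;
                return page_size - (offset & (page_size - 1));
        }

        int main(void)
        {
                /* 32-word page, offset 0x3D: words 0x3D..0x3F fit, so 3. */
                printf("%u\n", words_until_page_end(0x3D, 32));
                return 0;
        }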
@@ -675,29 +860,145 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
675 } 860 }
676 861
677 if (status == 0) { 862 if (status == 0) {
678 ixgbe_standby_eeprom(hw); 863 for (i = 0; i < words; i++) {
864 ixgbe_standby_eeprom(hw);
865 /*
866 * Some SPI eeproms use the 8th address bit embedded
867 * in the opcode
868 */
869 if ((hw->eeprom.address_bits == 8) &&
870 ((offset + i) >= 128))
871 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
872
873 /* Send the READ command (opcode + addr) */
874 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
875 IXGBE_EEPROM_OPCODE_BITS);
876 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
877 hw->eeprom.address_bits);
878
879 /* Read the data. */
880 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
881 data[i] = (word_in >> 8) | (word_in << 8);
882 }
679 883
680 /* 884 /* End this read operation */
681 * Some SPI eeproms use the 8th address bit embedded in the 885 ixgbe_release_eeprom(hw);
682 * opcode 886 }
683 */
684 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
685 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
686 887
687 /* Send the READ command (opcode + addr) */ 888 return status;
688 ixgbe_shift_out_eeprom_bits(hw, read_opcode, 889}
689 IXGBE_EEPROM_OPCODE_BITS); 890
690 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), 891/**
691 hw->eeprom.address_bits); 892 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
893 * @hw: pointer to hardware structure
894 * @offset: offset within the EEPROM to be read
895 * @data: read 16 bit value from EEPROM
896 *
897 * Reads 16 bit value from EEPROM through bit-bang method
898 **/
899s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
900 u16 *data)
901{
902 s32 status;
692 903
693 /* Read the data. */ 904 hw->eeprom.ops.init_params(hw);
694 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
695 *data = (word_in >> 8) | (word_in << 8);
696 905
697 /* End this read operation */ 906 if (offset >= hw->eeprom.word_size) {
698 ixgbe_release_eeprom(hw); 907 status = IXGBE_ERR_EEPROM;
908 goto out;
909 }
910
911 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
912
913out:
914 return status;
915}
916
917/**
918 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
919 * @hw: pointer to hardware structure
920 * @offset: offset of word in the EEPROM to read
921 * @words: number of word(s)
922 * @data: 16 bit word(s) from the EEPROM
923 *
924 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
925 **/
926s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
927 u16 words, u16 *data)
928{
929 u32 eerd;
930 s32 status = 0;
931 u32 i;
932
933 hw->eeprom.ops.init_params(hw);
934
935 if (words == 0) {
936 status = IXGBE_ERR_INVALID_ARGUMENT;
937 goto out;
938 }
939
940 if (offset >= hw->eeprom.word_size) {
941 status = IXGBE_ERR_EEPROM;
942 goto out;
699 } 943 }
700 944
945 for (i = 0; i < words; i++) {
946 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
947 IXGBE_EEPROM_RW_REG_START;
948
949 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
950 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
951
952 if (status == 0) {
953 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
954 IXGBE_EEPROM_RW_REG_DATA);
955 } else {
956 hw_dbg(hw, "Eeprom read timed out\n");
957 goto out;
958 }
959 }
960out:
961 return status;
962}
963
964/**
965 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
966 * @hw: pointer to hardware structure
967 * @offset: offset within the EEPROM to be used as a scratch pad
968 *
 969 * Discover the EEPROM page size by writing marching data at the given offset.
 970 * This function is called only when we are writing a new large buffer
 971 * at that offset, so the data would be overwritten anyway.
972 **/
973static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
974 u16 offset)
975{
976 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
977 s32 status = 0;
978 u16 i;
979
980 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
981 data[i] = i;
982
983 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
984 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
985 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
986 hw->eeprom.word_page_size = 0;
987 if (status != 0)
988 goto out;
989
990 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
991 if (status != 0)
992 goto out;
993
994 /*
995 * When writing in burst more than the actual page size
996 * EEPROM address wraps around current page.
997 */
998 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
999
1000 hw_dbg(hw, "Detected EEPROM page size = %d words.",
1001 hw->eeprom.word_page_size);
701out: 1002out:
702 return status; 1003 return status;
703} 1004}
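ixgbe_detect_eeprom_page_size_generic relies on that wrap-around: it writes the marching values 0..IXGBE_EEPROM_PAGE_SIZE_MAX-1 as one burst, so each time the address wraps the earlier words are overwritten, and the value left at the probed word ends up being MAX - page_size; reading it back gives page_size = MAX - data[0]. A standalone model of the trick; the probe size of 128 and the 32-word physical page are assumed example values, not taken from this patch.

        #include <stdio.h>

        int main(void)
        {
                unsigned int page_size = 32;    /* unknown to the driver, modeled here */
                unsigned int probe = 128;       /* assumed IXGBE_EEPROM_PAGE_SIZE_MAX */
                unsigned int page[32];
                unsigned int i;

                /* A single burst write wraps inside the physical page. */
                for (i = 0; i < probe; i++)
                        page[i % page_size] = i;

                /* The driver reads back word 0 and recovers the page size. */
                printf("detected page size = %u words\n", probe - page[0]);
                return 0;
        }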
@@ -712,33 +1013,75 @@ out:
712 **/ 1013 **/
713s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 1014s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
714{ 1015{
715 u32 eerd; 1016 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
716 s32 status; 1017}
1018
1019/**
1020 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1021 * @hw: pointer to hardware structure
1022 * @offset: offset of word in the EEPROM to write
1023 * @words: number of words
1024 * @data: word(s) write to the EEPROM
1025 *
1026 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1027 **/
1028s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1029 u16 words, u16 *data)
1030{
1031 u32 eewr;
1032 s32 status = 0;
1033 u16 i;
717 1034
718 hw->eeprom.ops.init_params(hw); 1035 hw->eeprom.ops.init_params(hw);
719 1036
1037 if (words == 0) {
1038 status = IXGBE_ERR_INVALID_ARGUMENT;
1039 goto out;
1040 }
1041
720 if (offset >= hw->eeprom.word_size) { 1042 if (offset >= hw->eeprom.word_size) {
721 status = IXGBE_ERR_EEPROM; 1043 status = IXGBE_ERR_EEPROM;
722 goto out; 1044 goto out;
723 } 1045 }
724 1046
725 eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) + 1047 for (i = 0; i < words; i++) {
726 IXGBE_EEPROM_RW_REG_START; 1048 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1049 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1050 IXGBE_EEPROM_RW_REG_START;
727 1051
728 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 1052 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
729 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); 1053 if (status != 0) {
1054 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1055 goto out;
1056 }
730 1057
731 if (status == 0) 1058 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
732 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 1059
733 IXGBE_EEPROM_RW_REG_DATA); 1060 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
734 else 1061 if (status != 0) {
735 hw_dbg(hw, "Eeprom read timed out\n"); 1062 hw_dbg(hw, "Eeprom write EEWR timed out\n");
1063 goto out;
1064 }
1065 }
736 1066
737out: 1067out:
738 return status; 1068 return status;
739} 1069}
740 1070
741/** 1071/**
1072 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1073 * @hw: pointer to hardware structure
1074 * @offset: offset of word in the EEPROM to write
1075 * @data: word write to the EEPROM
1076 *
1077 * Write a 16 bit word to the EEPROM using the EEWR register.
1078 **/
1079s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1080{
1081 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1082}
1083
1084/**
742 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status 1085 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
743 * @hw: pointer to hardware structure 1086 * @hw: pointer to hardware structure
744 * @ee_reg: EEPROM flag for polling 1087 * @ee_reg: EEPROM flag for polling
@@ -746,7 +1089,7 @@ out:
746 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 1089 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
747 * read or write is done respectively. 1090 * read or write is done respectively.
748 **/ 1091 **/
749s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 1092static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
750{ 1093{
751 u32 i; 1094 u32 i;
752 u32 reg; 1095 u32 reg;
@@ -846,6 +1189,28 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
846 udelay(50); 1189 udelay(50);
847 } 1190 }
848 1191
1192 if (i == timeout) {
1193 hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
1194 "not granted.\n");
1195 /*
1196 * this release is particularly important because our attempts
1197 * above to get the semaphore may have succeeded, and if there
1198 * was a timeout, we should unconditionally clear the semaphore
1199 * bits to free the driver to make progress
1200 */
1201 ixgbe_release_eeprom_semaphore(hw);
1202
1203 udelay(50);
1204 /*
1205 * one last try
1206 * If the SMBI bit is 0 when we read it, then the bit will be
1207 * set and we have the semaphore
1208 */
1209 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1210 if (!(swsm & IXGBE_SWSM_SMBI))
1211 status = 0;
1212 }
1213
849 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 1214 /* Now get the semaphore between SW/FW through the SWESMBI bit */
850 if (status == 0) { 1215 if (status == 0) {
851 for (i = 0; i < timeout; i++) { 1216 for (i = 0; i < timeout; i++) {
@@ -1112,8 +1477,12 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1112 1477
1113 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1478 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1114 1479
1115 /* Delay before attempt to obtain semaphore again to allow FW access */ 1480 /*
1116 msleep(hw->eeprom.semaphore_delay); 1481 * Delay before attempt to obtain semaphore again to allow FW
1482 * access. semaphore_delay is in ms we need us for usleep_range
1483 */
1484 usleep_range(hw->eeprom.semaphore_delay * 1000,
1485 hw->eeprom.semaphore_delay * 2000);
1117} 1486}
1118 1487
1119/** 1488/**
@@ -2189,7 +2558,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2189 * thread currently using resource (swmask) 2558 * thread currently using resource (swmask)
2190 */ 2559 */
2191 ixgbe_release_eeprom_semaphore(hw); 2560 ixgbe_release_eeprom_semaphore(hw);
2192 msleep(5); 2561 usleep_range(5000, 10000);
2193 timeout--; 2562 timeout--;
2194 } 2563 }
2195 2564
@@ -2263,7 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2263 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2632 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2264 autoc_reg |= IXGBE_AUTOC_FLU; 2633 autoc_reg |= IXGBE_AUTOC_FLU;
2265 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2634 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2266 msleep(10); 2635 usleep_range(10000, 20000);
2267 } 2636 }
2268 2637
2269 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2638 led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -2883,3 +3252,18 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
2883 pfvfspoof &= ~(1 << vf_target_shift); 3252 pfvfspoof &= ~(1 << vf_target_shift);
2884 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); 3253 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
2885} 3254}
3255
3256/**
3257 * ixgbe_get_device_caps_generic - Get additional device capabilities
3258 * @hw: pointer to hardware structure
3259 * @device_caps: the EEPROM word with the extra device capabilities
3260 *
3261 * This function will read the EEPROM location for the device capabilities,
3262 * and return the word through device_caps.
3263 **/
3264s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3265{
3266 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3267
3268 return 0;
3269}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 508f635fc2ca..46be83cfb500 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -35,6 +35,7 @@ u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
38s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
38s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 39s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
39s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, 40s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
40 u32 pba_num_size); 41 u32 pba_num_size);
@@ -48,14 +49,22 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
48 49
49s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); 50s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
50s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); 51s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
52s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
53 u16 words, u16 *data);
51s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); 54s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
55s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
58s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data);
52s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 60s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
53 u16 *data); 61 u16 *data);
62s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
63 u16 words, u16 *data);
54u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); 64u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
55s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 65s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
56 u16 *checksum_val); 66 u16 *checksum_val);
57s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 67s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
58s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
59 68
60s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 69s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
61 u32 enable_addr); 70 u32 enable_addr);
@@ -89,6 +98,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
89s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 98s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
90void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); 99void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
91void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); 100void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
101s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
92 102
93#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 103#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
94 104
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 1bc57e52cee3..771d01a60d06 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -289,7 +289,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
289 * Configure queue statistics registers, all queues belonging to same traffic 289 * Configure queue statistics registers, all queues belonging to same traffic
290 * class uses a single set of queue statistics counters. 290 * class uses a single set of queue statistics counters.
291 */ 291 */
292s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) 292static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
293{ 293{
294 u32 reg = 0; 294 u32 reg = 0;
295 u8 i = 0; 295 u8 i = 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 025af8c53ddb..d50cf78c234d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -39,36 +39,52 @@
39 */ 39 */
40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba) 40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
41{ 41{
42 s32 ret_val = 0; 42 int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
43 u32 value = IXGBE_RXPBSIZE_64KB; 43 u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
44 u32 rxpktsize;
45 u32 txpktsize;
46 u32 txpbthresh;
44 u8 i = 0; 47 u8 i = 0;
45 48
46 /* Setup Rx packet buffer sizes */ 49 /*
47 switch (rx_pba) { 50 * This really means configure the first half of the TCs
48 case pba_80_48: 51 * (Traffic Classes) to use 5/8 of the Rx packet buffer
49 /* Setup the first four at 80KB */ 52 * space. To determine the size of the buffer for each TC,
50 value = IXGBE_RXPBSIZE_80KB; 53 * we are multiplying the average size by 5/4 and applying
51 for (; i < 4; i++) 54 * it to half of the traffic classes.
52 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); 55 */
53 /* Setup the last four at 48KB...don't re-init i */ 56 if (rx_pba == pba_80_48) {
54 value = IXGBE_RXPBSIZE_48KB; 57 rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
55 /* Fall Through */ 58 rx_pb_size -= rxpktsize * (num_tcs / 2);
56 case pba_equal: 59 for (; i < (num_tcs / 2); i++)
57 default: 60 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
58 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) 61 }
59 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); 62
60 63 /* Divide the remaining Rx packet buffer evenly among the TCs */
61 /* Setup Tx packet buffer sizes */ 64 rxpktsize = rx_pb_size / (num_tcs - i);
62 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { 65 for (; i < num_tcs; i++)
63 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 66 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
64 IXGBE_TXPBSIZE_20KB); 67
65 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 68 /*
66 IXGBE_TXPBTHRESH_DCB); 69 * Setup Tx packet buffer and threshold equally for all TCs
67 } 70 * TXPBTHRESH register is set in K so divide by 1024 and subtract
68 break; 71 * 10 since the largest packet we support is just over 9K.
72 */
73 txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
74 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
75 for (i = 0; i < num_tcs; i++) {
76 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
77 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
78 }
79
 80	/* Clear unused TCs, if any, to zero buffer size */
81 for (; i < MAX_TRAFFIC_CLASS; i++) {
82 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
83 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
84 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
69 } 85 }
70 86
71 return ret_val; 87 return 0;
72} 88}
73 89
74/** 90/**
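The new comment describes the pba_80_48 layout (the first half of the TCs take 5/8 of the Rx packet buffer) without spelling the numbers out. A minimal stand-alone sketch of the same arithmetic, assuming a 512KB Rx packet buffer and eight traffic classes (illustrative values, not read from hw->mac.rx_pb_size), reproduces the 80KB/48KB split the mode is named after:

/* Worked example of the pba_80_48 split, assuming a 512KB Rx packet
 * buffer and 8 traffic classes (illustrative values only).
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_tcs = 8;
	unsigned int rx_pb_size = 512 * 1024;	/* 512KB, assumed */
	unsigned int rxpktsize, i;

	/* first half of the TCs share 5/8 of the buffer: 5/4 of the
	 * average per-TC size, applied to num_tcs / 2 classes */
	rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);	/* 80KB each */
	rx_pb_size -= rxpktsize * (num_tcs / 2);	/* 192KB left */

	for (i = 0; i < num_tcs / 2; i++)
		printf("TC%u: %uKB\n", i, rxpktsize / 1024);

	/* remaining classes split what is left evenly */
	rxpktsize = rx_pb_size / (num_tcs - i);		/* 48KB each */
	for (; i < num_tcs; i++)
		printf("TC%u: %uKB\n", i, rxpktsize / 1024);

	return 0;
}

With those inputs the first four classes get 80KB each (320KB, i.e. 5/8 of 512KB) and the remaining four split the leftover 192KB at 48KB apiece.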
@@ -285,12 +301,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
285 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); 301 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
286 /* 302 /*
287 * Enable Receive PFC 303 * Enable Receive PFC
288 * We will always honor XOFF frames we receive when 304 * 82599 will always honor XOFF frames we receive when
 289	 * we are in PFC mode. 305	 * we are in PFC mode; however, X540 only honors enabled
306 * traffic classes.
290 */ 307 */
291 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 308 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
292 reg &= ~IXGBE_MFLCN_RFCE; 309 reg &= ~IXGBE_MFLCN_RFCE;
293 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; 310 reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
311
312 if (hw->mac.type == ixgbe_mac_X540)
313 reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
314
294 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); 315 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
295 316
296 } else { 317 } else {
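The X540-only line above folds pfc_en, a bitmap of priorities with PFC enabled, into MFLCN on top of the base receive-PFC flags. A compilable sketch of that composition, using placeholder bit positions rather than the real register layout (see ixgbe_type.h for the authoritative defines):

/* Sketch: how pfc_en (bitmap of PFC-enabled priorities) lands in MFLCN
 * on X540. Bit positions below are placeholders, not the hardware
 * layout.
 */
#include <stdio.h>

#define MFLCN_DPF         (1u << 1)	/* placeholder */
#define MFLCN_RPFCE       (1u << 2)	/* placeholder */
#define MFLCN_RPFCE_SHIFT 4		/* placeholder */

int main(void)
{
	unsigned int reg = 0;
	unsigned int pfc_en = 0x05;	/* PFC enabled on priorities 0 and 2 */

	reg |= MFLCN_RPFCE | MFLCN_DPF;
	reg |= pfc_en << MFLCN_RPFCE_SHIFT;	/* X540 only */

	printf("MFLCN = 0x%08x\n", reg);
	return 0;
}

With pfc_en = 0x05 only priorities 0 and 2 have their XOFF frames honored on X540, whereas 82599 honors all of them once RPFCE is set, as the updated comment says.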
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 148fd8b477a9..2de71a503153 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -92,8 +92,10 @@
92#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ 92#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
93#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ 93#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
94#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ 94#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
95#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/
95 96
96#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ 97#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
98#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
97 99
98/* SECTXMINIFG DCB */ 100/* SECTXMINIFG DCB */
99#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ 101#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
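IXGBE_TXPBSIZE_MAX and IXGBE_TXPKT_SIZE_MAX let ixgbe_dcb_config_packet_buffers_82599() derive the Tx threshold instead of hard-coding it. A quick stand-alone check, assuming eight traffic classes, shows the derived value collapsing back to the old IXGBE_TXPBTHRESH_DCB of 0xA:

/* Sanity check of the derived Tx threshold: 160KB split over 8 TCs,
 * converted to KB and reduced by the max packet size (10KB), gives 0xA.
 */
#include <stdio.h>

#define TXPBSIZE_MAX   0x00028000	/* 160KB, from the header above */
#define TXPKT_SIZE_MAX 0xA		/* max Tx packet size, in KB */

int main(void)
{
	unsigned int num_tcs = 8;	/* assumed TC count */
	unsigned int txpktsize = TXPBSIZE_MAX / num_tcs;	/* 20KB */
	unsigned int txpbthresh = (txpktsize / 1024) - TXPKT_SIZE_MAX;

	printf("txpktsize=%uKB txpbthresh=0x%X\n",
	       txpktsize / 1024, txpbthresh);	/* 20KB, 0xA */
	return 0;
}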
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 327c8614198c..5e7ed225851a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -347,18 +347,28 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
347static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) 347static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
348{ 348{
349 struct ixgbe_adapter *adapter = netdev_priv(netdev); 349 struct ixgbe_adapter *adapter = netdev_priv(netdev);
350 struct dcb_app app = {
351 .selector = DCB_APP_IDTYPE_ETHTYPE,
352 .protocol = ETH_P_FCOE,
353 };
354 u8 up = dcb_getapp(netdev, &app);
350 int ret; 355 int ret;
351 356
352 if (!adapter->dcb_set_bitmap ||
353 !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
354 return DCB_NO_HW_CHG;
355
356 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, 357 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
357 MAX_TRAFFIC_CLASS); 358 MAX_TRAFFIC_CLASS);
358
359 if (ret) 359 if (ret)
360 return DCB_NO_HW_CHG; 360 return DCB_NO_HW_CHG;
361 361
362 /* In IEEE mode app data must be parsed into DCBX format for
363 * hardware routines.
364 */
365 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
366 up = (1 << up);
367
368#ifdef IXGBE_FCOE
369 if (up && (up != (1 << adapter->fcoe.up)))
370 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
371
362 /* 372 /*
363 * Only take down the adapter if an app change occurred. FCoE 373 * Only take down the adapter if an app change occurred. FCoE
364 * may shuffle tx rings in this case and this can not be done 374 * may shuffle tx rings in this case and this can not be done
@@ -366,12 +376,15 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
366 */ 376 */
367 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 377 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
368 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 378 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
369 msleep(1); 379 usleep_range(1000, 2000);
380
381 ixgbe_fcoe_setapp(adapter, up);
370 382
371 if (netif_running(netdev)) 383 if (netif_running(netdev))
372 netdev->netdev_ops->ndo_stop(netdev); 384 netdev->netdev_ops->ndo_stop(netdev);
373 ixgbe_clear_interrupt_scheme(adapter); 385 ixgbe_clear_interrupt_scheme(adapter);
374 } 386 }
387#endif
375 388
376 if (adapter->dcb_cfg.pfc_mode_enable) { 389 if (adapter->dcb_cfg.pfc_mode_enable) {
377 switch (adapter->hw.mac.type) { 390 switch (adapter->hw.mac.type) {
@@ -399,12 +412,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
399 } 412 }
400 } 413 }
401 414
415#ifdef IXGBE_FCOE
402 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 416 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
403 ixgbe_init_interrupt_scheme(adapter); 417 ixgbe_init_interrupt_scheme(adapter);
404 if (netif_running(netdev)) 418 if (netif_running(netdev))
405 netdev->netdev_ops->ndo_open(netdev); 419 netdev->netdev_ops->ndo_open(netdev);
406 ret = DCB_HW_CHG_RST; 420 ret = DCB_HW_CHG_RST;
407 } 421 }
422#endif
408 423
409 if (adapter->dcb_set_bitmap & BIT_PFC) { 424 if (adapter->dcb_set_bitmap & BIT_PFC) {
410 u8 pfc_en; 425 u8 pfc_en;
@@ -558,68 +573,6 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
558 return dcb_getapp(netdev, &app); 573 return dcb_getapp(netdev, &app);
559} 574}
560 575
561/**
562 * ixgbe_dcbnl_setapp - set the DCBX application user priority
563 * @netdev : the corresponding netdev
564 * @idtype : identifies the id as ether type or TCP/UDP port number
565 * @id: id is either ether type or TCP/UDP port number
566 * @up: the 802.1p user priority bitmap
567 *
568 * Returns : 0 on success or 1 on error
569 */
570static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
571 u8 idtype, u16 id, u8 up)
572{
573 struct ixgbe_adapter *adapter = netdev_priv(netdev);
574 u8 rval = 1;
575 struct dcb_app app = {
576 .selector = idtype,
577 .protocol = id,
578 .priority = up
579 };
580
581 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
582 return rval;
583
584 rval = dcb_setapp(netdev, &app);
585
586 switch (idtype) {
587 case DCB_APP_IDTYPE_ETHTYPE:
588#ifdef IXGBE_FCOE
589 if (id == ETH_P_FCOE) {
590 u8 old_tc;
591
592 /* Get current programmed tc */
593 old_tc = adapter->fcoe.tc;
594 rval = ixgbe_fcoe_setapp(adapter, up);
595
596 if (rval ||
597 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
598 !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
599 break;
600
601 /* The FCoE application priority may be changed multiple
602 * times in quick succession with switches that build up
603 * TLVs. To avoid creating uneeded device resets this
604 * checks the actual HW configuration and clears
605 * BIT_APP_UPCHG if a HW configuration change is not
606 * need
607 */
608 if (old_tc == adapter->fcoe.tc)
609 adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
610 else
611 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
612 }
613#endif
614 break;
615 case DCB_APP_IDTYPE_PORTNUM:
616 break;
617 default:
618 break;
619 }
620 return rval;
621}
622
623static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, 576static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
624 struct ieee_ets *ets) 577 struct ieee_ets *ets)
625{ 578{
@@ -745,25 +698,14 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
745 698
746 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) 699 if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
747 return -EINVAL; 700 return -EINVAL;
748#ifdef IXGBE_FCOE
749 if (app->selector == 1 && app->protocol == ETH_P_FCOE) {
750 if (adapter->fcoe.tc == app->priority)
751 goto setapp;
752 701
753 /* In IEEE mode map up to tc 1:1 */ 702 dcb_setapp(dev, app);
754 adapter->fcoe.tc = app->priority;
755 adapter->fcoe.up = app->priority;
756 703
757 /* Force hardware reset required to push FCoE 704#ifdef IXGBE_FCOE
758 * setup on {tx|rx}_rings 705 if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
759 */ 706 adapter->fcoe.tc == app->priority)
760 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
761 ixgbe_dcbnl_set_all(dev); 707 ixgbe_dcbnl_set_all(dev);
762 }
763
764setapp:
765#endif 708#endif
766 dcb_setapp(dev, app);
767 return 0; 709 return 0;
768} 710}
769 711
@@ -838,7 +780,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
838 .getpfcstate = ixgbe_dcbnl_getpfcstate, 780 .getpfcstate = ixgbe_dcbnl_getpfcstate,
839 .setpfcstate = ixgbe_dcbnl_setpfcstate, 781 .setpfcstate = ixgbe_dcbnl_setpfcstate,
840 .getapp = ixgbe_dcbnl_getapp, 782 .getapp = ixgbe_dcbnl_getapp,
841 .setapp = ixgbe_dcbnl_setapp,
842 .getdcbx = ixgbe_dcbnl_getdcbx, 783 .getdcbx = ixgbe_dcbnl_getdcbx,
843 .setdcbx = ixgbe_dcbnl_setdcbx, 784 .setdcbx = ixgbe_dcbnl_setdcbx,
844}; 785};
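The dcbnl rework above reads the FCoE user priority back with dcb_getapp() and, in IEEE mode, converts it with up = (1 << up) before handing it to the CEE-oriented hardware helpers. A small sketch of that representation difference, under the assumption that IEEE mode stores a priority index while the helpers expect a CEE-style priority bitmap:

/* Sketch of the CEE-vs-IEEE user-priority handling assumed by the
 * rework above: IEEE stores a priority index (0-7), the hardware
 * helpers want a bitmap, so index 3 becomes 0x08.
 */
#include <stdio.h>

static unsigned int app_up_to_bitmap(unsigned int up, int ieee_mode)
{
	/* In CEE mode the stored value is already a bitmap; in IEEE
	 * mode it is an index that must be converted. */
	return ieee_mode ? (1u << up) : up;
}

int main(void)
{
	printf("IEEE prio 3    -> 0x%02x\n", app_up_to_bitmap(3, 1));
	printf("CEE bitmap 0x08 -> 0x%02x\n", app_up_to_bitmap(0x08, 0));
	return 0;
}

Converting once at the top is what lets the later up != (1 << adapter->fcoe.up) comparison and ixgbe_fcoe_setapp() treat both DCBX flavours uniformly.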
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 76380a2b35aa..cb1555bc8548 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -84,6 +84,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
84 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, 84 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
85 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 85 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
86 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 86 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
87 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
87 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, 88 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
88 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, 89 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
89 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, 90 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
@@ -102,6 +103,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
102 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 103 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
103 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 104 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
104 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 105 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
106 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
107 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
108 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
109 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
105#ifdef IXGBE_FCOE 110#ifdef IXGBE_FCOE
106 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 111 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
107 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 112 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -288,20 +293,20 @@ static int ixgbe_get_settings(struct net_device *netdev,
288 if (link_up) { 293 if (link_up) {
289 switch (link_speed) { 294 switch (link_speed) {
290 case IXGBE_LINK_SPEED_10GB_FULL: 295 case IXGBE_LINK_SPEED_10GB_FULL:
291 ecmd->speed = SPEED_10000; 296 ethtool_cmd_speed_set(ecmd, SPEED_10000);
292 break; 297 break;
293 case IXGBE_LINK_SPEED_1GB_FULL: 298 case IXGBE_LINK_SPEED_1GB_FULL:
294 ecmd->speed = SPEED_1000; 299 ethtool_cmd_speed_set(ecmd, SPEED_1000);
295 break; 300 break;
296 case IXGBE_LINK_SPEED_100_FULL: 301 case IXGBE_LINK_SPEED_100_FULL:
297 ecmd->speed = SPEED_100; 302 ethtool_cmd_speed_set(ecmd, SPEED_100);
298 break; 303 break;
299 default: 304 default:
300 break; 305 break;
301 } 306 }
302 ecmd->duplex = DUPLEX_FULL; 307 ecmd->duplex = DUPLEX_FULL;
303 } else { 308 } else {
304 ecmd->speed = -1; 309 ethtool_cmd_speed_set(ecmd, -1);
305 ecmd->duplex = -1; 310 ecmd->duplex = -1;
306 } 311 }
307 312
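The speed field of the legacy struct ethtool_cmd is only 16 bits wide; the full link speed is carried in a speed/speed_hi pair, which is why this hunk switches to the ethtool_cmd_speed_set()/ethtool_cmd_speed() accessors. A stand-alone sketch of what those accessors do (the struct is redeclared locally so the sketch compiles on its own; field names mirror the uapi layout as an assumption):

/* Minimal model of the split speed field and its accessors. */
#include <stdio.h>

struct cmd {
	unsigned short speed;		/* low 16 bits */
	unsigned short speed_hi;	/* high 16 bits */
};

static void cmd_speed_set(struct cmd *c, unsigned int speed)
{
	c->speed = (unsigned short)speed;
	c->speed_hi = (unsigned short)(speed >> 16);
}

static unsigned int cmd_speed(const struct cmd *c)
{
	return ((unsigned int)c->speed_hi << 16) | c->speed;
}

int main(void)
{
	struct cmd c;

	cmd_speed_set(&c, 100000);	/* 100G would not fit in 16 bits */
	printf("%u\n", cmd_speed(&c));	/* 100000 */
	return 0;
}

SPEED_10000 happens to fit in 16 bits, but going through the accessors keeps the encoding in one place and avoids silent truncation for anything faster.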
@@ -346,9 +351,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
346 } 351 }
347 } else { 352 } else {
348 /* in this case we currently only support 10Gb/FULL */ 353 /* in this case we currently only support 10Gb/FULL */
354 u32 speed = ethtool_cmd_speed(ecmd);
349 if ((ecmd->autoneg == AUTONEG_ENABLE) || 355 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
350 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 356 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
351 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 357 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
352 return -EINVAL; 358 return -EINVAL;
353 } 359 }
354 360
@@ -846,11 +852,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
846 if (!eeprom_buff) 852 if (!eeprom_buff)
847 return -ENOMEM; 853 return -ENOMEM;
848 854
849 for (i = 0; i < eeprom_len; i++) { 855 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
850 if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, 856 eeprom_buff);
851 &eeprom_buff[i])))
852 break;
853 }
854 857
855 /* Device's eeprom is always little-endian, word addressable */ 858 /* Device's eeprom is always little-endian, word addressable */
856 for (i = 0; i < eeprom_len; i++) 859 for (i = 0; i < eeprom_len; i++)
@@ -931,7 +934,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
931 } 934 }
932 935
933 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 936 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
934 msleep(1); 937 usleep_range(1000, 2000);
935 938
936 if (!netif_running(adapter->netdev)) { 939 if (!netif_running(adapter->netdev)) {
937 for (i = 0; i < adapter->num_tx_queues; i++) 940 for (i = 0; i < adapter->num_tx_queues; i++)
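This hunk, like several others in the patch, swaps msleep(1) for usleep_range(1000, 2000): msleep() rounds up to jiffies, so a 1 ms request can oversleep badly on low-HZ kernels, while usleep_range() is hrtimer-based and gives the scheduler an explicit coalescing window. The surrounding loop is the usual test_and_set_bit() exclusion pattern on __IXGBE_RESETTING; a user-space sketch of the same idea with C11 atomics (names and timing are illustrative only):

#define _POSIX_C_SOURCE 199309L
/* Sketch of "spin until we own the RESETTING bit, sleeping between
 * attempts", redone with C11 atomics so it compiles stand-alone.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void acquire_reset(void)
{
	const struct timespec ts = { .tv_nsec = 1000 * 1000 };	/* ~1 ms */

	/* keep trying until we are the caller that flipped the flag
	 * from clear to set */
	while (atomic_flag_test_and_set(&resetting))
		nanosleep(&ts, NULL);
}

static void release_reset(void)
{
	atomic_flag_clear(&resetting);
}

int main(void)
{
	acquire_reset();
	puts("reconfiguring rings...");
	release_reset();
	return 0;
}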
@@ -1030,9 +1033,6 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1030 return IXGBE_TEST_LEN; 1033 return IXGBE_TEST_LEN;
1031 case ETH_SS_STATS: 1034 case ETH_SS_STATS:
1032 return IXGBE_STATS_LEN; 1035 return IXGBE_STATS_LEN;
1033 case ETH_SS_NTUPLE_FILTERS:
1034 return ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
1035 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY;
1036 default: 1036 default:
1037 return -EOPNOTSUPP; 1037 return -EOPNOTSUPP;
1038 } 1038 }
@@ -1238,46 +1238,62 @@ static const struct ixgbe_reg_test reg_test_82598[] = {
1238 { 0, 0, 0, 0 } 1238 { 0, 0, 0, 0 }
1239}; 1239};
1240 1240
1241static const u32 register_test_patterns[] = { 1241static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1242 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF 1242 u32 mask, u32 write)
1243}; 1243{
1244 1244 u32 pat, val, before;
1245#define REG_PATTERN_TEST(R, M, W) \ 1245 static const u32 test_pattern[] = {
1246{ \ 1246 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1247 u32 pat, val, before; \ 1247
1248 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ 1248 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1249 before = readl(adapter->hw.hw_addr + R); \ 1249 before = readl(adapter->hw.hw_addr + reg);
1250 writel((register_test_patterns[pat] & W), \ 1250 writel((test_pattern[pat] & write),
1251 (adapter->hw.hw_addr + R)); \ 1251 (adapter->hw.hw_addr + reg));
1252 val = readl(adapter->hw.hw_addr + R); \ 1252 val = readl(adapter->hw.hw_addr + reg);
1253 if (val != (register_test_patterns[pat] & W & M)) { \ 1253 if (val != (test_pattern[pat] & write & mask)) {
1254 e_err(drv, "pattern test reg %04X failed: got " \ 1254 e_err(drv, "pattern test reg %04X failed: got "
1255 "0x%08X expected 0x%08X\n", \ 1255 "0x%08X expected 0x%08X\n",
1256 R, val, (register_test_patterns[pat] & W & M)); \ 1256 reg, val, (test_pattern[pat] & write & mask));
1257 *data = R; \ 1257 *data = reg;
1258 writel(before, adapter->hw.hw_addr + R); \ 1258 writel(before, adapter->hw.hw_addr + reg);
1259 return 1; \ 1259 return 1;
1260 } \ 1260 }
1261 writel(before, adapter->hw.hw_addr + R); \ 1261 writel(before, adapter->hw.hw_addr + reg);
1262 } \ 1262 }
1263 return 0;
1263} 1264}
1264 1265
1265#define REG_SET_AND_CHECK(R, M, W) \ 1266static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1266{ \ 1267 u32 mask, u32 write)
1267 u32 val, before; \ 1268{
1268 before = readl(adapter->hw.hw_addr + R); \ 1269 u32 val, before;
1269 writel((W & M), (adapter->hw.hw_addr + R)); \ 1270 before = readl(adapter->hw.hw_addr + reg);
1270 val = readl(adapter->hw.hw_addr + R); \ 1271 writel((write & mask), (adapter->hw.hw_addr + reg));
1271 if ((W & M) != (val & M)) { \ 1272 val = readl(adapter->hw.hw_addr + reg);
1272 e_err(drv, "set/check reg %04X test failed: got 0x%08X " \ 1273 if ((write & mask) != (val & mask)) {
1273 "expected 0x%08X\n", R, (val & M), (W & M)); \ 1274 e_err(drv, "set/check reg %04X test failed: got 0x%08X "
1274 *data = R; \ 1275 "expected 0x%08X\n", reg, (val & mask), (write & mask));
1275 writel(before, (adapter->hw.hw_addr + R)); \ 1276 *data = reg;
1276 return 1; \ 1277 writel(before, (adapter->hw.hw_addr + reg));
1277 } \ 1278 return 1;
1278 writel(before, (adapter->hw.hw_addr + R)); \ 1279 }
1280 writel(before, (adapter->hw.hw_addr + reg));
1281 return 0;
1279} 1282}
1280 1283
1284#define REG_PATTERN_TEST(reg, mask, write) \
1285 do { \
1286 if (reg_pattern_test(adapter, data, reg, mask, write)) \
1287 return 1; \
 1288	} while (0)
1289
1290
1291#define REG_SET_AND_CHECK(reg, mask, write) \
1292 do { \
1293 if (reg_set_and_check(adapter, data, reg, mask, write)) \
1294 return 1; \
 1295	} while (0)
1296
1281static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1297static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1282{ 1298{
1283 const struct ixgbe_reg_test *test; 1299 const struct ixgbe_reg_test *test;
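The old REG_PATTERN_TEST/REG_SET_AND_CHECK macros expanded the whole register loop at every call site; the new helpers do the work in real functions, and the thin do { } while (0) wrappers only preserve the "return 1 from the caller on the first failure" behaviour. An isolated, compilable sketch of that pattern (check_reg() is a stand-in, not the driver helper):

#include <stdbool.h>
#include <stdio.h>

static bool check_reg(int reg, unsigned int mask, unsigned int write)
{
	(void)mask;
	(void)write;
	return reg == 0x40;	/* pretend register 0x40 is broken */
}

/* do { } while (0) keeps the macro usable as a single statement; the
 * return propagates the failure out of the *calling* function. */
#define CHECK_REG(reg, mask, write)			\
	do {						\
		if (check_reg(reg, mask, write))	\
			return 1;			\
	} while (0)

static int run_reg_test(void)
{
	CHECK_REG(0x00, 0xffffffff, 0xffffffff);
	CHECK_REG(0x40, 0xffffffff, 0xffffffff);	/* stops here */
	CHECK_REG(0x80, 0xffffffff, 0xffffffff);	/* never reached */
	return 0;
}

int main(void)
{
	printf("reg test %s\n", run_reg_test() ? "failed" : "passed");
	return 0;
}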
@@ -1328,13 +1344,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1328 switch (test->test_type) { 1344 switch (test->test_type) {
1329 case PATTERN_TEST: 1345 case PATTERN_TEST:
1330 REG_PATTERN_TEST(test->reg + (i * 0x40), 1346 REG_PATTERN_TEST(test->reg + (i * 0x40),
1331 test->mask, 1347 test->mask,
1332 test->write); 1348 test->write);
1333 break; 1349 break;
1334 case SET_READ_TEST: 1350 case SET_READ_TEST:
1335 REG_SET_AND_CHECK(test->reg + (i * 0x40), 1351 REG_SET_AND_CHECK(test->reg + (i * 0x40),
1336 test->mask, 1352 test->mask,
1337 test->write); 1353 test->write);
1338 break; 1354 break;
1339 case WRITE_NO_TEST: 1355 case WRITE_NO_TEST:
1340 writel(test->write, 1356 writel(test->write,
@@ -1343,18 +1359,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1343 break; 1359 break;
1344 case TABLE32_TEST: 1360 case TABLE32_TEST:
1345 REG_PATTERN_TEST(test->reg + (i * 4), 1361 REG_PATTERN_TEST(test->reg + (i * 4),
1346 test->mask, 1362 test->mask,
1347 test->write); 1363 test->write);
1348 break; 1364 break;
1349 case TABLE64_TEST_LO: 1365 case TABLE64_TEST_LO:
1350 REG_PATTERN_TEST(test->reg + (i * 8), 1366 REG_PATTERN_TEST(test->reg + (i * 8),
1351 test->mask, 1367 test->mask,
1352 test->write); 1368 test->write);
1353 break; 1369 break;
1354 case TABLE64_TEST_HI: 1370 case TABLE64_TEST_HI:
1355 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1371 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1356 test->mask, 1372 test->mask,
1357 test->write); 1373 test->write);
1358 break; 1374 break;
1359 } 1375 }
1360 } 1376 }
@@ -1417,7 +1433,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1417 1433
1418 /* Disable all the interrupts */ 1434 /* Disable all the interrupts */
1419 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1435 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1420 msleep(10); 1436 usleep_range(10000, 20000);
1421 1437
1422 /* Test each interrupt */ 1438 /* Test each interrupt */
1423 for (; i < 10; i++) { 1439 for (; i < 10; i++) {
@@ -1437,7 +1453,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1437 ~mask & 0x00007FFF); 1453 ~mask & 0x00007FFF);
1438 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1454 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1439 ~mask & 0x00007FFF); 1455 ~mask & 0x00007FFF);
1440 msleep(10); 1456 usleep_range(10000, 20000);
1441 1457
1442 if (adapter->test_icr & mask) { 1458 if (adapter->test_icr & mask) {
1443 *data = 3; 1459 *data = 3;
@@ -1454,7 +1470,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1454 adapter->test_icr = 0; 1470 adapter->test_icr = 0;
1455 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1471 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1456 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1472 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1457 msleep(10); 1473 usleep_range(10000, 20000);
1458 1474
1459 if (!(adapter->test_icr &mask)) { 1475 if (!(adapter->test_icr &mask)) {
1460 *data = 4; 1476 *data = 4;
@@ -1474,7 +1490,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1474 ~mask & 0x00007FFF); 1490 ~mask & 0x00007FFF);
1475 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1491 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1476 ~mask & 0x00007FFF); 1492 ~mask & 0x00007FFF);
1477 msleep(10); 1493 usleep_range(10000, 20000);
1478 1494
1479 if (adapter->test_icr) { 1495 if (adapter->test_icr) {
1480 *data = 5; 1496 *data = 5;
@@ -1485,7 +1501,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1485 1501
1486 /* Disable all the interrupts */ 1502 /* Disable all the interrupts */
1487 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1503 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1488 msleep(10); 1504 usleep_range(10000, 20000);
1489 1505
1490 /* Unhook test interrupt handler */ 1506 /* Unhook test interrupt handler */
1491 free_irq(irq, netdev); 1507 free_irq(irq, netdev);
@@ -1598,6 +1614,13 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1598 struct ixgbe_hw *hw = &adapter->hw; 1614 struct ixgbe_hw *hw = &adapter->hw;
1599 u32 reg_data; 1615 u32 reg_data;
1600 1616
1617 /* X540 needs to set the MACC.FLU bit to force link up */
1618 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1619 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
1620 reg_data |= IXGBE_MACC_FLU;
1621 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
1622 }
1623
1601 /* right now we only support MAC loopback in the driver */ 1624 /* right now we only support MAC loopback in the driver */
1602 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1625 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1603 /* Setup MAC loopback */ 1626 /* Setup MAC loopback */
@@ -1613,7 +1636,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1613 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; 1636 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1614 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); 1637 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1615 IXGBE_WRITE_FLUSH(&adapter->hw); 1638 IXGBE_WRITE_FLUSH(&adapter->hw);
1616 msleep(10); 1639 usleep_range(10000, 20000);
1617 1640
1618 /* Disable Atlas Tx lanes; re-enabled in reset path */ 1641 /* Disable Atlas Tx lanes; re-enabled in reset path */
1619 if (hw->mac.type == ixgbe_mac_82598EB) { 1642 if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1999,25 +2022,30 @@ static int ixgbe_nway_reset(struct net_device *netdev)
1999 return 0; 2022 return 0;
2000} 2023}
2001 2024
2002static int ixgbe_phys_id(struct net_device *netdev, u32 data) 2025static int ixgbe_set_phys_id(struct net_device *netdev,
2026 enum ethtool_phys_id_state state)
2003{ 2027{
2004 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2028 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2005 struct ixgbe_hw *hw = &adapter->hw; 2029 struct ixgbe_hw *hw = &adapter->hw;
2006 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2007 u32 i;
2008 2030
2009 if (!data || data > 300) 2031 switch (state) {
2010 data = 300; 2032 case ETHTOOL_ID_ACTIVE:
2033 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2034 return 2;
2011 2035
2012 for (i = 0; i < (data * 1000); i += 400) { 2036 case ETHTOOL_ID_ON:
2013 hw->mac.ops.led_on(hw, IXGBE_LED_ON); 2037 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2014 msleep_interruptible(200); 2038 break;
2039
2040 case ETHTOOL_ID_OFF:
2015 hw->mac.ops.led_off(hw, IXGBE_LED_ON); 2041 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2016 msleep_interruptible(200); 2042 break;
2017 }
2018 2043
2019 /* Restore LED settings */ 2044 case ETHTOOL_ID_INACTIVE:
2020 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg); 2045 /* Restore LED settings */
2046 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2047 break;
2048 }
2021 2049
2022 return 0; 2050 return 0;
2023} 2051}
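Under .set_phys_id the ethtool core, not the driver, owns the blink loop. A driver-agnostic sketch of that state machine follows; the enum is redefined locally so it compiles on its own, and the reading of the positive ETHTOOL_ID_ACTIVE return value as "on/off cycles per second for core-driven blinking" is an assumption taken from the ethtool_ops documentation, not something this hunk states:

#include <stdio.h>

enum phys_id_state { ID_ACTIVE, ID_ON, ID_OFF, ID_INACTIVE };

static unsigned int saved_ledctl;
static unsigned int ledctl = 0x0f0f;	/* pretend register contents */

static int set_phys_id(enum phys_id_state state)
{
	switch (state) {
	case ID_ACTIVE:
		saved_ledctl = ledctl;	/* save LED settings */
		return 2;		/* ask the core for 2 cycles/second */
	case ID_ON:
		ledctl |= 0x1;
		break;
	case ID_OFF:
		ledctl &= ~0x1u;
		break;
	case ID_INACTIVE:
		ledctl = saved_ledctl;	/* restore LED settings */
		break;
	}
	return 0;
}

int main(void)
{
	int cycles = set_phys_id(ID_ACTIVE);
	/* the ethtool core would now alternate ON/OFF at 'cycles' Hz */
	set_phys_id(ID_ON);
	set_phys_id(ID_OFF);
	set_phys_id(ID_INACTIVE);
	printf("cycles=%d ledctl=0x%x\n", cycles, ledctl);
	return 0;
}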
@@ -2230,8 +2258,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2230 need_reset = (data & ETH_FLAG_RXVLAN) != 2258 need_reset = (data & ETH_FLAG_RXVLAN) !=
2231 (netdev->features & NETIF_F_HW_VLAN_RX); 2259 (netdev->features & NETIF_F_HW_VLAN_RX);
2232 2260
2261 if ((data & ETH_FLAG_RXHASH) &&
2262 !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
2263 return -EOPNOTSUPP;
2264
2233 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE | 2265 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
2234 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN); 2266 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
2267 ETH_FLAG_RXHASH);
2235 if (rc) 2268 if (rc)
2236 return rc; 2269 return rc;
2237 2270
@@ -2465,7 +2498,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
2465 .set_tso = ixgbe_set_tso, 2498 .set_tso = ixgbe_set_tso,
2466 .self_test = ixgbe_diag_test, 2499 .self_test = ixgbe_diag_test,
2467 .get_strings = ixgbe_get_strings, 2500 .get_strings = ixgbe_get_strings,
2468 .phys_id = ixgbe_phys_id, 2501 .set_phys_id = ixgbe_set_phys_id,
2469 .get_sset_count = ixgbe_get_sset_count, 2502 .get_sset_count = ixgbe_get_sset_count,
2470 .get_ethtool_stats = ixgbe_get_ethtool_stats, 2503 .get_ethtool_stats = ixgbe_get_ethtool_stats,
2471 .get_coalesce = ixgbe_get_coalesce, 2504 .get_coalesce = ixgbe_get_coalesce,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index dba7d77588ef..05920726e824 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -416,8 +416,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
416 if (!ddp->udl) 416 if (!ddp->udl)
417 goto ddp_out; 417 goto ddp_out;
418 418
419 ddp->err = (fcerr | fceofe); 419 if (fcerr | fceofe)
420 if (ddp->err)
421 goto ddp_out; 420 goto ddp_out;
422 421
423 fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT); 422 fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
@@ -428,6 +427,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
428 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { 427 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
429 pci_unmap_sg(adapter->pdev, ddp->sgl, 428 pci_unmap_sg(adapter->pdev, ddp->sgl,
430 ddp->sgc, DMA_FROM_DEVICE); 429 ddp->sgc, DMA_FROM_DEVICE);
430 ddp->err = (fcerr | fceofe);
431 ddp->sgl = NULL; 431 ddp->sgl = NULL;
432 ddp->sgc = 0; 432 ddp->sgc = 0;
433 } 433 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6f8adc7f5d7c..2dce3d038188 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -51,8 +51,12 @@
51char ixgbe_driver_name[] = "ixgbe"; 51char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54#define MAJ 3
55#define DRV_VERSION "3.2.9-k2" 55#define MIN 3
56#define BUILD 8
57#define KFIX 2
58#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
59 __stringify(BUILD) "-k" __stringify(KFIX)
56const char ixgbe_driver_version[] = DRV_VERSION; 60const char ixgbe_driver_version[] = DRV_VERSION;
57static const char ixgbe_copyright[] = 61static const char ixgbe_copyright[] =
58 "Copyright (c) 1999-2011 Intel Corporation."; 62 "Copyright (c) 1999-2011 Intel Corporation.";
@@ -120,6 +124,10 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
120 board_82599 }, 124 board_82599 },
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), 125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
122 board_X540 }, 126 board_X540 },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
128 board_82599 },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
130 board_82599 },
123 131
124 /* required last entry */ 132 /* required last entry */
125 {0, } 133 {0, }
@@ -185,6 +193,22 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
185 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 193 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
186} 194}
187 195
196static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
197{
198 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
199 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
200 schedule_work(&adapter->service_task);
201}
202
203static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
204{
205 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
206
 207	/* flush memory to make sure state is correct before the next watchdog */
208 smp_mb__before_clear_bit();
209 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
210}
211
188struct ixgbe_reg_info { 212struct ixgbe_reg_info {
189 u32 ofs; 213 u32 ofs;
190 char *name; 214 char *name;
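These helpers are the heart of the patch's consolidation of the old per-purpose work items into one service task: the __IXGBE_SERVICE_SCHED bit guarantees at most one queued instance no matter how many events request it, and the barrier before clearing the bit orders the subtask flags ahead of the next schedule. A user-space sketch of the same coalescing idea with C11 atomics (the queued counter stands in for schedule_work()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool down;
static atomic_bool service_sched;
static int queued;			/* stand-in for schedule_work() */

static void service_event_schedule(void)
{
	bool expected = false;

	/* only the caller that flips the bit queues the work */
	if (!atomic_load(&down) &&
	    atomic_compare_exchange_strong(&service_sched, &expected, true))
		queued++;
}

static void service_event_complete(void)
{
	/* release ordering plays the role of the barrier before
	 * clearing the bit in the driver */
	atomic_store_explicit(&service_sched, false, memory_order_release);
}

int main(void)
{
	service_event_schedule();
	service_event_schedule();	/* coalesced, not queued twice */
	printf("queued=%d\n", queued);	/* 1 */
	service_event_complete();
	return 0;
}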
@@ -811,7 +835,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
811#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 835#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
812 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 836 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
813 837
814static void ixgbe_tx_timeout(struct net_device *netdev); 838/**
839 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
840 * @adapter: driver private struct
841 **/
842static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
843{
844
845 /* Do the reset outside of interrupt context */
846 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
847 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
848 ixgbe_service_event_schedule(adapter);
849 }
850}
815 851
816/** 852/**
817 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 853 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
@@ -893,7 +929,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
893 adapter->tx_timeout_count + 1, tx_ring->queue_index); 929 adapter->tx_timeout_count + 1, tx_ring->queue_index);
894 930
895 /* schedule immediate reset if we believe we hung */ 931 /* schedule immediate reset if we believe we hung */
896 ixgbe_tx_timeout(adapter->netdev); 932 ixgbe_tx_timeout_reset(adapter);
897 933
898 /* the adapter is about to reset, no point in enabling stuff */ 934 /* the adapter is about to reset, no point in enabling stuff */
899 return true; 935 return true;
@@ -943,8 +979,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
943 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 979 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
944 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 980 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
945 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); 981 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
946 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
947 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
948 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); 982 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
949} 983}
950 984
@@ -962,7 +996,6 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
962 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 996 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
963 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 997 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
964 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 998 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
965 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
966 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); 999 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
967 break; 1000 break;
968 case ixgbe_mac_82599EB: 1001 case ixgbe_mac_82599EB:
@@ -972,7 +1005,6 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
972 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 1005 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
973 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); 1006 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
974 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 1007 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
975 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
976 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); 1008 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
977 break; 1009 break;
978 default: 1010 default:
@@ -1061,8 +1093,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
1061 1093
1062 return 0; 1094 return 0;
1063} 1095}
1064
1065#endif /* CONFIG_IXGBE_DCA */ 1096#endif /* CONFIG_IXGBE_DCA */
1097
1098static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
1099 struct sk_buff *skb)
1100{
1101 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1102}
1103
1066/** 1104/**
1067 * ixgbe_receive_skb - Send a completed packet up the stack 1105 * ixgbe_receive_skb - Send a completed packet up the stack
1068 * @adapter: board private structure 1106 * @adapter: board private structure
@@ -1454,6 +1492,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1454 } 1492 }
1455 1493
1456 ixgbe_rx_checksum(adapter, rx_desc, skb); 1494 ixgbe_rx_checksum(adapter, rx_desc, skb);
1495 if (adapter->netdev->features & NETIF_F_RXHASH)
1496 ixgbe_rx_hash(rx_desc, skb);
1457 1497
1458 /* probably a little skewed due to removing CRC */ 1498 /* probably a little skewed due to removing CRC */
1459 total_rx_bytes += skb->len; 1499 total_rx_bytes += skb->len;
@@ -1787,35 +1827,51 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1787} 1827}
1788 1828
1789/** 1829/**
 1790 * ixgbe_check_overtemp_task - worker thread to check over temperature 1830 * ixgbe_check_overtemp_subtask - check for over temperature
1791 * @work: pointer to work_struct containing our data 1831 * @adapter: pointer to adapter
1792 **/ 1832 **/
1793static void ixgbe_check_overtemp_task(struct work_struct *work) 1833static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
1794{ 1834{
1795 struct ixgbe_adapter *adapter = container_of(work,
1796 struct ixgbe_adapter,
1797 check_overtemp_task);
1798 struct ixgbe_hw *hw = &adapter->hw; 1835 struct ixgbe_hw *hw = &adapter->hw;
1799 u32 eicr = adapter->interrupt_event; 1836 u32 eicr = adapter->interrupt_event;
1800 1837
1801 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) 1838 if (test_bit(__IXGBE_DOWN, &adapter->state))
1802 return; 1839 return;
1803 1840
1841 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1842 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
1843 return;
1844
1845 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1846
1804 switch (hw->device_id) { 1847 switch (hw->device_id) {
1805 case IXGBE_DEV_ID_82599_T3_LOM: { 1848 case IXGBE_DEV_ID_82599_T3_LOM:
1806 u32 autoneg; 1849 /*
1807 bool link_up = false; 1850 * Since the warning interrupt is for both ports
1851 * we don't have to check if:
1852 * - This interrupt wasn't for our port.
1853 * - We may have missed the interrupt so always have to
1854 * check if we got a LSC
1855 */
1856 if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
1857 !(eicr & IXGBE_EICR_LSC))
1858 return;
1859
1860 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
1861 u32 autoneg;
1862 bool link_up = false;
1808 1863
1809 if (hw->mac.ops.check_link)
1810 hw->mac.ops.check_link(hw, &autoneg, &link_up, false); 1864 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1811 1865
1812 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || 1866 if (link_up)
1813 (eicr & IXGBE_EICR_LSC)) 1867 return;
1814 /* Check if this is due to overtemp */ 1868 }
1815 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) 1869
1816 break; 1870 /* Check if this is not due to overtemp */
1817 return; 1871 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
1818 } 1872 return;
1873
1874 break;
1819 default: 1875 default:
1820 if (!(eicr & IXGBE_EICR_GPI_SDP0)) 1876 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1821 return; 1877 return;
@@ -1825,8 +1881,8 @@ static void ixgbe_check_overtemp_task(struct work_struct *work)
1825 "Network adapter has been stopped because it has over heated. " 1881 "Network adapter has been stopped because it has over heated. "
1826 "Restart the computer. If the problem persists, " 1882 "Restart the computer. If the problem persists, "
1827 "power off the system and replace the adapter\n"); 1883 "power off the system and replace the adapter\n");
1828 /* write to clear the interrupt */ 1884
1829 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); 1885 adapter->interrupt_event = 0;
1830} 1886}
1831 1887
1832static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1888static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1848,15 +1904,19 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1848 if (eicr & IXGBE_EICR_GPI_SDP2) { 1904 if (eicr & IXGBE_EICR_GPI_SDP2) {
1849 /* Clear the interrupt */ 1905 /* Clear the interrupt */
1850 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); 1906 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1851 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1907 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1852 schedule_work(&adapter->sfp_config_module_task); 1908 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
1909 ixgbe_service_event_schedule(adapter);
1910 }
1853 } 1911 }
1854 1912
1855 if (eicr & IXGBE_EICR_GPI_SDP1) { 1913 if (eicr & IXGBE_EICR_GPI_SDP1) {
1856 /* Clear the interrupt */ 1914 /* Clear the interrupt */
1857 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1915 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1858 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1916 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1859 schedule_work(&adapter->multispeed_fiber_task); 1917 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
1918 ixgbe_service_event_schedule(adapter);
1919 }
1860 } 1920 }
1861} 1921}
1862 1922
@@ -1870,7 +1930,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1870 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 1930 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1871 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 1931 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1872 IXGBE_WRITE_FLUSH(hw); 1932 IXGBE_WRITE_FLUSH(hw);
1873 schedule_work(&adapter->watchdog_task); 1933 ixgbe_service_event_schedule(adapter);
1874 } 1934 }
1875} 1935}
1876 1936
@@ -1898,26 +1958,32 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1898 1958
1899 switch (hw->mac.type) { 1959 switch (hw->mac.type) {
1900 case ixgbe_mac_82599EB: 1960 case ixgbe_mac_82599EB:
1901 ixgbe_check_sfp_event(adapter, eicr);
1902 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1903 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1904 adapter->interrupt_event = eicr;
1905 schedule_work(&adapter->check_overtemp_task);
1906 }
1907 /* now fallthrough to handle Flow Director */
1908 case ixgbe_mac_X540: 1961 case ixgbe_mac_X540:
1909 /* Handle Flow Director Full threshold interrupt */ 1962 /* Handle Flow Director Full threshold interrupt */
1910 if (eicr & IXGBE_EICR_FLOW_DIR) { 1963 if (eicr & IXGBE_EICR_FLOW_DIR) {
1964 int reinit_count = 0;
1911 int i; 1965 int i;
1912 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1913 /* Disable transmits before FDIR Re-initialization */
1914 netif_tx_stop_all_queues(netdev);
1915 for (i = 0; i < adapter->num_tx_queues; i++) { 1966 for (i = 0; i < adapter->num_tx_queues; i++) {
1916 struct ixgbe_ring *tx_ring = 1967 struct ixgbe_ring *ring = adapter->tx_ring[i];
1917 adapter->tx_ring[i];
1918 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, 1968 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1919 &tx_ring->state)) 1969 &ring->state))
1920 schedule_work(&adapter->fdir_reinit_task); 1970 reinit_count++;
1971 }
1972 if (reinit_count) {
1973 /* no more flow director interrupts until after init */
1974 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
1975 eicr &= ~IXGBE_EICR_FLOW_DIR;
1976 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
1977 ixgbe_service_event_schedule(adapter);
1978 }
1979 }
1980 ixgbe_check_sfp_event(adapter, eicr);
1981 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1982 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1983 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1984 adapter->interrupt_event = eicr;
1985 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1986 ixgbe_service_event_schedule(adapter);
1921 } 1987 }
1922 } 1988 }
1923 break; 1989 break;
@@ -1927,8 +1993,10 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1927 1993
1928 ixgbe_check_fan_failure(adapter, eicr); 1994 ixgbe_check_fan_failure(adapter, eicr);
1929 1995
1996 /* re-enable the original interrupt state, no lsc, no queues */
1930 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1997 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1931 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1998 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
1999 ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
1932 2000
1933 return IRQ_HANDLED; 2001 return IRQ_HANDLED;
1934} 2002}
@@ -2513,8 +2581,11 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2513 ixgbe_check_sfp_event(adapter, eicr); 2581 ixgbe_check_sfp_event(adapter, eicr);
2514 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 2582 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2515 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { 2583 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2516 adapter->interrupt_event = eicr; 2584 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2517 schedule_work(&adapter->check_overtemp_task); 2585 adapter->interrupt_event = eicr;
2586 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2587 ixgbe_service_event_schedule(adapter);
2588 }
2518 } 2589 }
2519 break; 2590 break;
2520 default: 2591 default:
@@ -2731,7 +2802,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2731 2802
2732 /* poll to verify queue is enabled */ 2803 /* poll to verify queue is enabled */
2733 do { 2804 do {
2734 msleep(1); 2805 usleep_range(1000, 2000);
2735 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 2806 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2736 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); 2807 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2737 if (!wait_loop) 2808 if (!wait_loop)
@@ -3023,7 +3094,7 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3023 return; 3094 return;
3024 3095
3025 do { 3096 do {
3026 msleep(1); 3097 usleep_range(1000, 2000);
3027 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3098 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3028 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); 3099 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3029 3100
@@ -3178,7 +3249,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3178 /* enable Tx loopback for VF/PF communication */ 3249 /* enable Tx loopback for VF/PF communication */
3179 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 3250 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3180 /* Enable MAC Anti-Spoofing */ 3251 /* Enable MAC Anti-Spoofing */
3181 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), 3252 hw->mac.ops.set_mac_anti_spoofing(hw,
3253 (adapter->antispoofing_enabled =
3254 (adapter->num_vfs != 0)),
3182 adapter->num_vfs); 3255 adapter->num_vfs);
3183} 3256}
3184 3257
@@ -3487,7 +3560,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3487 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3560 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3488 struct ixgbe_hw *hw = &adapter->hw; 3561 struct ixgbe_hw *hw = &adapter->hw;
3489 unsigned int vfn = adapter->num_vfs; 3562 unsigned int vfn = adapter->num_vfs;
3490 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1); 3563 unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
3491 int count = 0; 3564 int count = 0;
3492 3565
3493 /* return ENOMEM indicating insufficient memory for addresses */ 3566 /* return ENOMEM indicating insufficient memory for addresses */
@@ -3760,31 +3833,16 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3760 **/ 3833 **/
3761static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) 3834static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3762{ 3835{
3763 struct ixgbe_hw *hw = &adapter->hw; 3836 /*
 3837	 * We are assuming the worst case scenario here, and that
 3838	 * is that an SFP was inserted/removed after the reset
 3839	 * but before SFP detection was enabled. As such, the best
 3840	 * solution is to just start searching for a module right away.
3841 */
3842 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3843 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
3764 3844
3765 if (hw->phy.multispeed_fiber) { 3845 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
3766 /*
3767 * In multispeed fiber setups, the device may not have
3768 * had a physical connection when the driver loaded.
3769 * If that's the case, the initial link configuration
3770 * couldn't get the MAC into 10G or 1G mode, so we'll
3771 * never have a link status change interrupt fire.
3772 * We need to try and force an autonegotiation
3773 * session, then bring up link.
3774 */
3775 if (hw->mac.ops.setup_sfp)
3776 hw->mac.ops.setup_sfp(hw);
3777 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3778 schedule_work(&adapter->multispeed_fiber_task);
3779 } else {
3780 /*
3781 * Direct Attach Cu and non-multispeed fiber modules
3782 * still need to be configured properly prior to
3783 * attempting link.
3784 */
3785 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3786 schedule_work(&adapter->sfp_config_module_task);
3787 }
3788} 3846}
3789 3847
3790/** 3848/**
@@ -3860,9 +3918,10 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3860 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 3918 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3861 gpie |= IXGBE_SDP1_GPIEN; 3919 gpie |= IXGBE_SDP1_GPIEN;
3862 3920
3863 if (hw->mac.type == ixgbe_mac_82599EB) 3921 if (hw->mac.type == ixgbe_mac_82599EB) {
3864 gpie |= IXGBE_SDP1_GPIEN; 3922 gpie |= IXGBE_SDP1_GPIEN;
3865 gpie |= IXGBE_SDP2_GPIEN; 3923 gpie |= IXGBE_SDP2_GPIEN;
3924 }
3866 3925
3867 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3926 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3868} 3927}
@@ -3913,17 +3972,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3913 e_crit(drv, "Fan has stopped, replace the adapter\n"); 3972 e_crit(drv, "Fan has stopped, replace the adapter\n");
3914 } 3973 }
3915 3974
3916 /*
3917 * For hot-pluggable SFP+ devices, a new SFP+ module may have
3918 * arrived before interrupts were enabled but after probe. Such
3919 * devices wouldn't have their type identified yet. We need to
3920 * kick off the SFP+ module setup first, then try to bring up link.
3921 * If we're not hot-pluggable SFP+, we just need to configure link
3922 * and bring it up.
3923 */
3924 if (hw->phy.type == ixgbe_phy_none)
3925 schedule_work(&adapter->sfp_config_module_task);
3926
3927 /* enable transmits */ 3975 /* enable transmits */
3928 netif_tx_start_all_queues(adapter->netdev); 3976 netif_tx_start_all_queues(adapter->netdev);
3929 3977
@@ -3931,7 +3979,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3931 * link up interrupt but shouldn't be a problem */ 3979 * link up interrupt but shouldn't be a problem */
3932 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3980 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3933 adapter->link_check_timeout = jiffies; 3981 adapter->link_check_timeout = jiffies;
3934 mod_timer(&adapter->watchdog_timer, jiffies); 3982 mod_timer(&adapter->service_timer, jiffies);
3935 3983
3936 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 3984 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3937 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3985 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -3944,8 +3992,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3944void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) 3992void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3945{ 3993{
3946 WARN_ON(in_interrupt()); 3994 WARN_ON(in_interrupt());
3995 /* put off any impending NetWatchDogTimeout */
3996 adapter->netdev->trans_start = jiffies;
3997
3947 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 3998 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3948 msleep(1); 3999 usleep_range(1000, 2000);
3949 ixgbe_down(adapter); 4000 ixgbe_down(adapter);
3950 /* 4001 /*
3951 * If SR-IOV enabled then wait a bit before bringing the adapter 4002 * If SR-IOV enabled then wait a bit before bringing the adapter
@@ -3972,10 +4023,20 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3972 struct ixgbe_hw *hw = &adapter->hw; 4023 struct ixgbe_hw *hw = &adapter->hw;
3973 int err; 4024 int err;
3974 4025
4026 /* lock SFP init bit to prevent race conditions with the watchdog */
4027 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4028 usleep_range(1000, 2000);
4029
4030 /* clear all SFP and link config related flags while holding SFP_INIT */
4031 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4032 IXGBE_FLAG2_SFP_NEEDS_RESET);
4033 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4034
3975 err = hw->mac.ops.init_hw(hw); 4035 err = hw->mac.ops.init_hw(hw);
3976 switch (err) { 4036 switch (err) {
3977 case 0: 4037 case 0:
3978 case IXGBE_ERR_SFP_NOT_PRESENT: 4038 case IXGBE_ERR_SFP_NOT_PRESENT:
4039 case IXGBE_ERR_SFP_NOT_SUPPORTED:
3979 break; 4040 break;
3980 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 4041 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
3981 e_dev_err("master disable timed out\n"); 4042 e_dev_err("master disable timed out\n");
@@ -3993,6 +4054,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3993 e_dev_err("Hardware Error: %d\n", err); 4054 e_dev_err("Hardware Error: %d\n", err);
3994 } 4055 }
3995 4056
4057 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4058
3996 /* reprogram the RAR[0] in case user changed it. */ 4059 /* reprogram the RAR[0] in case user changed it. */
3997 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, 4060 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3998 IXGBE_RAH_AV); 4061 IXGBE_RAH_AV);
@@ -4121,26 +4184,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4121 struct net_device *netdev = adapter->netdev; 4184 struct net_device *netdev = adapter->netdev;
4122 struct ixgbe_hw *hw = &adapter->hw; 4185 struct ixgbe_hw *hw = &adapter->hw;
4123 u32 rxctrl; 4186 u32 rxctrl;
4124 u32 txdctl;
4125 int i; 4187 int i;
4126 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4188 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4127 4189
4128 /* signal that we are down to the interrupt handler */ 4190 /* signal that we are down to the interrupt handler */
4129 set_bit(__IXGBE_DOWN, &adapter->state); 4191 set_bit(__IXGBE_DOWN, &adapter->state);
4130 4192
4131 /* disable receive for all VFs and wait one second */
4132 if (adapter->num_vfs) {
4133 /* ping all the active vfs to let them know we are going down */
4134 ixgbe_ping_all_vfs(adapter);
4135
4136 /* Disable all VFTE/VFRE TX/RX */
4137 ixgbe_disable_tx_rx(adapter);
4138
4139 /* Mark all the VFs as inactive */
4140 for (i = 0 ; i < adapter->num_vfs; i++)
4141 adapter->vfinfo[i].clear_to_send = 0;
4142 }
4143
4144 /* disable receives */ 4193 /* disable receives */
4145 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4194 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4146 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 4195 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -4150,15 +4199,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4150 /* this call also flushes the previous write */ 4199 /* this call also flushes the previous write */
4151 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); 4200 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4152 4201
4153 msleep(10); 4202 usleep_range(10000, 20000);
4154 4203
4155 netif_tx_stop_all_queues(netdev); 4204 netif_tx_stop_all_queues(netdev);
4156 4205
4157 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 4206 /* call carrier off first to avoid false dev_watchdog timeouts */
4158 del_timer_sync(&adapter->sfp_timer);
4159 del_timer_sync(&adapter->watchdog_timer);
4160 cancel_work_sync(&adapter->watchdog_task);
4161
4162 netif_carrier_off(netdev); 4207 netif_carrier_off(netdev);
4163 netif_tx_disable(netdev); 4208 netif_tx_disable(netdev);
4164 4209
@@ -4166,6 +4211,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4166 4211
4167 ixgbe_napi_disable_all(adapter); 4212 ixgbe_napi_disable_all(adapter);
4168 4213
4214 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
4215 IXGBE_FLAG2_RESET_REQUESTED);
4216 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4217
4218 del_timer_sync(&adapter->service_timer);
4219
4220 /* disable receive for all VFs and wait one second */
4221 if (adapter->num_vfs) {
4222 /* ping all the active vfs to let them know we are going down */
4223 ixgbe_ping_all_vfs(adapter);
4224
4225 /* Disable all VFTE/VFRE TX/RX */
4226 ixgbe_disable_tx_rx(adapter);
4227
4228 /* Mark all the VFs as inactive */
4229 for (i = 0 ; i < adapter->num_vfs; i++)
4230 adapter->vfinfo[i].clear_to_send = 0;
4231 }
4232
4169 /* Cleanup the affinity_hint CPU mask memory and callback */ 4233 /* Cleanup the affinity_hint CPU mask memory and callback */
4170 for (i = 0; i < num_q_vectors; i++) { 4234 for (i = 0; i < num_q_vectors; i++) {
4171 struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; 4235 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
@@ -4175,21 +4239,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4175 free_cpumask_var(q_vector->affinity_mask); 4239 free_cpumask_var(q_vector->affinity_mask);
4176 } 4240 }
4177 4241
4178 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4179 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4180 cancel_work_sync(&adapter->fdir_reinit_task);
4181
4182 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
4183 cancel_work_sync(&adapter->check_overtemp_task);
4184
4185 /* disable transmits in the hardware now that interrupts are off */ 4242 /* disable transmits in the hardware now that interrupts are off */
4186 for (i = 0; i < adapter->num_tx_queues; i++) { 4243 for (i = 0; i < adapter->num_tx_queues; i++) {
4187 u8 reg_idx = adapter->tx_ring[i]->reg_idx; 4244 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
4188 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 4245 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
4189 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
4190 (txdctl & ~IXGBE_TXDCTL_ENABLE));
4191 } 4246 }
4192 /* Disable the Tx DMA engine on 82599 */ 4247
4248 /* Disable the Tx DMA engine on 82599 and X540 */
4193 switch (hw->mac.type) { 4249 switch (hw->mac.type) {
4194 case ixgbe_mac_82599EB: 4250 case ixgbe_mac_82599EB:
4195 case ixgbe_mac_X540: 4251 case ixgbe_mac_X540:
@@ -4201,9 +4257,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4201 break; 4257 break;
4202 } 4258 }
4203 4259
4204 /* clear n-tuple filters that are cached */
4205 ethtool_ntuple_flush(netdev);
4206
4207 if (!pci_channel_offline(adapter->pdev)) 4260 if (!pci_channel_offline(adapter->pdev))
4208 ixgbe_reset(adapter); 4261 ixgbe_reset(adapter);
4209 4262
@@ -4267,25 +4320,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
4267{ 4320{
4268 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4321 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4269 4322
4270 adapter->tx_timeout_count++;
4271
4272 /* Do the reset outside of interrupt context */ 4323 /* Do the reset outside of interrupt context */
4273 schedule_work(&adapter->reset_task); 4324 ixgbe_tx_timeout_reset(adapter);
4274}
4275
4276static void ixgbe_reset_task(struct work_struct *work)
4277{
4278 struct ixgbe_adapter *adapter;
4279 adapter = container_of(work, struct ixgbe_adapter, reset_task);
4280
4281 /* If we're already down or resetting, just bail */
4282 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
4283 test_bit(__IXGBE_RESETTING, &adapter->state))
4284 return;
4285
4286 ixgbe_dump(adapter);
4287 netdev_err(adapter->netdev, "Reset adapter\n");
4288 ixgbe_reinit_locked(adapter);
4289} 4325}
4290 4326
4291/** 4327/**
@@ -4567,8 +4603,8 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4567#ifdef CONFIG_IXGBE_DCB 4603#ifdef CONFIG_IXGBE_DCB
4568 4604
4569/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ 4605/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
4570void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, 4606static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
4571 unsigned int *tx, unsigned int *rx) 4607 unsigned int *tx, unsigned int *rx)
4572{ 4608{
4573 struct net_device *dev = adapter->netdev; 4609 struct net_device *dev = adapter->netdev;
4574 struct ixgbe_hw *hw = &adapter->hw; 4610 struct ixgbe_hw *hw = &adapter->hw;
@@ -5138,57 +5174,6 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
5138} 5174}
5139 5175
5140/** 5176/**
5141 * ixgbe_sfp_timer - worker thread to find a missing module
5142 * @data: pointer to our adapter struct
5143 **/
5144static void ixgbe_sfp_timer(unsigned long data)
5145{
5146 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5147
5148 /*
5149 * Do the sfp_timer outside of interrupt context due to the
5150 * delays that sfp+ detection requires
5151 */
5152 schedule_work(&adapter->sfp_task);
5153}
5154
5155/**
5156 * ixgbe_sfp_task - worker thread to find a missing module
5157 * @work: pointer to work_struct containing our data
5158 **/
5159static void ixgbe_sfp_task(struct work_struct *work)
5160{
5161 struct ixgbe_adapter *adapter = container_of(work,
5162 struct ixgbe_adapter,
5163 sfp_task);
5164 struct ixgbe_hw *hw = &adapter->hw;
5165
5166 if ((hw->phy.type == ixgbe_phy_nl) &&
5167 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
5168 s32 ret = hw->phy.ops.identify_sfp(hw);
5169 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
5170 goto reschedule;
5171 ret = hw->phy.ops.reset(hw);
5172 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5173 e_dev_err("failed to initialize because an unsupported "
5174 "SFP+ module type was detected.\n");
5175 e_dev_err("Reload the driver after installing a "
5176 "supported module.\n");
5177 unregister_netdev(adapter->netdev);
5178 } else {
5179 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
5180 }
5181 /* don't need this routine any more */
5182 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5183 }
5184 return;
5185reschedule:
5186 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
5187 mod_timer(&adapter->sfp_timer,
5188 round_jiffies(jiffies + (2 * HZ)));
5189}
5190
5191/**
5192 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) 5177 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
5193 * @adapter: board private structure to initialize 5178 * @adapter: board private structure to initialize
5194 * 5179 *
@@ -5904,8 +5889,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5904 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 5889 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5905 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 5890 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5906 break; 5891 break;
5907 case ixgbe_mac_82599EB:
5908 case ixgbe_mac_X540: 5892 case ixgbe_mac_X540:
 5893 /* OS2BMC stats are X540 only */
5894 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5895 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5896 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5897 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
5898 case ixgbe_mac_82599EB:
5909 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5899 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5910 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 5900 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5911 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5901 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
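In the statistics hunk above, the X540 case is deliberately placed before the 82599 case: it reads the OS2BMC counters that only X540 has and then falls through, so both MACs share the remaining extended counter reads. A small standalone illustration of that accumulate-and-fall-through pattern (toy names, not driver code):

	#include <stdio.h>

	enum mac_type { MAC_82598, MAC_82599, MAC_X540 };

	/* Toy model: the newer MAC gathers its extra counters, then falls
	 * through to the block it shares with the older one. */
	static void update_stats(enum mac_type mac, int *shared, int *x540_only)
	{
		switch (mac) {
		case MAC_X540:
			(*x540_only)++;		/* e.g. OS2BMC counters */
			/* fall through */
		case MAC_82599:
			(*shared)++;		/* e.g. extended GORC/GOTC reads */
			break;
		default:
			break;
		}
	}

	int main(void)
	{
		int shared = 0, x540_only = 0;

		update_stats(MAC_X540, &shared, &x540_only);
		update_stats(MAC_82599, &shared, &x540_only);
		printf("shared=%d x540_only=%d\n", shared, x540_only); /* 2 and 1 */
		return 0;
	}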
@@ -5979,23 +5969,66 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5979} 5969}
5980 5970
5981/** 5971/**
5982 * ixgbe_watchdog - Timer Call-back 5972 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
5983 * @data: pointer to adapter cast into an unsigned long 5973 * @adapter - pointer to the device adapter structure
5984 **/ 5974 **/
5985static void ixgbe_watchdog(unsigned long data) 5975static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5986{ 5976{
5987 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5988 struct ixgbe_hw *hw = &adapter->hw; 5977 struct ixgbe_hw *hw = &adapter->hw;
5989 u64 eics = 0;
5990 int i; 5978 int i;
5991 5979
5992 /* 5980 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5993 * Do the watchdog outside of interrupt context due to the lovely 5981 return;
5994 * delays that some of the newer hardware requires 5982
5995 */ 5983 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5996 5984
5985 /* if interface is down do nothing */
5997 if (test_bit(__IXGBE_DOWN, &adapter->state)) 5986 if (test_bit(__IXGBE_DOWN, &adapter->state))
5998 goto watchdog_short_circuit; 5987 return;
5988
5989 /* do nothing if we are not using signature filters */
5990 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5991 return;
5992
5993 adapter->fdir_overflow++;
5994
5995 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5996 for (i = 0; i < adapter->num_tx_queues; i++)
5997 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5998 &(adapter->tx_ring[i]->state));
5999 /* re-enable flow director interrupts */
6000 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
6001 } else {
6002 e_err(probe, "failed to finish FDIR re-initialization, "
6003 "ignored adding FDIR ATR filters\n");
6004 }
6005}
6006
6007/**
6008 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
6009 * @adapter - pointer to the device adapter structure
6010 *
6011 * This function serves two purposes. First it strobes the interrupt lines
 6012 * in order to make certain interrupts are occurring. Secondly it sets the
6013 * bits needed to check for TX hangs. As a result we should immediately
 6014 * determine if a hang has occurred.
6015 */
6016static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6017{
6018 struct ixgbe_hw *hw = &adapter->hw;
6019 u64 eics = 0;
6020 int i;
6021
6022 /* If we're down or resetting, just bail */
6023 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6024 test_bit(__IXGBE_RESETTING, &adapter->state))
6025 return;
6026
6027 /* Force detection of hung controller */
6028 if (netif_carrier_ok(adapter->netdev)) {
6029 for (i = 0; i < adapter->num_tx_queues; i++)
6030 set_check_for_tx_hang(adapter->tx_ring[i]);
6031 }
5999 6032
6000 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 6033 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
6001 /* 6034 /*
@@ -6005,108 +6038,172 @@ static void ixgbe_watchdog(unsigned long data)
6005 */ 6038 */
6006 IXGBE_WRITE_REG(hw, IXGBE_EICS, 6039 IXGBE_WRITE_REG(hw, IXGBE_EICS,
6007 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 6040 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6008 goto watchdog_reschedule; 6041 } else {
6009 } 6042 /* get one bit for every active tx/rx interrupt vector */
6010 6043 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
6011 /* get one bit for every active tx/rx interrupt vector */ 6044 struct ixgbe_q_vector *qv = adapter->q_vector[i];
6012 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 6045 if (qv->rxr_count || qv->txr_count)
6013 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 6046 eics |= ((u64)1 << i);
6014 if (qv->rxr_count || qv->txr_count) 6047 }
6015 eics |= ((u64)1 << i);
6016 } 6048 }
6017 6049
6018 /* Cause software interrupt to ensure rx rings are cleaned */ 6050 /* Cause software interrupt to ensure rings are cleaned */
6019 ixgbe_irq_rearm_queues(adapter, eics); 6051 ixgbe_irq_rearm_queues(adapter, eics);
6020 6052
6021watchdog_reschedule:
6022 /* Reset the timer */
6023 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
6024
6025watchdog_short_circuit:
6026 schedule_work(&adapter->watchdog_task);
6027} 6053}
6028 6054
6029/** 6055/**
6030 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber 6056 * ixgbe_watchdog_update_link - update the link status
6031 * @work: pointer to work_struct containing our data 6057 * @adapter - pointer to the device adapter structure
6058 * @link_speed - pointer to a u32 to store the link_speed
6032 **/ 6059 **/
6033static void ixgbe_multispeed_fiber_task(struct work_struct *work) 6060static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
6034{ 6061{
6035 struct ixgbe_adapter *adapter = container_of(work,
6036 struct ixgbe_adapter,
6037 multispeed_fiber_task);
6038 struct ixgbe_hw *hw = &adapter->hw; 6062 struct ixgbe_hw *hw = &adapter->hw;
6039 u32 autoneg; 6063 u32 link_speed = adapter->link_speed;
6040 bool negotiation; 6064 bool link_up = adapter->link_up;
6065 int i;
6041 6066
6042 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; 6067 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6043 autoneg = hw->phy.autoneg_advertised; 6068 return;
6044 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 6069
6045 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 6070 if (hw->mac.ops.check_link) {
6046 hw->mac.autotry_restart = false; 6071 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
6047 if (hw->mac.ops.setup_link) 6072 } else {
6048 hw->mac.ops.setup_link(hw, autoneg, negotiation, true); 6073 /* always assume link is up, if no check link function */
6049 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 6074 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6050 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; 6075 link_up = true;
6076 }
6077 if (link_up) {
6078 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6079 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
6080 hw->mac.ops.fc_enable(hw, i);
6081 } else {
6082 hw->mac.ops.fc_enable(hw, 0);
6083 }
6084 }
6085
6086 if (link_up ||
6087 time_after(jiffies, (adapter->link_check_timeout +
6088 IXGBE_TRY_LINK_TIMEOUT))) {
6089 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6090 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6091 IXGBE_WRITE_FLUSH(hw);
6092 }
6093
6094 adapter->link_up = link_up;
6095 adapter->link_speed = link_speed;
6051} 6096}
6052 6097
6053/** 6098/**
6054 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module 6099 * ixgbe_watchdog_link_is_up - update netif_carrier status and
6055 * @work: pointer to work_struct containing our data 6100 * print link up message
6101 * @adapter - pointer to the device adapter structure
6056 **/ 6102 **/
6057static void ixgbe_sfp_config_module_task(struct work_struct *work) 6103static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6058{ 6104{
6059 struct ixgbe_adapter *adapter = container_of(work, 6105 struct net_device *netdev = adapter->netdev;
6060 struct ixgbe_adapter,
6061 sfp_config_module_task);
6062 struct ixgbe_hw *hw = &adapter->hw; 6106 struct ixgbe_hw *hw = &adapter->hw;
6063 u32 err; 6107 u32 link_speed = adapter->link_speed;
6108 bool flow_rx, flow_tx;
6064 6109
6065 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; 6110 /* only continue if link was previously down */
6111 if (netif_carrier_ok(netdev))
6112 return;
6066 6113
6067 /* Time for electrical oscillations to settle down */ 6114 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6068 msleep(100);
6069 err = hw->phy.ops.identify_sfp(hw);
6070 6115
6071 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 6116 switch (hw->mac.type) {
6072 e_dev_err("failed to initialize because an unsupported SFP+ " 6117 case ixgbe_mac_82598EB: {
6073 "module type was detected.\n"); 6118 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6074 e_dev_err("Reload the driver after installing a supported " 6119 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6075 "module.\n"); 6120 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6076 unregister_netdev(adapter->netdev); 6121 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6077 return; 6122 }
6123 break;
6124 case ixgbe_mac_X540:
6125 case ixgbe_mac_82599EB: {
6126 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6127 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6128 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6129 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6130 }
6131 break;
6132 default:
6133 flow_tx = false;
6134 flow_rx = false;
6135 break;
6078 } 6136 }
6079 if (hw->mac.ops.setup_sfp) 6137 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
6080 hw->mac.ops.setup_sfp(hw); 6138 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6139 "10 Gbps" :
6140 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6141 "1 Gbps" :
6142 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6143 "100 Mbps" :
6144 "unknown speed"))),
6145 ((flow_rx && flow_tx) ? "RX/TX" :
6146 (flow_rx ? "RX" :
6147 (flow_tx ? "TX" : "None"))));
6081 6148
6082 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 6149 netif_carrier_on(netdev);
6083 /* This will also work for DA Twinax connections */ 6150#ifdef HAVE_IPLINK_VF_CONFIG
6084 schedule_work(&adapter->multispeed_fiber_task); 6151 ixgbe_check_vf_rate_limit(adapter);
6085 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; 6152#endif /* HAVE_IPLINK_VF_CONFIG */
6086} 6153}
6087 6154
6088/** 6155/**
6089 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table 6156 * ixgbe_watchdog_link_is_down - update netif_carrier status and
6090 * @work: pointer to work_struct containing our data 6157 * print link down message
6158 * @adapter - pointer to the adapter structure
6091 **/ 6159 **/
6092static void ixgbe_fdir_reinit_task(struct work_struct *work) 6160static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter)
6093{ 6161{
6094 struct ixgbe_adapter *adapter = container_of(work, 6162 struct net_device *netdev = adapter->netdev;
6095 struct ixgbe_adapter,
6096 fdir_reinit_task);
6097 struct ixgbe_hw *hw = &adapter->hw; 6163 struct ixgbe_hw *hw = &adapter->hw;
6164
6165 adapter->link_up = false;
6166 adapter->link_speed = 0;
6167
6168 /* only continue if link was up previously */
6169 if (!netif_carrier_ok(netdev))
6170 return;
6171
6172 /* poll for SFP+ cable when link is down */
6173 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6174 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6175
6176 e_info(drv, "NIC Link is Down\n");
6177 netif_carrier_off(netdev);
6178}
6179
6180/**
6181 * ixgbe_watchdog_flush_tx - flush queues on link down
6182 * @adapter - pointer to the device adapter structure
6183 **/
6184static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6185{
6098 int i; 6186 int i;
6187 int some_tx_pending = 0;
6099 6188
6100 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 6189 if (!netif_carrier_ok(adapter->netdev)) {
6101 for (i = 0; i < adapter->num_tx_queues; i++) 6190 for (i = 0; i < adapter->num_tx_queues; i++) {
6102 set_bit(__IXGBE_TX_FDIR_INIT_DONE, 6191 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6103 &(adapter->tx_ring[i]->state)); 6192 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
6104 } else { 6193 some_tx_pending = 1;
6105 e_err(probe, "failed to finish FDIR re-initialization, " 6194 break;
6106 "ignored adding FDIR ATR filters\n"); 6195 }
6196 }
6197
6198 if (some_tx_pending) {
6199 /* We've lost link, so the controller stops DMA,
6200 * but we've got queued Tx work that's never going
6201 * to get done, so reset controller to flush Tx.
6202 * (Do the reset outside of interrupt context).
6203 */
6204 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
6205 }
6107 } 6206 }
6108 /* Done FDIR Re-initialization, enable transmits */
6109 netif_tx_start_all_queues(adapter->netdev);
6110} 6207}
6111 6208
6112static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) 6209static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
@@ -6129,133 +6226,186 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6129 e_warn(drv, "%d Spoofed packets detected\n", ssvpc); 6226 e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
6130} 6227}
6131 6228
6132static DEFINE_MUTEX(ixgbe_watchdog_lock); 6229/**
6230 * ixgbe_watchdog_subtask - check and bring link up
6231 * @adapter - pointer to the device adapter structure
6232 **/
6233static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6234{
6235 /* if interface is down do nothing */
6236 if (test_bit(__IXGBE_DOWN, &adapter->state))
6237 return;
6238
6239 ixgbe_watchdog_update_link(adapter);
6240
6241 if (adapter->link_up)
6242 ixgbe_watchdog_link_is_up(adapter);
6243 else
6244 ixgbe_watchdog_link_is_down(adapter);
6245
6246 ixgbe_spoof_check(adapter);
6247 ixgbe_update_stats(adapter);
6248
6249 ixgbe_watchdog_flush_tx(adapter);
6250}
6133 6251
6134/** 6252/**
6135 * ixgbe_watchdog_task - worker thread to bring link up 6253 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
6136 * @work: pointer to work_struct containing our data 6254 * @adapter - the ixgbe adapter structure
6137 **/ 6255 **/
6138static void ixgbe_watchdog_task(struct work_struct *work) 6256static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6139{ 6257{
6140 struct ixgbe_adapter *adapter = container_of(work,
6141 struct ixgbe_adapter,
6142 watchdog_task);
6143 struct net_device *netdev = adapter->netdev;
6144 struct ixgbe_hw *hw = &adapter->hw; 6258 struct ixgbe_hw *hw = &adapter->hw;
6145 u32 link_speed; 6259 s32 err;
6146 bool link_up;
6147 int i;
6148 struct ixgbe_ring *tx_ring;
6149 int some_tx_pending = 0;
6150 6260
6151 mutex_lock(&ixgbe_watchdog_lock); 6261 /* not searching for SFP so there is nothing to do here */
6262 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6263 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6264 return;
6152 6265
6153 link_up = adapter->link_up; 6266 /* someone else is in init, wait until next service event */
6154 link_speed = adapter->link_speed; 6267 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6268 return;
6155 6269
6156 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 6270 err = hw->phy.ops.identify_sfp(hw);
6157 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 6271 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6158 if (link_up) { 6272 goto sfp_out;
6159#ifdef CONFIG_DCB
6160 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6161 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
6162 hw->mac.ops.fc_enable(hw, i);
6163 } else {
6164 hw->mac.ops.fc_enable(hw, 0);
6165 }
6166#else
6167 hw->mac.ops.fc_enable(hw, 0);
6168#endif
6169 }
6170 6273
6171 if (link_up || 6274 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6172 time_after(jiffies, (adapter->link_check_timeout + 6275 /* If no cable is present, then we need to reset
6173 IXGBE_TRY_LINK_TIMEOUT))) { 6276 * the next time we find a good cable. */
6174 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 6277 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
6175 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6176 }
6177 adapter->link_up = link_up;
6178 adapter->link_speed = link_speed;
6179 } 6278 }
6180 6279
6181 if (link_up) { 6280 /* exit on error */
6182 if (!netif_carrier_ok(netdev)) { 6281 if (err)
6183 bool flow_rx, flow_tx; 6282 goto sfp_out;
6184
6185 switch (hw->mac.type) {
6186 case ixgbe_mac_82598EB: {
6187 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6188 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6189 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6190 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6191 }
6192 break;
6193 case ixgbe_mac_82599EB:
6194 case ixgbe_mac_X540: {
6195 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6196 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6197 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6198 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6199 }
6200 break;
6201 default:
6202 flow_tx = false;
6203 flow_rx = false;
6204 break;
6205 }
6206 6283
6207 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 6284 /* exit if reset not needed */
6208 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6285 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6209 "10 Gbps" : 6286 goto sfp_out;
6210 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6211 "1 Gbps" :
6212 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6213 "100 Mbps" :
6214 "unknown speed"))),
6215 ((flow_rx && flow_tx) ? "RX/TX" :
6216 (flow_rx ? "RX" :
6217 (flow_tx ? "TX" : "None"))));
6218
6219 netif_carrier_on(netdev);
6220 ixgbe_check_vf_rate_limit(adapter);
6221 } else {
6222 /* Force detection of hung controller */
6223 for (i = 0; i < adapter->num_tx_queues; i++) {
6224 tx_ring = adapter->tx_ring[i];
6225 set_check_for_tx_hang(tx_ring);
6226 }
6227 }
6228 } else {
6229 adapter->link_up = false;
6230 adapter->link_speed = 0;
6231 if (netif_carrier_ok(netdev)) {
6232 e_info(drv, "NIC Link is Down\n");
6233 netif_carrier_off(netdev);
6234 }
6235 }
6236 6287
6237 if (!netif_carrier_ok(netdev)) { 6288 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
6238 for (i = 0; i < adapter->num_tx_queues; i++) {
6239 tx_ring = adapter->tx_ring[i];
6240 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
6241 some_tx_pending = 1;
6242 break;
6243 }
6244 }
6245 6289
6246 if (some_tx_pending) { 6290 /*
6247 /* We've lost link, so the controller stops DMA, 6291 * A module may be identified correctly, but the EEPROM may not have
6248 * but we've got queued Tx work that's never going 6292 * support for that module. setup_sfp() will fail in that case, so
6249 * to get done, so reset controller to flush Tx. 6293 * we should not allow that module to load.
6250 * (Do the reset outside of interrupt context). 6294 */
6251 */ 6295 if (hw->mac.type == ixgbe_mac_82598EB)
6252 schedule_work(&adapter->reset_task); 6296 err = hw->phy.ops.reset(hw);
6253 } 6297 else
6298 err = hw->mac.ops.setup_sfp(hw);
6299
6300 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6301 goto sfp_out;
6302
6303 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6304 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6305
6306sfp_out:
6307 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6308
6309 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6310 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6311 e_dev_err("failed to initialize because an unsupported "
6312 "SFP+ module type was detected.\n");
6313 e_dev_err("Reload the driver after installing a "
6314 "supported module.\n");
6315 unregister_netdev(adapter->netdev);
6254 } 6316 }
6317}
6255 6318
6256 ixgbe_spoof_check(adapter); 6319/**
6257 ixgbe_update_stats(adapter); 6320 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
6258 mutex_unlock(&ixgbe_watchdog_lock); 6321 * @adapter - the ixgbe adapter structure
6322 **/
6323static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6324{
6325 struct ixgbe_hw *hw = &adapter->hw;
6326 u32 autoneg;
6327 bool negotiation;
6328
6329 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
6330 return;
6331
6332 /* someone else is in init, wait until next service event */
6333 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6334 return;
6335
6336 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6337
6338 autoneg = hw->phy.autoneg_advertised;
6339 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
6340 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
6341 hw->mac.autotry_restart = false;
6342 if (hw->mac.ops.setup_link)
6343 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
6344
6345 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6346 adapter->link_check_timeout = jiffies;
6347 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6348}
6349
6350/**
6351 * ixgbe_service_timer - Timer Call-back
6352 * @data: pointer to adapter cast into an unsigned long
6353 **/
6354static void ixgbe_service_timer(unsigned long data)
6355{
6356 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6357 unsigned long next_event_offset;
6358
6359 /* poll faster when waiting for link */
6360 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6361 next_event_offset = HZ / 10;
6362 else
6363 next_event_offset = HZ * 2;
6364
6365 /* Reset the timer */
6366 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6367
6368 ixgbe_service_event_schedule(adapter);
6369}
6370
6371static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6372{
6373 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6374 return;
6375
6376 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
6377
6378 /* If we're already down or resetting, just bail */
6379 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6380 test_bit(__IXGBE_RESETTING, &adapter->state))
6381 return;
6382
6383 ixgbe_dump(adapter);
6384 netdev_err(adapter->netdev, "Reset adapter\n");
6385 adapter->tx_timeout_count++;
6386
6387 ixgbe_reinit_locked(adapter);
6388}
6389
6390/**
6391 * ixgbe_service_task - manages and runs subtasks
6392 * @work: pointer to work_struct containing our data
6393 **/
6394static void ixgbe_service_task(struct work_struct *work)
6395{
6396 struct ixgbe_adapter *adapter = container_of(work,
6397 struct ixgbe_adapter,
6398 service_task);
6399
6400 ixgbe_reset_subtask(adapter);
6401 ixgbe_sfp_detection_subtask(adapter);
6402 ixgbe_sfp_link_config_subtask(adapter);
6403 ixgbe_check_overtemp_subtask(adapter);
6404 ixgbe_watchdog_subtask(adapter);
6405 ixgbe_fdir_reinit_subtask(adapter);
6406 ixgbe_check_hang_subtask(adapter);
6407
6408 ixgbe_service_event_complete(adapter);
6259} 6409}
6260 6410
6261static int ixgbe_tso(struct ixgbe_adapter *adapter, 6411static int ixgbe_tso(struct ixgbe_adapter *adapter,
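The hunks above carry the core of this patch: one service timer reschedules itself and calls ixgbe_service_event_schedule(), and one work item, ixgbe_service_task(), walks every subtask before calling ixgbe_service_event_complete(). The two helpers themselves are not shown in these hunks; a hedged sketch of how they would typically be written, using the __IXGBE_SERVICE_SCHED bit that probe now clears:

	/* Assumed helper bodies, not quoted from this patch */
	static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
	{
		/* queue at most one service run at a time, and none once down */
		if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
		    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
			schedule_work(&adapter->service_task);
	}

	static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
	{
		/* let the next timer tick or interrupt queue the work again */
		clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
	}

The design point is that every deferred action (reset, SFP detection, link config, overtemp, watchdog, FDIR reinit, hang check) now funnels through a single work item, so teardown only needs the one cancel_work_sync() seen in ixgbe_remove() below.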
@@ -7094,6 +7244,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
7094#ifdef CONFIG_PCI_IOV 7244#ifdef CONFIG_PCI_IOV
7095 struct ixgbe_hw *hw = &adapter->hw; 7245 struct ixgbe_hw *hw = &adapter->hw;
7096 int err; 7246 int err;
7247 int num_vf_macvlans, i;
7248 struct vf_macvlans *mv_list;
7097 7249
7098 if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) 7250 if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
7099 return; 7251 return;
@@ -7110,6 +7262,26 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
7110 e_err(probe, "Failed to enable PCI sriov: %d\n", err); 7262 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
7111 goto err_novfs; 7263 goto err_novfs;
7112 } 7264 }
7265
7266 num_vf_macvlans = hw->mac.num_rar_entries -
7267 (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
7268
7269 adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
7270 sizeof(struct vf_macvlans),
7271 GFP_KERNEL);
7272 if (mv_list) {
7273 /* Initialize list of VF macvlans */
7274 INIT_LIST_HEAD(&adapter->vf_mvs.l);
7275 for (i = 0; i < num_vf_macvlans; i++) {
7276 mv_list->vf = -1;
7277 mv_list->free = true;
7278 mv_list->rar_entry = hw->mac.num_rar_entries -
7279 (i + adapter->num_vfs + 1);
7280 list_add(&mv_list->l, &adapter->vf_mvs.l);
7281 mv_list++;
7282 }
7283 }
7284
7113 /* If call to enable VFs succeeded then allocate memory 7285 /* If call to enable VFs succeeded then allocate memory
7114 * for per VF control structures. 7286 * for per VF control structures.
7115 */ 7287 */
@@ -7280,22 +7452,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7280 hw->phy.mdio.mdio_read = ixgbe_mdio_read; 7452 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
7281 hw->phy.mdio.mdio_write = ixgbe_mdio_write; 7453 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
7282 7454
7283 /* set up this timer and work struct before calling get_invariants
7284 * which might start the timer
7285 */
7286 init_timer(&adapter->sfp_timer);
7287 adapter->sfp_timer.function = ixgbe_sfp_timer;
7288 adapter->sfp_timer.data = (unsigned long) adapter;
7289
7290 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
7291
7292 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
7293 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
7294
7295 /* a new SFP+ module arrival, called from GPI SDP2 context */
7296 INIT_WORK(&adapter->sfp_config_module_task,
7297 ixgbe_sfp_config_module_task);
7298
7299 ii->get_invariants(hw); 7455 ii->get_invariants(hw);
7300 7456
7301 /* setup the private structure */ 7457 /* setup the private structure */
@@ -7329,17 +7485,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7329 hw->phy.reset_if_overtemp = false; 7485 hw->phy.reset_if_overtemp = false;
7330 if (err == IXGBE_ERR_SFP_NOT_PRESENT && 7486 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
7331 hw->mac.type == ixgbe_mac_82598EB) { 7487 hw->mac.type == ixgbe_mac_82598EB) {
7332 /*
7333 * Start a kernel thread to watch for a module to arrive.
7334 * Only do this for 82598, since 82599 will generate
7335 * interrupts on module arrival.
7336 */
7337 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7338 mod_timer(&adapter->sfp_timer,
7339 round_jiffies(jiffies + (2 * HZ)));
7340 err = 0; 7488 err = 0;
7341 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 7489 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
7342 e_dev_err("failed to initialize because an unsupported SFP+ " 7490 e_dev_err("failed to load because an unsupported SFP+ "
7343 "module type was detected.\n"); 7491 "module type was detected.\n");
7344 e_dev_err("Reload the driver after installing a supported " 7492 e_dev_err("Reload the driver after installing a supported "
7345 "module.\n"); 7493 "module.\n");
@@ -7361,9 +7509,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7361 netdev->features |= NETIF_F_TSO; 7509 netdev->features |= NETIF_F_TSO;
7362 netdev->features |= NETIF_F_TSO6; 7510 netdev->features |= NETIF_F_TSO6;
7363 netdev->features |= NETIF_F_GRO; 7511 netdev->features |= NETIF_F_GRO;
7512 netdev->features |= NETIF_F_RXHASH;
7364 7513
7365 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 7514 switch (adapter->hw.mac.type) {
7515 case ixgbe_mac_82599EB:
7516 case ixgbe_mac_X540:
7366 netdev->features |= NETIF_F_SCTP_CSUM; 7517 netdev->features |= NETIF_F_SCTP_CSUM;
7518 break;
7519 default:
7520 break;
7521 }
7367 7522
7368 netdev->vlan_features |= NETIF_F_TSO; 7523 netdev->vlan_features |= NETIF_F_TSO;
7369 netdev->vlan_features |= NETIF_F_TSO6; 7524 netdev->vlan_features |= NETIF_F_TSO6;
@@ -7424,17 +7579,19 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7424 (hw->mac.type == ixgbe_mac_82599EB)))) 7579 (hw->mac.type == ixgbe_mac_82599EB))))
7425 hw->mac.ops.disable_tx_laser(hw); 7580 hw->mac.ops.disable_tx_laser(hw);
7426 7581
7427 init_timer(&adapter->watchdog_timer); 7582 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
7428 adapter->watchdog_timer.function = ixgbe_watchdog; 7583 (unsigned long) adapter);
7429 adapter->watchdog_timer.data = (unsigned long)adapter;
7430 7584
7431 INIT_WORK(&adapter->reset_task, ixgbe_reset_task); 7585 INIT_WORK(&adapter->service_task, ixgbe_service_task);
7432 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); 7586 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
7433 7587
7434 err = ixgbe_init_interrupt_scheme(adapter); 7588 err = ixgbe_init_interrupt_scheme(adapter);
7435 if (err) 7589 if (err)
7436 goto err_sw_init; 7590 goto err_sw_init;
7437 7591
7592 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
7593 netdev->features &= ~NETIF_F_RXHASH;
7594
7438 switch (pdev->device) { 7595 switch (pdev->device) {
7439 case IXGBE_DEV_ID_82599_SFP: 7596 case IXGBE_DEV_ID_82599_SFP:
7440 /* Only this subdevice supports WOL */ 7597 /* Only this subdevice supports WOL */
@@ -7463,8 +7620,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7463 7620
7464 /* print bus type/speed/width info */ 7621 /* print bus type/speed/width info */
7465 e_dev_info("(PCI Express:%s:%s) %pM\n", 7622 e_dev_info("(PCI Express:%s:%s) %pM\n",
7466 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" : 7623 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
7467 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" : 7624 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
7468 "Unknown"), 7625 "Unknown"),
7469 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : 7626 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
7470 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : 7627 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
@@ -7513,13 +7670,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7513 /* carrier off reporting is important to ethtool even BEFORE open */ 7670 /* carrier off reporting is important to ethtool even BEFORE open */
7514 netif_carrier_off(netdev); 7671 netif_carrier_off(netdev);
7515 7672
7516 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7517 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7518 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
7519
7520 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
7521 INIT_WORK(&adapter->check_overtemp_task,
7522 ixgbe_check_overtemp_task);
7523#ifdef CONFIG_IXGBE_DCA 7673#ifdef CONFIG_IXGBE_DCA
7524 if (dca_add_requester(&pdev->dev) == 0) { 7674 if (dca_add_requester(&pdev->dev) == 0) {
7525 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 7675 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -7546,11 +7696,7 @@ err_sw_init:
7546err_eeprom: 7696err_eeprom:
7547 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7697 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7548 ixgbe_disable_sriov(adapter); 7698 ixgbe_disable_sriov(adapter);
7549 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 7699 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7550 del_timer_sync(&adapter->sfp_timer);
7551 cancel_work_sync(&adapter->sfp_task);
7552 cancel_work_sync(&adapter->multispeed_fiber_task);
7553 cancel_work_sync(&adapter->sfp_config_module_task);
7554 iounmap(hw->hw_addr); 7700 iounmap(hw->hw_addr);
7555err_ioremap: 7701err_ioremap:
7556 free_netdev(netdev); 7702 free_netdev(netdev);
@@ -7578,24 +7724,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7578 struct net_device *netdev = adapter->netdev; 7724 struct net_device *netdev = adapter->netdev;
7579 7725
7580 set_bit(__IXGBE_DOWN, &adapter->state); 7726 set_bit(__IXGBE_DOWN, &adapter->state);
7581 7727 cancel_work_sync(&adapter->service_task);
7582 /*
7583 * The timers may be rescheduled, so explicitly disable them
7584 * from being rescheduled.
7585 */
7586 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7587 del_timer_sync(&adapter->watchdog_timer);
7588 del_timer_sync(&adapter->sfp_timer);
7589
7590 cancel_work_sync(&adapter->watchdog_task);
7591 cancel_work_sync(&adapter->sfp_task);
7592 cancel_work_sync(&adapter->multispeed_fiber_task);
7593 cancel_work_sync(&adapter->sfp_config_module_task);
7594 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7595 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7596 cancel_work_sync(&adapter->fdir_reinit_task);
7597 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
7598 cancel_work_sync(&adapter->check_overtemp_task);
7599 7728
7600#ifdef CONFIG_IXGBE_DCA 7729#ifdef CONFIG_IXGBE_DCA
7601 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 7730 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index fe6ea81dc7f8..b239bdac38da 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -36,9 +36,6 @@
36#define IXGBE_VFMAILBOX 0x002FC 36#define IXGBE_VFMAILBOX 0x002FC
37#define IXGBE_VFMBMEM 0x00200 37#define IXGBE_VFMBMEM 0x00200
38 38
39#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
40#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
41
42#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ 39#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
43#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ 40#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
44#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ 41#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
@@ -70,6 +67,7 @@
70#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 67#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
71#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 68#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
72#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 69#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
70#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
73 71
74/* length of permanent address message returned from PF */ 72/* length of permanent address message returned from PF */
75#define IXGBE_VF_PERMADDR_MSG_LEN 4 73#define IXGBE_VF_PERMADDR_MSG_LEN 4
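The new IXGBE_VF_SET_MACVLAN opcode travels in the first mailbox word together with a filter index in the VT_MSGINFO field, which the PF extracts in ixgbe_rcv_msg_from_vf() (see the ixgbe_sriov.c hunk below). A standalone sketch of that packing; the shift, mask, and low-bits handling here are illustrative assumptions rather than values quoted from this patch:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define VF_SET_MACVLAN	0x06	/* matches IXGBE_VF_SET_MACVLAN above */
	/* Illustrative stand-ins for IXGBE_VT_MSGINFO_SHIFT/MASK */
	#define MSGINFO_SHIFT	16
	#define MSGINFO_MASK	(0xFFu << MSGINFO_SHIFT)

	/* VF side: ask the PF for unicast filter 'index' on the given MAC */
	static void build_set_macvlan(uint32_t msgbuf[4], unsigned int index,
				      const uint8_t mac[6])
	{
		msgbuf[0] = VF_SET_MACVLAN |
			    ((index << MSGINFO_SHIFT) & MSGINFO_MASK);
		memcpy(&msgbuf[1], mac, 6);	/* MAC address in words 1..2 */
	}

	int main(void)
	{
		uint32_t msgbuf[4] = { 0 };
		const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

		build_set_macvlan(msgbuf, 1, mac);

		/* PF side: recover the index the same way the driver does */
		unsigned int index = (msgbuf[0] & MSGINFO_MASK) >> MSGINFO_SHIFT;
		printf("opcode=0x%02x index=%u\n",
		       (unsigned int)(msgbuf[0] & 0xFF), index);
		return 0;
	}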
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index df5b8aa4795d..735f686c3b36 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -449,7 +449,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
449 MDIO_MMD_AN, 449 MDIO_MMD_AN,
450 &autoneg_reg); 450 &autoneg_reg);
451 451
452 autoneg_reg &= ~ADVERTISE_100FULL; 452 autoneg_reg &= ~(ADVERTISE_100FULL |
453 ADVERTISE_100HALF);
453 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) 454 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
454 autoneg_reg |= ADVERTISE_100FULL; 455 autoneg_reg |= ADVERTISE_100FULL;
455 456
@@ -656,7 +657,8 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
656 MDIO_MMD_AN, 657 MDIO_MMD_AN,
657 &autoneg_reg); 658 &autoneg_reg);
658 659
659 autoneg_reg &= ~ADVERTISE_100FULL; 660 autoneg_reg &= ~(ADVERTISE_100FULL |
661 ADVERTISE_100HALF);
660 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) 662 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
661 autoneg_reg |= ADVERTISE_100FULL; 663 autoneg_reg |= ADVERTISE_100FULL;
662 664
@@ -753,7 +755,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
753 &phy_data); 755 &phy_data);
754 if ((phy_data & MDIO_CTRL1_RESET) == 0) 756 if ((phy_data & MDIO_CTRL1_RESET) == 0)
755 break; 757 break;
756 msleep(10); 758 usleep_range(10000, 20000);
757 } 759 }
758 760
759 if ((phy_data & MDIO_CTRL1_RESET) != 0) { 761 if ((phy_data & MDIO_CTRL1_RESET) != 0) {
@@ -782,7 +784,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
782 case IXGBE_DELAY_NL: 784 case IXGBE_DELAY_NL:
783 data_offset++; 785 data_offset++;
784 hw_dbg(hw, "DELAY: %d MS\n", edata); 786 hw_dbg(hw, "DELAY: %d MS\n", edata);
785 msleep(edata); 787 usleep_range(edata * 1000, edata * 2000);
786 break; 788 break;
787 case IXGBE_DATA_NL: 789 case IXGBE_DATA_NL:
788 hw_dbg(hw, "DATA:\n"); 790 hw_dbg(hw, "DATA:\n");
@@ -1220,7 +1222,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1220 swfw_mask = IXGBE_GSSR_PHY0_SM; 1222 swfw_mask = IXGBE_GSSR_PHY0_SM;
1221 1223
1222 do { 1224 do {
1223 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { 1225 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
1224 status = IXGBE_ERR_SWFW_SYNC; 1226 status = IXGBE_ERR_SWFW_SYNC;
1225 goto read_byte_out; 1227 goto read_byte_out;
1226 } 1228 }
@@ -1267,7 +1269,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1267 break; 1269 break;
1268 1270
1269fail: 1271fail:
1270 ixgbe_release_swfw_sync(hw, swfw_mask); 1272 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1271 msleep(100); 1273 msleep(100);
1272 ixgbe_i2c_bus_clear(hw); 1274 ixgbe_i2c_bus_clear(hw);
1273 retry++; 1275 retry++;
@@ -1278,7 +1280,7 @@ fail:
1278 1280
1279 } while (retry < max_retry); 1281 } while (retry < max_retry);
1280 1282
1281 ixgbe_release_swfw_sync(hw, swfw_mask); 1283 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1282 1284
1283read_byte_out: 1285read_byte_out:
1284 return status; 1286 return status;
@@ -1306,7 +1308,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1306 else 1308 else
1307 swfw_mask = IXGBE_GSSR_PHY0_SM; 1309 swfw_mask = IXGBE_GSSR_PHY0_SM;
1308 1310
1309 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { 1311 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
1310 status = IXGBE_ERR_SWFW_SYNC; 1312 status = IXGBE_ERR_SWFW_SYNC;
1311 goto write_byte_out; 1313 goto write_byte_out;
1312 } 1314 }
@@ -1350,7 +1352,7 @@ fail:
1350 hw_dbg(hw, "I2C byte write error.\n"); 1352 hw_dbg(hw, "I2C byte write error.\n");
1351 } while (retry < max_retry); 1353 } while (retry < max_retry);
1352 1354
1353 ixgbe_release_swfw_sync(hw, swfw_mask); 1355 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1354 1356
1355write_byte_out: 1357write_byte_out:
1356 return status; 1358 return status;
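The msleep() to usleep_range() conversions above follow the usual guidance: for waits of a few milliseconds to a few tens of milliseconds, msleep() can oversleep by a large factor, while usleep_range() gives the scheduler an explicit window. A generic sketch of the polling pattern these hunks touch; the helper name, MMD choice, and return code are illustrative, not the driver's actual function:

	/* Hypothetical poll loop showing the bounded-sleep pattern */
	static int wait_for_phy_reset_clear(struct ixgbe_hw *hw)
	{
		u16 ctrl = 0;
		int i;

		for (i = 0; i < 30; i++) {
			hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
					     &ctrl);
			if (!(ctrl & MDIO_CTRL1_RESET))
				return 0;
			/* sleep 10-20 ms; msleep(10) may take far longer */
			usleep_range(10000, 20000);
		}
		return -EIO;
	}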
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 6e50d8328942..ac99b0458fe2 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -82,6 +82,21 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
82 return 0; 82 return 0;
83} 83}
84 84
85static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
86{
87 struct ixgbe_hw *hw = &adapter->hw;
88 struct list_head *pos;
89 struct vf_macvlans *entry;
90
91 list_for_each(pos, &adapter->vf_mvs.l) {
92 entry = list_entry(pos, struct vf_macvlans, l);
93 if (entry->free == false)
94 hw->mac.ops.set_rar(hw, entry->rar_entry,
95 entry->vf_macvlan,
96 entry->vf, IXGBE_RAH_AV);
97 }
98}
99
85void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) 100void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
86{ 101{
87 struct ixgbe_hw *hw = &adapter->hw; 102 struct ixgbe_hw *hw = &adapter->hw;
@@ -102,6 +117,9 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
102 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); 117 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
103 } 118 }
104 } 119 }
120
121 /* Restore any VF macvlans */
122 ixgbe_restore_vf_macvlans(adapter);
105} 123}
106 124
107static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, 125static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
@@ -110,7 +128,7 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 128 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 129}
112 130
113void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) 131static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
114{ 132{
115 struct ixgbe_hw *hw = &adapter->hw; 133 struct ixgbe_hw *hw = &adapter->hw;
116 int new_mtu = msgbuf[1]; 134 int new_mtu = msgbuf[1];
@@ -200,6 +218,61 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
200 return 0; 218 return 0;
201} 219}
202 220
221static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
222 int vf, int index, unsigned char *mac_addr)
223{
224 struct ixgbe_hw *hw = &adapter->hw;
225 struct list_head *pos;
226 struct vf_macvlans *entry;
227
228 if (index <= 1) {
229 list_for_each(pos, &adapter->vf_mvs.l) {
230 entry = list_entry(pos, struct vf_macvlans, l);
231 if (entry->vf == vf) {
232 entry->vf = -1;
233 entry->free = true;
234 entry->is_macvlan = false;
235 hw->mac.ops.clear_rar(hw, entry->rar_entry);
236 }
237 }
238 }
239
240 /*
241 * If index was zero then we were asked to clear the uc list
242 * for the VF. We're done.
243 */
244 if (!index)
245 return 0;
246
247 entry = NULL;
248
249 list_for_each(pos, &adapter->vf_mvs.l) {
250 entry = list_entry(pos, struct vf_macvlans, l);
251 if (entry->free)
252 break;
253 }
254
255 /*
256 * If we traversed the entire list and didn't find a free entry
257 * then we're out of space on the RAR table. Also entry may
258 * be NULL because the original memory allocation for the list
259 * failed, which is not fatal but does mean we can't support
260 * VF requests for MACVLAN because we couldn't allocate
261 * memory for the list management required.
262 */
263 if (!entry || !entry->free)
264 return -ENOSPC;
265
266 entry->free = false;
267 entry->is_macvlan = true;
268 entry->vf = vf;
269 memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
270
271 hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
272
273 return 0;
274}
275
203int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) 276int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
204{ 277{
205 unsigned char vf_mac_addr[6]; 278 unsigned char vf_mac_addr[6];
@@ -251,12 +324,12 @@ static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
251static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) 324static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
252{ 325{
253 u32 mbx_size = IXGBE_VFMAILBOX_SIZE; 326 u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
254 u32 msgbuf[mbx_size]; 327 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
255 struct ixgbe_hw *hw = &adapter->hw; 328 struct ixgbe_hw *hw = &adapter->hw;
256 s32 retval; 329 s32 retval;
257 int entries; 330 int entries;
258 u16 *hash_list; 331 u16 *hash_list;
259 int add, vid; 332 int add, vid, index;
260 u8 *new_mac; 333 u8 *new_mac;
261 334
262 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); 335 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -345,6 +418,24 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
345 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); 418 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
346 } 419 }
347 break; 420 break;
421 case IXGBE_VF_SET_MACVLAN:
422 index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
423 IXGBE_VT_MSGINFO_SHIFT;
424 /*
425 * If the VF is allowed to set MAC filters then turn off
426 * anti-spoofing to avoid false positives. An index
427 * greater than 0 will indicate the VF is setting a
428 * macvlan MAC filter.
429 */
430 if (index > 0 && adapter->antispoofing_enabled) {
431 hw->mac.ops.set_mac_anti_spoofing(hw, false,
432 adapter->num_vfs);
433 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
434 adapter->antispoofing_enabled = false;
435 }
436 retval = ixgbe_set_vf_macvlan(adapter, vf, index,
437 (unsigned char *)(&msgbuf[1]));
438 break;
348 default: 439 default:
349 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); 440 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
350 retval = IXGBE_ERR_MBX; 441 retval = IXGBE_ERR_MBX;
@@ -452,7 +543,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
452 goto out; 543 goto out;
453 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); 544 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
454 ixgbe_set_vmolr(hw, vf, false); 545 ixgbe_set_vmolr(hw, vf, false);
455 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); 546 if (adapter->antispoofing_enabled)
547 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
456 adapter->vfinfo[vf].pf_vlan = vlan; 548 adapter->vfinfo[vf].pf_vlan = vlan;
457 adapter->vfinfo[vf].pf_qos = qos; 549 adapter->vfinfo[vf].pf_qos = qos;
458 dev_info(&adapter->pdev->dev, 550 dev_info(&adapter->pdev->dev,
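The hunks above add allocation of the vf_macvlans list in probe, the per-VF claim and clear path (ixgbe_set_vf_macvlan), and the restore-after-reset walk, but not the matching teardown. A hedged sketch of what that cleanup would look like, assuming the single kcalloc'd array shown in the probe hunk; the function name here is hypothetical:

	static void ixgbe_free_vf_macvlans(struct ixgbe_adapter *adapter)
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct vf_macvlans *entry, *tmp;

		list_for_each_entry_safe(entry, tmp, &adapter->vf_mvs.l, l) {
			/* drop any RAR slot still claimed by a VF */
			if (!entry->free)
				hw->mac.ops.clear_rar(hw, entry->rar_entry);
			list_del(&entry->l);
		}

		/* entries live in one kcalloc'd block, so one kfree suffices */
		kfree(adapter->mv_list);
		adapter->mv_list = NULL;
	}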
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 25c1fb7eda06..fa43f2507f43 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -58,9 +58,11 @@
58#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 58#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
59#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 59#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
61#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
61#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 62#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
62#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 63#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
63#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C 64#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
65#define IXGBE_DEV_ID_82599_LS 0x154F
64#define IXGBE_DEV_ID_X540T 0x1528 66#define IXGBE_DEV_ID_X540T 0x1528
65 67
66/* General Registers */ 68/* General Registers */
@@ -163,6 +165,9 @@
163 (0x0D018 + ((_i - 64) * 0x40))) 165 (0x0D018 + ((_i - 64) * 0x40)))
164#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ 166#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
165 (0x0D028 + ((_i - 64) * 0x40))) 167 (0x0D028 + ((_i - 64) * 0x40)))
168#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
169 (0x0D02C + ((_i - 64) * 0x40)))
170#define IXGBE_RSCDBU 0x03028
166#define IXGBE_RDDCC 0x02F20 171#define IXGBE_RDDCC 0x02F20
167#define IXGBE_RXMEMWRAP 0x03190 172#define IXGBE_RXMEMWRAP 0x03190
168#define IXGBE_STARCTRL 0x03024 173#define IXGBE_STARCTRL 0x03024
@@ -227,17 +232,23 @@
227#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ 232#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
228#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ 233#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
229#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ 234#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
230#define IXGBE_VT_CTL 0x051B0 235#define IXGBE_VT_CTL 0x051B0
231#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) 236#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
232#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) 237#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
233#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) 238#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
234#define IXGBE_QDE 0x2F04 239#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
235#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ 240#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
236#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) 241#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
237#define IXGBE_VMRCTL(_i) (0x0F600 + ((_i) * 4)) 242#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
238#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) 243#define IXGBE_QDE 0x2F04
239#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) 244#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
240#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ 245#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
246#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
247#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
248#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
249#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
250#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
251#define IXGBE_RXFECCERR0 0x051B8
241#define IXGBE_LLITHRESH 0x0EC90 252#define IXGBE_LLITHRESH 0x0EC90
242#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ 253#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
243#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ 254#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -364,7 +375,7 @@
364#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ 375#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
365#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ 376#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
366#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ 377#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
367#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all 6 wakeup filters*/ 378#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
368#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ 379#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
369 380
370/* Wake Up Status */ 381/* Wake Up Status */
@@ -406,7 +417,6 @@
406#define IXGBE_SECTXSTAT 0x08804 417#define IXGBE_SECTXSTAT 0x08804
407#define IXGBE_SECTXBUFFAF 0x08808 418#define IXGBE_SECTXBUFFAF 0x08808
408#define IXGBE_SECTXMINIFG 0x08810 419#define IXGBE_SECTXMINIFG 0x08810
409#define IXGBE_SECTXSTAT 0x08804
410#define IXGBE_SECRXCTRL 0x08D00 420#define IXGBE_SECRXCTRL 0x08D00
411#define IXGBE_SECRXSTAT 0x08D04 421#define IXGBE_SECRXSTAT 0x08D04
412 422
@@ -499,21 +509,6 @@
499 509
500#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 510#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
501 511
502/* HW RSC registers */
503#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
504 (0x0D02C + ((_i - 64) * 0x40)))
505#define IXGBE_RSCDBU 0x03028
506#define IXGBE_RSCCTL_RSCEN 0x01
507#define IXGBE_RSCCTL_MAXDESC_1 0x00
508#define IXGBE_RSCCTL_MAXDESC_4 0x04
509#define IXGBE_RSCCTL_MAXDESC_8 0x08
510#define IXGBE_RSCCTL_MAXDESC_16 0x0C
511#define IXGBE_RXDADV_RSCCNT_SHIFT 17
512#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
513#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
514#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
515#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000
516
517/* DCB registers */ 512/* DCB registers */
518#define IXGBE_RTRPCS 0x02430 513#define IXGBE_RTRPCS 0x02430
519#define IXGBE_RTTDCS 0x04900 514#define IXGBE_RTTDCS 0x04900
@@ -522,6 +517,7 @@
522#define IXGBE_RTRUP2TC 0x03020 517#define IXGBE_RTRUP2TC 0x03020
523#define IXGBE_RTTUP2TC 0x0C800 518#define IXGBE_RTTUP2TC 0x0C800
524#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ 519#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
520#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
525#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ 521#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
526#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ 522#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
527#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ 523#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -540,7 +536,7 @@
540 (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) 536 (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
541 537
542 538
543/* FCoE registers */ 539/* FCoE DMA Context Registers */
544#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ 540#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
545#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ 541#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
546#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ 542#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
@@ -677,6 +673,10 @@
677#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ 673#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
678#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ 674#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
679#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ 675#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
676#define IXGBE_O2BGPTC 0x041C4
677#define IXGBE_O2BSPC 0x087B0
678#define IXGBE_B2OSPC 0x041C0
679#define IXGBE_B2OGPRC 0x02F90
680#define IXGBE_PCRC8ECL 0x0E810 680#define IXGBE_PCRC8ECL 0x0E810
681#define IXGBE_PCRC8ECH 0x0E811 681#define IXGBE_PCRC8ECH 0x0E811
682#define IXGBE_PCRC8ECH_MASK 0x1F 682#define IXGBE_PCRC8ECH_MASK 0x1F
@@ -742,17 +742,10 @@
742#define IXGBE_PBACLR_82599 0x11068 742#define IXGBE_PBACLR_82599 0x11068
743#define IXGBE_CIAA_82599 0x11088 743#define IXGBE_CIAA_82599 0x11088
744#define IXGBE_CIAD_82599 0x1108C 744#define IXGBE_CIAD_82599 0x1108C
745#define IXGBE_PCIE_DIAG_0_82599 0x11090 745#define IXGBE_PICAUSE 0x110B0
746#define IXGBE_PCIE_DIAG_1_82599 0x11094 746#define IXGBE_PIENA 0x110B8
747#define IXGBE_PCIE_DIAG_2_82599 0x11098
748#define IXGBE_PCIE_DIAG_3_82599 0x1109C
749#define IXGBE_PCIE_DIAG_4_82599 0x110A0
750#define IXGBE_PCIE_DIAG_5_82599 0x110A4
751#define IXGBE_PCIE_DIAG_6_82599 0x110A8
752#define IXGBE_PCIE_DIAG_7_82599 0x110C0
753#define IXGBE_INTRPT_CSR_82599 0x110B0
754#define IXGBE_INTRPT_MASK_82599 0x110B8
755#define IXGBE_CDQ_MBR_82599 0x110B4 747#define IXGBE_CDQ_MBR_82599 0x110B4
748#define IXGBE_PCIESPARE 0x110BC
756#define IXGBE_MISC_REG_82599 0x110F0 749#define IXGBE_MISC_REG_82599 0x110F0
757#define IXGBE_ECC_CTRL_0_82599 0x11100 750#define IXGBE_ECC_CTRL_0_82599 0x11100
758#define IXGBE_ECC_CTRL_1_82599 0x11104 751#define IXGBE_ECC_CTRL_1_82599 0x11104
@@ -785,7 +778,19 @@
785#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ 778#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
786#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ 779#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
787#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ 780#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
788#define IXGBE_RXUDP 0x08C1C /* Time Sync Rx UDP Port - RW */ 781#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
782#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
783#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
784#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
785#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
786#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
787#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
788#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
789#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
790#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
791#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
792#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
793#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
789 794
790/* Diagnostic Registers */ 795/* Diagnostic Registers */
791#define IXGBE_RDSTATCTL 0x02C20 796#define IXGBE_RDSTATCTL 0x02C20
@@ -829,8 +834,20 @@
829#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ 834#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
830#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ 835#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
831#define IXGBE_PCIEECCCTL 0x1106C 836#define IXGBE_PCIEECCCTL 0x1106C
837#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
838#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
839#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
840#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/
841#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
842#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
843#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
844#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/
832#define IXGBE_PCIEECCCTL0 0x11100 845#define IXGBE_PCIEECCCTL0 0x11100
833#define IXGBE_PCIEECCCTL1 0x11104 846#define IXGBE_PCIEECCCTL1 0x11104
847#define IXGBE_RXDBUECC 0x03F70
848#define IXGBE_TXDBUECC 0x0CF70
849#define IXGBE_RXDBUEST 0x03F74
850#define IXGBE_TXDBUEST 0x0CF74
834#define IXGBE_PBTXECC 0x0C300 851#define IXGBE_PBTXECC 0x0C300
835#define IXGBE_PBRXECC 0x03300 852#define IXGBE_PBRXECC 0x03300
836#define IXGBE_GHECCR 0x110B0 853#define IXGBE_GHECCR 0x110B0
@@ -871,6 +888,7 @@
871#define IXGBE_AUTOC3 0x042AC 888#define IXGBE_AUTOC3 0x042AC
872#define IXGBE_ANLP1 0x042B0 889#define IXGBE_ANLP1 0x042B0
873#define IXGBE_ANLP2 0x042B4 890#define IXGBE_ANLP2 0x042B4
891#define IXGBE_MACC 0x04330
874#define IXGBE_ATLASCTL 0x04800 892#define IXGBE_ATLASCTL 0x04800
875#define IXGBE_MMNGC 0x042D0 893#define IXGBE_MMNGC 0x042D0
876#define IXGBE_ANLPNP1 0x042D4 894#define IXGBE_ANLPNP1 0x042D4
@@ -883,14 +901,49 @@
883#define IXGBE_MPVC 0x04318 901#define IXGBE_MPVC 0x04318
884#define IXGBE_SGMIIC 0x04314 902#define IXGBE_SGMIIC 0x04314
885 903
904/* Statistics Registers */
905#define IXGBE_RXNFGPC 0x041B0
906#define IXGBE_RXNFGBCL 0x041B4
907#define IXGBE_RXNFGBCH 0x041B8
908#define IXGBE_RXDGPC 0x02F50
909#define IXGBE_RXDGBCL 0x02F54
910#define IXGBE_RXDGBCH 0x02F58
911#define IXGBE_RXDDGPC 0x02F5C
912#define IXGBE_RXDDGBCL 0x02F60
913#define IXGBE_RXDDGBCH 0x02F64
914#define IXGBE_RXLPBKGPC 0x02F68
915#define IXGBE_RXLPBKGBCL 0x02F6C
916#define IXGBE_RXLPBKGBCH 0x02F70
917#define IXGBE_RXDLPBKGPC 0x02F74
918#define IXGBE_RXDLPBKGBCL 0x02F78
919#define IXGBE_RXDLPBKGBCH 0x02F7C
920#define IXGBE_TXDGPC 0x087A0
921#define IXGBE_TXDGBCL 0x087A4
922#define IXGBE_TXDGBCH 0x087A8
923
924#define IXGBE_RXDSTATCTRL 0x02F40
925
926/* Copper Pond 2 link timeout */
886#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 927#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
887 928
888/* Omer CORECTL */ 929/* Omer CORECTL */
889#define IXGBE_CORECTL 0x014F00 930#define IXGBE_CORECTL 0x014F00
890/* BARCTRL */ 931/* BARCTRL */
891#define IXGBE_BARCTRL 0x110F4 932#define IXGBE_BARCTRL 0x110F4
892#define IXGBE_BARCTRL_FLSIZE 0x0700 933#define IXGBE_BARCTRL_FLSIZE 0x0700
893#define IXGBE_BARCTRL_CSRSIZE 0x2000 934#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
935#define IXGBE_BARCTRL_CSRSIZE 0x2000
936
937/* RSCCTL Bit Masks */
938#define IXGBE_RSCCTL_RSCEN 0x01
939#define IXGBE_RSCCTL_MAXDESC_1 0x00
940#define IXGBE_RSCCTL_MAXDESC_4 0x04
941#define IXGBE_RSCCTL_MAXDESC_8 0x08
942#define IXGBE_RSCCTL_MAXDESC_16 0x0C
943
944/* RSCDBU Bit Masks */
945#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
946#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
894 947
895/* RDRXCTL Bit Masks */ 948/* RDRXCTL Bit Masks */
896#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ 949#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
@@ -898,6 +951,8 @@
898#define IXGBE_RDRXCTL_MVMEN 0x00000020 951#define IXGBE_RDRXCTL_MVMEN 0x00000020
899#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ 952#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
900#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ 953#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
954#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
955#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
901#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ 956#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
902#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ 957#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
903 958
@@ -969,8 +1024,8 @@
969#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ 1024#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
970#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ 1025#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
971#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ 1026#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
972#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */ 1027#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
973#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/ 1028#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
974#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ 1029#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
975#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ 1030#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
976#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ 1031#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
@@ -1057,6 +1112,7 @@
1057#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ 1112#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
1058#define IXGBE_GPIE_EIAME 0x40000000 1113#define IXGBE_GPIE_EIAME 0x40000000
1059#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 1114#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
1115#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
1060#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ 1116#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
1061#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ 1117#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
1062#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ 1118#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
@@ -1291,6 +1347,11 @@
1291#define IXGBE_FTQF_POOL_SHIFT 8 1347#define IXGBE_FTQF_POOL_SHIFT 8
1292#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F 1348#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
1293#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 1349#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
1350#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
1351#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
1352#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
1353#define IXGBE_FTQF_DEST_PORT_MASK 0x17
1354#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
1294#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 1355#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
1295#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 1356#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
1296 1357
@@ -1333,11 +1394,11 @@
1333 * 1394 *
1334 * Current filters: 1395 * Current filters:
1335 * EAPOL 802.1x (0x888e): Filter 0 1396 * EAPOL 802.1x (0x888e): Filter 0
1336 * BCN (0x8904): Filter 1 1397 * FCoE (0x8906): Filter 2
1337 * 1588 (0x88f7): Filter 3 1398 * 1588 (0x88f7): Filter 3
1399 * FIP (0x8914): Filter 4
1338 */ 1400 */
1339#define IXGBE_ETQF_FILTER_EAPOL 0 1401#define IXGBE_ETQF_FILTER_EAPOL 0
1340#define IXGBE_ETQF_FILTER_BCN 1
1341#define IXGBE_ETQF_FILTER_FCOE 2 1402#define IXGBE_ETQF_FILTER_FCOE 2
1342#define IXGBE_ETQF_FILTER_1588 3 1403#define IXGBE_ETQF_FILTER_1588 3
1343#define IXGBE_ETQF_FILTER_FIP 4 1404#define IXGBE_ETQF_FILTER_FIP 4
@@ -1448,6 +1509,11 @@
1448#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1509#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1449#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) 1510#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
1450 1511
1512#define IXGBE_MACC_FLU 0x00000001
1513#define IXGBE_MACC_FSV_10G 0x00030000
1514#define IXGBE_MACC_FS 0x00040000
1515#define IXGBE_MAC_RX2TX_LPBK 0x00000002
1516
1451/* LINKS Bit Masks */ 1517/* LINKS Bit Masks */
1452#define IXGBE_LINKS_KX_AN_COMP 0x80000000 1518#define IXGBE_LINKS_KX_AN_COMP 0x80000000
1453#define IXGBE_LINKS_UP 0x40000000 1519#define IXGBE_LINKS_UP 0x40000000
@@ -1501,7 +1567,6 @@
1501#define IXGBE_ANLP1_ASM_PAUSE 0x0800 1567#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1502#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 1568#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
1503 1569
1504
1505/* SW Semaphore Register bitmasks */ 1570/* SW Semaphore Register bitmasks */
1506#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1571#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1507#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1572#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -1514,6 +1579,10 @@
1514#define IXGBE_GSSR_PHY1_SM 0x0004 1579#define IXGBE_GSSR_PHY1_SM 0x0004
1515#define IXGBE_GSSR_MAC_CSR_SM 0x0008 1580#define IXGBE_GSSR_MAC_CSR_SM 0x0008
1516#define IXGBE_GSSR_FLASH_SM 0x0010 1581#define IXGBE_GSSR_FLASH_SM 0x0010
1582#define IXGBE_GSSR_SW_MNG_SM 0x0400
1583
1584/* FW Status register bitmask */
1585#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
1517 1586
1518/* EEC Register */ 1587/* EEC Register */
1519#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ 1588#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
@@ -1534,6 +1603,7 @@
1534/* EEPROM Addressing bits based on type (0-small, 1-large) */ 1603/* EEPROM Addressing bits based on type (0-small, 1-large) */
1535#define IXGBE_EEC_ADDR_SIZE 0x00000400 1604#define IXGBE_EEC_ADDR_SIZE 0x00000400
1536#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ 1605#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
1606#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */
1537 1607
1538#define IXGBE_EEC_SIZE_SHIFT 11 1608#define IXGBE_EEC_SIZE_SHIFT 11
1539#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 1609#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
@@ -1563,8 +1633,10 @@
1563#define IXGBE_FW_PTR 0x0F 1633#define IXGBE_FW_PTR 0x0F
1564#define IXGBE_PBANUM0_PTR 0x15 1634#define IXGBE_PBANUM0_PTR 0x15
1565#define IXGBE_PBANUM1_PTR 0x16 1635#define IXGBE_PBANUM1_PTR 0x16
1566#define IXGBE_DEVICE_CAPS 0x2C 1636#define IXGBE_FREE_SPACE_PTR 0X3E
1567#define IXGBE_SAN_MAC_ADDR_PTR 0x28 1637#define IXGBE_SAN_MAC_ADDR_PTR 0x28
1638#define IXGBE_DEVICE_CAPS 0x2C
1639#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
1568#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 1640#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
1569#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 1641#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
1570 1642
@@ -1601,6 +1673,10 @@
1601 1673
1602#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 1674#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
1603 1675
1676#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
1677#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
1678#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
1679
1604#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS 1680#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
1605#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1681#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1606#endif 1682#endif
@@ -1616,14 +1692,25 @@
1616#define IXGBE_FLUDONE_ATTEMPTS 20000 1692#define IXGBE_FLUDONE_ATTEMPTS 20000
1617#endif 1693#endif
1618 1694
1695#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
1696#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
1697#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
1698#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
1699
1619#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 1700#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
1620#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 1701#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
1621#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 1702#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
1622#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 1703#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
1704#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
1705#define IXGBE_FW_LESM_STATE_1 0x1
1706#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
1623#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 1707#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
1624#define IXGBE_FW_PATCH_VERSION_4 0x7 1708#define IXGBE_FW_PATCH_VERSION_4 0x7
1625 1709#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
1626/* Alternative SAN MAC Address Block */ 1710#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
1711#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
1712#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
1713#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
1627#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ 1714#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
1628#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ 1715#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
1629#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ 1716#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
@@ -1688,6 +1775,7 @@
1688/* Transmit Config masks */ 1775/* Transmit Config masks */
1689#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ 1776#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
1690#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ 1777#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
1778#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
1691/* Enable short packet padding to 64 bytes */ 1779/* Enable short packet padding to 64 bytes */
1692#define IXGBE_TX_PAD_ENABLE 0x00000400 1780#define IXGBE_TX_PAD_ENABLE 0x00000400
1693#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ 1781#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
@@ -1701,9 +1789,9 @@
1701#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ 1789#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
1702#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 1790#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
1703#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 1791#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
1704#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
1705#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ 1792#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
1706#define IXGBE_RXDCTL_RLPML_EN 0x00008000 1793#define IXGBE_RXDCTL_RLPML_EN 0x00008000
1794#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
1707 1795
1708#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ 1796#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
1709#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ 1797#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -1719,6 +1807,8 @@
1719#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ 1807#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
1720#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ 1808#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
1721 1809
1810#define IXGBE_MFLCN_RPFCE_SHIFT 4
1811
1722/* Multiple Receive Queue Control */ 1812/* Multiple Receive Queue Control */
1723#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ 1813#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
1724#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ 1814#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
@@ -1859,6 +1949,8 @@
1859#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 1949#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
1860#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 1950#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
1861#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 1951#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
1952#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
1953#define IXGBE_RXDADV_RSCCNT_SHIFT 17
1862#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 1954#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
1863#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 1955#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
1864#define IXGBE_RXDADV_SPH 0x8000 1956#define IXGBE_RXDADV_SPH 0x8000
@@ -1934,15 +2026,6 @@
1934#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) 2026#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
1935#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) 2027#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
1936 2028
1937/* Little Endian defines */
1938#ifndef __le32
1939#define __le32 u32
1940#endif
1941#ifndef __le64
1942#define __le64 u64
1943
1944#endif
1945
1946enum ixgbe_fdir_pballoc_type { 2029enum ixgbe_fdir_pballoc_type {
1947 IXGBE_FDIR_PBALLOC_64K = 0, 2030 IXGBE_FDIR_PBALLOC_64K = 0,
1948 IXGBE_FDIR_PBALLOC_128K, 2031 IXGBE_FDIR_PBALLOC_128K,
@@ -2141,8 +2224,6 @@ typedef u32 ixgbe_link_speed;
2141 IXGBE_LINK_SPEED_1GB_FULL | \ 2224 IXGBE_LINK_SPEED_1GB_FULL | \
2142 IXGBE_LINK_SPEED_10GB_FULL) 2225 IXGBE_LINK_SPEED_10GB_FULL)
2143 2226
2144#define IXGBE_PCIE_DEV_CTRL_2 0xC8
2145#define PCIE_COMPL_TO_VALUE 0x05
2146 2227
2147/* Physical layer type */ 2228/* Physical layer type */
2148typedef u32 ixgbe_physical_layer; 2229typedef u32 ixgbe_physical_layer;
@@ -2315,6 +2396,7 @@ enum ixgbe_sfp_type {
2315enum ixgbe_media_type { 2396enum ixgbe_media_type {
2316 ixgbe_media_type_unknown = 0, 2397 ixgbe_media_type_unknown = 0,
2317 ixgbe_media_type_fiber, 2398 ixgbe_media_type_fiber,
2399 ixgbe_media_type_fiber_lco,
2318 ixgbe_media_type_copper, 2400 ixgbe_media_type_copper,
2319 ixgbe_media_type_backplane, 2401 ixgbe_media_type_backplane,
2320 ixgbe_media_type_cx4, 2402 ixgbe_media_type_cx4,
@@ -2478,6 +2560,10 @@ struct ixgbe_hw_stats {
2478 u64 fcoeptc; 2560 u64 fcoeptc;
2479 u64 fcoedwrc; 2561 u64 fcoedwrc;
2480 u64 fcoedwtc; 2562 u64 fcoedwtc;
2563 u64 b2ospc;
2564 u64 b2ogprc;
2565 u64 o2bgptc;
2566 u64 o2bspc;
2481}; 2567};
2482 2568
2483/* forward declaration */ 2569/* forward declaration */
@@ -2491,7 +2577,9 @@ typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
2491struct ixgbe_eeprom_operations { 2577struct ixgbe_eeprom_operations {
2492 s32 (*init_params)(struct ixgbe_hw *); 2578 s32 (*init_params)(struct ixgbe_hw *);
2493 s32 (*read)(struct ixgbe_hw *, u16, u16 *); 2579 s32 (*read)(struct ixgbe_hw *, u16, u16 *);
2580 s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
2494 s32 (*write)(struct ixgbe_hw *, u16, u16); 2581 s32 (*write)(struct ixgbe_hw *, u16, u16);
2582 s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
2495 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); 2583 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
2496 s32 (*update_checksum)(struct ixgbe_hw *); 2584 s32 (*update_checksum)(struct ixgbe_hw *);
2497 u16 (*calc_checksum)(struct ixgbe_hw *); 2585 u16 (*calc_checksum)(struct ixgbe_hw *);
@@ -2577,6 +2665,7 @@ struct ixgbe_eeprom_info {
2577 u32 semaphore_delay; 2665 u32 semaphore_delay;
2578 u16 word_size; 2666 u16 word_size;
2579 u16 address_bits; 2667 u16 address_bits;
2668 u16 word_page_size;
2580}; 2669};
2581 2670
2582#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 2671#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
@@ -2597,6 +2686,7 @@ struct ixgbe_mac_info {
2597 u32 vft_size; 2686 u32 vft_size;
2598 u32 num_rar_entries; 2687 u32 num_rar_entries;
2599 u32 rar_highwater; 2688 u32 rar_highwater;
2689 u32 rx_pb_size;
2600 u32 max_tx_queues; 2690 u32 max_tx_queues;
2601 u32 max_rx_queues; 2691 u32 max_rx_queues;
2602 u32 max_msix_vectors; 2692 u32 max_msix_vectors;
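
One fix in this header hunk is easy to miss: the MSCA op-code constants were swapped, so IXGBE_MSCA_READ is now op code 11 (0x0C000000) and IXGBE_MSCA_READ_AUTOINC is op code 10 (0x08000000). A minimal user-space check, using only the constants visible in the hunk above, that pulls the two-bit op-code field back out from bit 26:

#include <stdio.h>

#define IXGBE_MSCA_OP_CODE_SHIFT 26          /* from the hunk above */
#define IXGBE_MSCA_READ          0x0C000000  /* OP CODE 11 (read) */
#define IXGBE_MSCA_READ_AUTOINC  0x08000000  /* OP CODE 10 (read, auto inc) */

int main(void)
{
	/* Extract the two-bit op-code field to confirm the corrected encoding. */
	printf("READ op code:         %d\n",
	       (IXGBE_MSCA_READ >> IXGBE_MSCA_OP_CODE_SHIFT) & 0x3);
	printf("READ_AUTOINC op code: %d\n",
	       (IXGBE_MSCA_READ_AUTOINC >> IXGBE_MSCA_OP_CODE_SHIFT) & 0x3);
	return 0;
}
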
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index d9323c08f5c7..4ed687be2fe3 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -37,6 +37,7 @@
37#define IXGBE_X540_RAR_ENTRIES 128 37#define IXGBE_X540_RAR_ENTRIES 128
38#define IXGBE_X540_MC_TBL_SIZE 128 38#define IXGBE_X540_MC_TBL_SIZE 128
39#define IXGBE_X540_VFT_TBL_SIZE 128 39#define IXGBE_X540_VFT_TBL_SIZE 128
40#define IXGBE_X540_RX_PB_SIZE 384
40 41
41static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); 42static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
42static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); 43static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
@@ -226,6 +227,28 @@ mac_reset_top:
226} 227}
227 228
228/** 229/**
230 * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
231 * @hw: pointer to hardware structure
232 *
233 * Starts the hardware using the generic start_hw function
234 * and the generation start_hw function.
235 * Then performs revision-specific operations, if any.
236 **/
237static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
238{
239 s32 ret_val = 0;
240
241 ret_val = ixgbe_start_hw_generic(hw);
242 if (ret_val != 0)
243 goto out;
244
245 ret_val = ixgbe_start_hw_gen2(hw);
246 hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
247out:
248 return ret_val;
249}
250
251/**
229 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type 252 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
230 * @hw: pointer to hardware structure 253 * @hw: pointer to hardware structure
231 * 254 *
@@ -281,74 +304,105 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
281} 304}
282 305
283/** 306/**
284 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD 307 * ixgbe_read_eerd_X540- Read EEPROM word using EERD
285 * @hw: pointer to hardware structure 308 * @hw: pointer to hardware structure
286 * @offset: offset of word in the EEPROM to read 309 * @offset: offset of word in the EEPROM to read
287 * @data: word read from the EERPOM 310 * @data: word read from the EEPROM
311 *
312 * Reads a 16 bit word from the EEPROM using the EERD register.
288 **/ 313 **/
289static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) 314static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
290{ 315{
291 s32 status; 316 s32 status = 0;
292 317
293 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) 318 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
319 0)
294 status = ixgbe_read_eerd_generic(hw, offset, data); 320 status = ixgbe_read_eerd_generic(hw, offset, data);
295 else 321 else
296 status = IXGBE_ERR_SWFW_SYNC; 322 status = IXGBE_ERR_SWFW_SYNC;
297 323
298 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM); 324 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
299 return status; 325 return status;
300} 326}
301 327
302/** 328/**
303 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR 329 * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
304 * @hw: pointer to hardware structure 330 * @hw: pointer to hardware structure
305 * @offset: offset of word in the EEPROM to write 331 * @offset: offset of word in the EEPROM to read
306 * @data: word write to the EEPROM 332 * @words: number of words
333 * @data: word(s) read from the EEPROM
307 * 334 *
308 * Write a 16 bit word to the EEPROM using the EEWR register. 335 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
309 **/ 336 **/
310static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) 337static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
338 u16 offset, u16 words, u16 *data)
311{ 339{
312 u32 eewr; 340 s32 status = 0;
313 s32 status;
314 341
315 hw->eeprom.ops.init_params(hw); 342 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
343 0)
344 status = ixgbe_read_eerd_buffer_generic(hw, offset,
345 words, data);
346 else
347 status = IXGBE_ERR_SWFW_SYNC;
316 348
317 if (offset >= hw->eeprom.word_size) { 349 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
318 status = IXGBE_ERR_EEPROM; 350 return status;
319 goto out; 351}
320 }
321 352
322 eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) | 353/**
323 (data << IXGBE_EEPROM_RW_REG_DATA) | 354 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
324 IXGBE_EEPROM_RW_REG_START; 355 * @hw: pointer to hardware structure
356 * @offset: offset of word in the EEPROM to write
357 * @data: word write to the EEPROM
358 *
359 * Write a 16 bit word to the EEPROM using the EEWR register.
360 **/
361static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
362{
363 s32 status = 0;
325 364
326 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { 365 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
327 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 366 status = ixgbe_write_eewr_generic(hw, offset, data);
328 if (status != 0) { 367 else
329 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 368 status = IXGBE_ERR_SWFW_SYNC;
330 goto out;
331 }
332 369
333 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); 370 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
371 return status;
372}
334 373
335 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 374/**
336 if (status != 0) { 375 * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
337 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 376 * @hw: pointer to hardware structure
338 goto out; 377 * @offset: offset of word in the EEPROM to write
339 } 378 * @words: number of words
340 } else { 379 * @data: word(s) write to the EEPROM
380 *
381 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
382 **/
383static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
384 u16 offset, u16 words, u16 *data)
385{
386 s32 status = 0;
387
388 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
389 0)
390 status = ixgbe_write_eewr_buffer_generic(hw, offset,
391 words, data);
392 else
341 status = IXGBE_ERR_SWFW_SYNC; 393 status = IXGBE_ERR_SWFW_SYNC;
342 }
343 394
344out: 395 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
345 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
346 return status; 396 return status;
347} 397}
348 398
349/** 399/**
350 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum 400 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
351 * @hw: pointer to hardware structure 401 *
402 * This function does not use synchronization for EERD and EEWR. It can
403 * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540.
404 *
405 * @hw: pointer to hardware structure
352 **/ 406 **/
353static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) 407static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
354{ 408{
@@ -359,9 +413,15 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
359 u16 pointer = 0; 413 u16 pointer = 0;
360 u16 word = 0; 414 u16 word = 0;
361 415
416 /*
417 * Do not use hw->eeprom.ops.read because we do not want to take
418 * the synchronization semaphores here. Instead use
419 * ixgbe_read_eerd_generic
420 */
421
362 /* Include 0x0-0x3F in the checksum */ 422 /* Include 0x0-0x3F in the checksum */
363 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 423 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
364 if (hw->eeprom.ops.read(hw, i, &word) != 0) { 424 if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
365 hw_dbg(hw, "EEPROM read failed\n"); 425 hw_dbg(hw, "EEPROM read failed\n");
366 break; 426 break;
367 } 427 }
@@ -376,7 +436,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
376 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) 436 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
377 continue; 437 continue;
378 438
379 if (hw->eeprom.ops.read(hw, i, &pointer) != 0) { 439 if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
380 hw_dbg(hw, "EEPROM read failed\n"); 440 hw_dbg(hw, "EEPROM read failed\n");
381 break; 441 break;
382 } 442 }
@@ -386,7 +446,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
386 pointer >= hw->eeprom.word_size) 446 pointer >= hw->eeprom.word_size)
387 continue; 447 continue;
388 448
389 if (hw->eeprom.ops.read(hw, pointer, &length) != 0) { 449 if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
390 hw_dbg(hw, "EEPROM read failed\n"); 450 hw_dbg(hw, "EEPROM read failed\n");
391 break; 451 break;
392 } 452 }
@@ -397,7 +457,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
397 continue; 457 continue;
398 458
399 for (j = pointer+1; j <= pointer+length; j++) { 459 for (j = pointer+1; j <= pointer+length; j++) {
400 if (hw->eeprom.ops.read(hw, j, &word) != 0) { 460 if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
401 hw_dbg(hw, "EEPROM read failed\n"); 461 hw_dbg(hw, "EEPROM read failed\n");
402 break; 462 break;
403 } 463 }
@@ -411,6 +471,62 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
411} 471}
412 472
413/** 473/**
474 * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
475 * @hw: pointer to hardware structure
476 * @checksum_val: calculated checksum
477 *
478 * Performs checksum calculation and validates the EEPROM checksum. If the
479 * caller does not need checksum_val, the value can be NULL.
480 **/
481static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
482 u16 *checksum_val)
483{
484 s32 status;
485 u16 checksum;
486 u16 read_checksum = 0;
487
488 /*
489 * Read the first word from the EEPROM. If this times out or fails, do
490 * not continue or we could be in for a very long wait while every
491 * EEPROM read fails
492 */
493 status = hw->eeprom.ops.read(hw, 0, &checksum);
494
495 if (status != 0) {
496 hw_dbg(hw, "EEPROM read failed\n");
497 goto out;
498 }
499
500 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
501 checksum = hw->eeprom.ops.calc_checksum(hw);
502
503 /*
504 * Do not use hw->eeprom.ops.read because we do not want to take
505 * the synchronization semaphores twice here.
506 */
507 ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
508 &read_checksum);
509
510 /*
511 * Verify read checksum from EEPROM is the same as
512 * calculated checksum
513 */
514 if (read_checksum != checksum)
515 status = IXGBE_ERR_EEPROM_CHECKSUM;
516
517 /* If the user cares, return the calculated checksum */
518 if (checksum_val)
519 *checksum_val = checksum;
520 } else {
521 status = IXGBE_ERR_SWFW_SYNC;
522 }
523
524 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
525out:
526 return status;
527}
528
529/**
414 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash 530 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
415 * @hw: pointer to hardware structure 531 * @hw: pointer to hardware structure
416 * 532 *
@@ -421,11 +537,35 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
421static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) 537static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
422{ 538{
423 s32 status; 539 s32 status;
540 u16 checksum;
541
542 /*
543 * Read the first word from the EEPROM. If this times out or fails, do
544 * not continue or we could be in for a very long wait while every
545 * EEPROM read fails
546 */
547 status = hw->eeprom.ops.read(hw, 0, &checksum);
548
549 if (status != 0)
550 hw_dbg(hw, "EEPROM read failed\n");
424 551
425 status = ixgbe_update_eeprom_checksum_generic(hw); 552 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
553 checksum = hw->eeprom.ops.calc_checksum(hw);
554
555 /*
556 * Do not use hw->eeprom.ops.write because we do not want to
557 * take the synchronization semaphores twice here.
558 */
559 status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
560 checksum);
426 561
427 if (status) 562 if (status == 0)
428 status = ixgbe_update_flash_X540(hw); 563 status = ixgbe_update_flash_X540(hw);
564 else
565 status = IXGBE_ERR_SWFW_SYNC;
566 }
567
568 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
429 569
430 return status; 570 return status;
431} 571}
@@ -452,7 +592,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
452 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); 592 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
453 593
454 status = ixgbe_poll_flash_update_done_X540(hw); 594 status = ixgbe_poll_flash_update_done_X540(hw);
455 if (status) 595 if (status == 0)
456 hw_dbg(hw, "Flash update complete\n"); 596 hw_dbg(hw, "Flash update complete\n");
457 else 597 else
458 hw_dbg(hw, "Flash update time out\n"); 598 hw_dbg(hw, "Flash update time out\n");
@@ -466,11 +606,10 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
466 } 606 }
467 607
468 status = ixgbe_poll_flash_update_done_X540(hw); 608 status = ixgbe_poll_flash_update_done_X540(hw);
469 if (status) 609 if (status == 0)
470 hw_dbg(hw, "Flash update complete\n"); 610 hw_dbg(hw, "Flash update complete\n");
471 else 611 else
472 hw_dbg(hw, "Flash update time out\n"); 612 hw_dbg(hw, "Flash update time out\n");
473
474 } 613 }
475out: 614out:
476 return status; 615 return status;
@@ -542,7 +681,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
542 * resource (swmask) 681 * resource (swmask)
543 */ 682 */
544 ixgbe_release_swfw_sync_semaphore(hw); 683 ixgbe_release_swfw_sync_semaphore(hw);
545 msleep(5); 684 usleep_range(5000, 10000);
546 } 685 }
547 } 686 }
548 687
@@ -564,7 +703,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
564 } 703 }
565 } 704 }
566 705
567 msleep(5); 706 usleep_range(5000, 10000);
568 return 0; 707 return 0;
569} 708}
570 709
@@ -588,7 +727,7 @@ static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
588 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); 727 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
589 728
590 ixgbe_release_swfw_sync_semaphore(hw); 729 ixgbe_release_swfw_sync_semaphore(hw);
591 msleep(5); 730 usleep_range(5000, 10000);
592} 731}
593 732
594/** 733/**
@@ -658,10 +797,70 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
658 IXGBE_WRITE_FLUSH(hw); 797 IXGBE_WRITE_FLUSH(hw);
659} 798}
660 799
800/**
801 * ixgbe_blink_led_start_X540 - Blink LED based on index.
802 * @hw: pointer to hardware structure
803 * @index: led number to blink
804 *
805 * Devices that implement the version 2 interface:
806 * X540
807 **/
808static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
809{
810 u32 macc_reg;
811 u32 ledctl_reg;
812
813 /*
814 * In order for the blink bit in the LED control register
815 * to work, link and speed must be forced in the MAC. We
816 * will reverse this when we stop the blinking.
817 */
818 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
819 macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
820 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
821
822 /* Set the LED to LINK_UP + BLINK. */
823 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
824 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
825 ledctl_reg |= IXGBE_LED_BLINK(index);
826 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
827 IXGBE_WRITE_FLUSH(hw);
828
829 return 0;
830}
831
832/**
833 * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
834 * @hw: pointer to hardware structure
835 * @index: led number to stop blinking
836 *
837 * Devices that implement the version 2 interface:
838 * X540
839 **/
840static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
841{
842 u32 macc_reg;
843 u32 ledctl_reg;
844
845 /* Restore the LED to its default value. */
846 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
847 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
848 ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
849 ledctl_reg &= ~IXGBE_LED_BLINK(index);
850 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
851
852 /* Unforce link and speed in the MAC. */
853 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
854 macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
855 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
856 IXGBE_WRITE_FLUSH(hw);
857
858 return 0;
859}
661static struct ixgbe_mac_operations mac_ops_X540 = { 860static struct ixgbe_mac_operations mac_ops_X540 = {
662 .init_hw = &ixgbe_init_hw_generic, 861 .init_hw = &ixgbe_init_hw_generic,
663 .reset_hw = &ixgbe_reset_hw_X540, 862 .reset_hw = &ixgbe_reset_hw_X540,
664 .start_hw = &ixgbe_start_hw_generic, 863 .start_hw = &ixgbe_start_hw_X540,
665 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 864 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
666 .get_media_type = &ixgbe_get_media_type_X540, 865 .get_media_type = &ixgbe_get_media_type_X540,
667 .get_supported_physical_layer = 866 .get_supported_physical_layer =
@@ -669,7 +868,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
669 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, 868 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
670 .get_mac_addr = &ixgbe_get_mac_addr_generic, 869 .get_mac_addr = &ixgbe_get_mac_addr_generic,
671 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 870 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
672 .get_device_caps = NULL, 871 .get_device_caps = &ixgbe_get_device_caps_generic,
673 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, 872 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
674 .stop_adapter = &ixgbe_stop_adapter_generic, 873 .stop_adapter = &ixgbe_stop_adapter_generic,
675 .get_bus_info = &ixgbe_get_bus_info_generic, 874 .get_bus_info = &ixgbe_get_bus_info_generic,
@@ -681,8 +880,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
681 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, 880 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
682 .led_on = &ixgbe_led_on_generic, 881 .led_on = &ixgbe_led_on_generic,
683 .led_off = &ixgbe_led_off_generic, 882 .led_off = &ixgbe_led_off_generic,
684 .blink_led_start = &ixgbe_blink_led_start_generic, 883 .blink_led_start = &ixgbe_blink_led_start_X540,
685 .blink_led_stop = &ixgbe_blink_led_stop_generic, 884 .blink_led_stop = &ixgbe_blink_led_stop_X540,
686 .set_rar = &ixgbe_set_rar_generic, 885 .set_rar = &ixgbe_set_rar_generic,
687 .clear_rar = &ixgbe_clear_rar_generic, 886 .clear_rar = &ixgbe_clear_rar_generic,
688 .set_vmdq = &ixgbe_set_vmdq_generic, 887 .set_vmdq = &ixgbe_set_vmdq_generic,
@@ -705,9 +904,11 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
705static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 904static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
706 .init_params = &ixgbe_init_eeprom_params_X540, 905 .init_params = &ixgbe_init_eeprom_params_X540,
707 .read = &ixgbe_read_eerd_X540, 906 .read = &ixgbe_read_eerd_X540,
907 .read_buffer = &ixgbe_read_eerd_buffer_X540,
708 .write = &ixgbe_write_eewr_X540, 908 .write = &ixgbe_write_eewr_X540,
909 .write_buffer = &ixgbe_write_eewr_buffer_X540,
709 .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, 910 .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
710 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 911 .validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
711 .update_checksum = &ixgbe_update_eeprom_checksum_X540, 912 .update_checksum = &ixgbe_update_eeprom_checksum_X540,
712}; 913};
713 914
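
The X540 EEPROM routines above (read, read_buffer, write, write_buffer) all share one shape: take the EEP software/firmware semaphore, delegate to the unsynchronized generic EERD/EEWR helper, and release the semaphore on every path. A self-contained sketch of that wrapper pattern; every name below is an illustrative stand-in, not the driver's real symbol:

#include <stdio.h>
#include <stdint.h>

typedef int32_t s32;
typedef uint16_t u16;

#define TOY_ERR_SWFW_SYNC (-16)   /* stand-in for IXGBE_ERR_SWFW_SYNC */

/* Illustrative stand-ins for acquire_swfw_sync()/release_swfw_sync(). */
static int toy_acquire_sync(void) { return 0; }   /* 0 == acquired */
static void toy_release_sync(void) { }

/* Stand-in for the generic, unsynchronized EERD read helper. */
static s32 toy_read_eerd_generic(u16 offset, u16 *data)
{
	*data = (u16)(0xBEEF ^ offset);   /* dummy EEPROM word */
	return 0;
}

/* The wrapper shape used by ixgbe_read_eerd_X540() and friends:
 * take the semaphore, delegate to the generic helper, always release. */
static s32 toy_read_eerd_x540(u16 offset, u16 *data)
{
	s32 status;

	if (toy_acquire_sync() == 0)
		status = toy_read_eerd_generic(offset, data);
	else
		status = TOY_ERR_SWFW_SYNC;

	toy_release_sync();
	return status;
}

int main(void)
{
	u16 word;

	if (toy_read_eerd_x540(0x2C, &word) == 0)
		printf("EEPROM word 0x2C = 0x%04x\n", word);
	return 0;
}
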
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 0563ab29264e..deee3754b1f7 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -104,11 +104,13 @@ static int ixgbevf_get_settings(struct net_device *netdev,
104 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 104 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
105 105
106 if (link_up) { 106 if (link_up) {
107 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 107 ethtool_cmd_speed_set(
108 SPEED_10000 : SPEED_1000; 108 ecmd,
109 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
110 SPEED_10000 : SPEED_1000);
109 ecmd->duplex = DUPLEX_FULL; 111 ecmd->duplex = DUPLEX_FULL;
110 } else { 112 } else {
111 ecmd->speed = -1; 113 ethtool_cmd_speed_set(ecmd, -1);
112 ecmd->duplex = -1; 114 ecmd->duplex = -1;
113 } 115 }
114 116
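
The ethtool.c hunk above stops writing ecmd->speed directly and goes through ethtool_cmd_speed_set() instead. A toy model of the idea: the legacy speed field is only 16 bits wide, so the accessor is assumed to split larger values across a low and a high half (field names below are illustrative, not the real struct ethtool_cmd layout):

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for struct ethtool_cmd's split speed encoding. */
struct toy_ethtool_cmd {
	uint16_t speed;      /* legacy 16-bit field */
	uint16_t speed_hi;   /* assumed upper half */
};

static void toy_cmd_speed_set(struct toy_ethtool_cmd *ep, uint32_t speed)
{
	ep->speed = (uint16_t)speed;
	ep->speed_hi = (uint16_t)(speed >> 16);
}

static uint32_t toy_cmd_speed(const struct toy_ethtool_cmd *ep)
{
	return ((uint32_t)ep->speed_hi << 16) | ep->speed;
}

int main(void)
{
	struct toy_ethtool_cmd cmd = { 0 };

	/* 100000 Mb/s does not fit in 16 bits, so a raw write to cmd.speed
	 * would truncate it; going through the accessor keeps it intact. */
	toy_cmd_speed_set(&cmd, 100000);
	printf("reported speed: %u Mb/s\n", toy_cmd_speed(&cmd));
	return 0;
}
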
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 05fa7c85deed..d7ab202fb95c 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1460,6 +1460,34 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1460 } 1460 }
1461} 1461}
1462 1462
1463static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1464{
1465 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1466 struct ixgbe_hw *hw = &adapter->hw;
1467 int count = 0;
1468
1469 if ((netdev_uc_count(netdev)) > 10) {
1470 printk(KERN_ERR "Too many unicast filters - No Space\n");
1471 return -ENOSPC;
1472 }
1473
1474 if (!netdev_uc_empty(netdev)) {
1475 struct netdev_hw_addr *ha;
1476 netdev_for_each_uc_addr(ha, netdev) {
1477 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1478 udelay(200);
1479 }
1480 } else {
1481 /*
1482 * If the list is empty then send message to PF driver to
1483 * clear all macvlans on this VF.
1484 */
1485 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1486 }
1487
1488 return count;
1489}
1490
1463/** 1491/**
1464 * ixgbevf_set_rx_mode - Multicast set 1492 * ixgbevf_set_rx_mode - Multicast set
1465 * @netdev: network interface device structure 1493 * @netdev: network interface device structure
@@ -1476,6 +1504,8 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
1476 /* reprogram multicast list */ 1504 /* reprogram multicast list */
1477 if (hw->mac.ops.update_mc_addr_list) 1505 if (hw->mac.ops.update_mc_addr_list)
1478 hw->mac.ops.update_mc_addr_list(hw, netdev); 1506 hw->mac.ops.update_mc_addr_list(hw, netdev);
1507
1508 ixgbevf_write_uc_addr_list(netdev);
1479} 1509}
1480 1510
1481static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1511static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
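
ixgbevf_write_uc_addr_list() above caps the unicast list at 10 entries, programs each address starting at filter index 1, and sends a NULL address at index 0 when the list is empty so the PF clears its macvlan state. A user-space sketch of that cap-then-iterate logic with an invented address list (the real driver walks netdev_for_each_uc_addr() and calls hw->mac.ops.set_uc_addr()):

#include <stdio.h>
#include <errno.h>

#define TOY_MAX_UC_FILTERS 10   /* mirrors the "> 10" check above */

/* Stand-in for hw->mac.ops.set_uc_addr(): just prints the request. */
static void toy_set_uc_addr(int index, const unsigned char *addr)
{
	if (addr)
		printf("filter %d <- %02x:%02x:%02x:%02x:%02x:%02x\n",
		       index, addr[0], addr[1], addr[2],
		       addr[3], addr[4], addr[5]);
	else
		printf("clear all macvlan filters\n");
}

static int toy_write_uc_addr_list(const unsigned char (*list)[6], int count)
{
	int i, programmed = 0;

	if (count > TOY_MAX_UC_FILTERS)
		return -ENOSPC;           /* too many unicast filters */

	if (count == 0) {
		toy_set_uc_addr(0, NULL); /* empty list: ask PF to clear */
		return 0;
	}

	for (i = 0; i < count; i++)
		toy_set_uc_addr(++programmed, list[i]);

	return programmed;
}

int main(void)
{
	const unsigned char macs[2][6] = {
		{ 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 },
		{ 0x00, 0x1b, 0x21, 0x00, 0x00, 0x02 },
	};

	printf("programmed %d filters\n", toy_write_uc_addr_list(macs, 2));
	return 0;
}
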
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index b2b5bf5daa3d..ea393eb03f3a 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -81,6 +81,7 @@
81#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 81#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
82#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 82#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
83#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 83#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
84#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
84 85
85/* length of permanent address message returned from PF */ 86/* length of permanent address message returned from PF */
86#define IXGBE_VF_PERMADDR_MSG_LEN 4 87#define IXGBE_VF_PERMADDR_MSG_LEN 4
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index eecd3bf6833f..aa3682e8c473 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -216,6 +216,39 @@ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
216 return 0; 216 return 0;
217} 217}
218 218
219static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
220{
221 struct ixgbe_mbx_info *mbx = &hw->mbx;
222 u32 msgbuf[3];
223 u8 *msg_addr = (u8 *)(&msgbuf[1]);
224 s32 ret_val;
225
226 memset(msgbuf, 0, sizeof(msgbuf));
227 /*
228 * If index is one then this is the start of a new list and needs
229 * indication to the PF so it can do it's own list management.
230 * If it is zero then that tells the PF to just clear all of
231 * this VF's macvlans and there is no new list.
232 */
233 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
234 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
235 if (addr)
236 memcpy(msg_addr, addr, 6);
237 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
238
239 if (!ret_val)
240 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
241
242 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
243
244 if (!ret_val)
245 if (msgbuf[0] ==
246 (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
247 ret_val = -ENOMEM;
248
249 return ret_val;
250}
251
219/** 252/**
220 * ixgbevf_set_rar_vf - set device MAC address 253 * ixgbevf_set_rar_vf - set device MAC address
221 * @hw: pointer to hardware structure 254 * @hw: pointer to hardware structure
@@ -378,6 +411,7 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
378 .check_link = ixgbevf_check_mac_link_vf, 411 .check_link = ixgbevf_check_mac_link_vf,
379 .set_rar = ixgbevf_set_rar_vf, 412 .set_rar = ixgbevf_set_rar_vf,
380 .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, 413 .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
414 .set_uc_addr = ixgbevf_set_uc_addr_vf,
381 .set_vfta = ixgbevf_set_vfta_vf, 415 .set_vfta = ixgbevf_set_vfta_vf,
382}; 416};
383 417
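
The new ixgbevf_set_uc_addr_vf() above builds a three-word mailbox message: word 0 carries IXGBE_VF_SET_MACVLAN plus the filter index shifted into the MSGINFO field, and words 1-2 carry the six MAC bytes. A toy packing routine follows; the shift value below is an illustrative placeholder, since IXGBE_VT_MSGINFO_SHIFT is defined elsewhere in the driver and is not visible in this hunk:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TOY_VF_SET_MACVLAN   0x06  /* from the mbx.h hunk above */
#define TOY_VT_MSGINFO_SHIFT 8     /* illustrative placeholder value */

/* Pack a SET_MACVLAN request the way ixgbevf_set_uc_addr_vf() does:
 * word 0 = command | (index << MSGINFO shift), words 1-2 = MAC bytes. */
static void toy_pack_macvlan_msg(uint32_t msgbuf[3], uint32_t index,
				 const uint8_t *addr)
{
	memset(msgbuf, 0, 3 * sizeof(msgbuf[0]));
	msgbuf[0] = TOY_VF_SET_MACVLAN | (index << TOY_VT_MSGINFO_SHIFT);
	if (addr)
		memcpy(&msgbuf[1], addr, 6);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t msg[3];

	toy_pack_macvlan_msg(msg, 1, mac);           /* start of a new list */
	printf("msg[0]=0x%08x msg[1]=0x%08x msg[2]=0x%08x\n",
	       msg[0], msg[1], msg[2]);

	toy_pack_macvlan_msg(msg, 0, NULL);          /* clear all macvlans */
	printf("msg[0]=0x%08x\n", msg[0]);
	return 0;
}
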
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 23eb114c149f..10306b492ee6 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -62,6 +62,7 @@ struct ixgbe_mac_operations {
62 62
63 /* RAR, Multicast, VLAN */ 63 /* RAR, Multicast, VLAN */
64 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32); 64 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
65 s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
65 s32 (*init_rx_addrs)(struct ixgbe_hw *); 66 s32 (*init_rx_addrs)(struct ixgbe_hw *);
66 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); 67 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
67 s32 (*enable_mc)(struct ixgbe_hw *); 68 s32 (*enable_mc)(struct ixgbe_hw *);
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 994c80939c7a..b5b174a8c149 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2230,17 +2230,9 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
2230 jme_restart_rx_engine(jme); 2230 jme_restart_rx_engine(jme);
2231 } 2231 }
2232 2232
2233 if (new_mtu > 1900) {
2234 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2235 NETIF_F_TSO | NETIF_F_TSO6);
2236 } else {
2237 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2238 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2239 if (test_bit(JME_FLAG_TSO, &jme->flags))
2240 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2241 }
2242
2243 netdev->mtu = new_mtu; 2233 netdev->mtu = new_mtu;
2234 netdev_update_features(netdev);
2235
2244 jme_reset_link(jme); 2236 jme_reset_link(jme);
2245 2237
2246 return 0; 2238 return 0;
@@ -2563,7 +2555,8 @@ jme_set_settings(struct net_device *netdev,
2563 struct jme_adapter *jme = netdev_priv(netdev); 2555 struct jme_adapter *jme = netdev_priv(netdev);
2564 int rc, fdc = 0; 2556 int rc, fdc = 0;
2565 2557
2566 if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE) 2558 if (ethtool_cmd_speed(ecmd) == SPEED_1000
2559 && ecmd->autoneg != AUTONEG_ENABLE)
2567 return -EINVAL; 2560 return -EINVAL;
2568 2561
2569 /* 2562 /*
@@ -2640,19 +2633,20 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
2640} 2633}
2641 2634
2642static u32 2635static u32
2643jme_get_rx_csum(struct net_device *netdev) 2636jme_fix_features(struct net_device *netdev, u32 features)
2644{ 2637{
2645 struct jme_adapter *jme = netdev_priv(netdev); 2638 if (netdev->mtu > 1900)
2646 return jme->reg_rxmcs & RXMCS_CHECKSUM; 2639 features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
2640 return features;
2647} 2641}
2648 2642
2649static int 2643static int
2650jme_set_rx_csum(struct net_device *netdev, u32 on) 2644jme_set_features(struct net_device *netdev, u32 features)
2651{ 2645{
2652 struct jme_adapter *jme = netdev_priv(netdev); 2646 struct jme_adapter *jme = netdev_priv(netdev);
2653 2647
2654 spin_lock_bh(&jme->rxmcs_lock); 2648 spin_lock_bh(&jme->rxmcs_lock);
2655 if (on) 2649 if (features & NETIF_F_RXCSUM)
2656 jme->reg_rxmcs |= RXMCS_CHECKSUM; 2650 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2657 else 2651 else
2658 jme->reg_rxmcs &= ~RXMCS_CHECKSUM; 2652 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
@@ -2663,42 +2657,6 @@ jme_set_rx_csum(struct net_device *netdev, u32 on)
2663} 2657}
2664 2658
2665static int 2659static int
2666jme_set_tx_csum(struct net_device *netdev, u32 on)
2667{
2668 struct jme_adapter *jme = netdev_priv(netdev);
2669
2670 if (on) {
2671 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2672 if (netdev->mtu <= 1900)
2673 netdev->features |=
2674 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2675 } else {
2676 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2677 netdev->features &=
2678 ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2679 }
2680
2681 return 0;
2682}
2683
2684static int
2685jme_set_tso(struct net_device *netdev, u32 on)
2686{
2687 struct jme_adapter *jme = netdev_priv(netdev);
2688
2689 if (on) {
2690 set_bit(JME_FLAG_TSO, &jme->flags);
2691 if (netdev->mtu <= 1900)
2692 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2693 } else {
2694 clear_bit(JME_FLAG_TSO, &jme->flags);
2695 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2696 }
2697
2698 return 0;
2699}
2700
2701static int
2702jme_nway_reset(struct net_device *netdev) 2660jme_nway_reset(struct net_device *netdev)
2703{ 2661{
2704 struct jme_adapter *jme = netdev_priv(netdev); 2662 struct jme_adapter *jme = netdev_priv(netdev);
@@ -2839,11 +2797,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
2839 .get_link = jme_get_link, 2797 .get_link = jme_get_link,
2840 .get_msglevel = jme_get_msglevel, 2798 .get_msglevel = jme_get_msglevel,
2841 .set_msglevel = jme_set_msglevel, 2799 .set_msglevel = jme_set_msglevel,
2842 .get_rx_csum = jme_get_rx_csum,
2843 .set_rx_csum = jme_set_rx_csum,
2844 .set_tx_csum = jme_set_tx_csum,
2845 .set_tso = jme_set_tso,
2846 .set_sg = ethtool_op_set_sg,
2847 .nway_reset = jme_nway_reset, 2800 .nway_reset = jme_nway_reset,
2848 .get_eeprom_len = jme_get_eeprom_len, 2801 .get_eeprom_len = jme_get_eeprom_len,
2849 .get_eeprom = jme_get_eeprom, 2802 .get_eeprom = jme_get_eeprom,
@@ -2903,6 +2856,8 @@ static const struct net_device_ops jme_netdev_ops = {
2903 .ndo_change_mtu = jme_change_mtu, 2856 .ndo_change_mtu = jme_change_mtu,
2904 .ndo_tx_timeout = jme_tx_timeout, 2857 .ndo_tx_timeout = jme_tx_timeout,
2905 .ndo_vlan_rx_register = jme_vlan_rx_register, 2858 .ndo_vlan_rx_register = jme_vlan_rx_register,
2859 .ndo_fix_features = jme_fix_features,
2860 .ndo_set_features = jme_set_features,
2906}; 2861};
2907 2862
2908static int __devinit 2863static int __devinit
@@ -2957,6 +2912,12 @@ jme_init_one(struct pci_dev *pdev,
2957 netdev->netdev_ops = &jme_netdev_ops; 2912 netdev->netdev_ops = &jme_netdev_ops;
2958 netdev->ethtool_ops = &jme_ethtool_ops; 2913 netdev->ethtool_ops = &jme_ethtool_ops;
2959 netdev->watchdog_timeo = TX_TIMEOUT; 2914 netdev->watchdog_timeo = TX_TIMEOUT;
2915 netdev->hw_features = NETIF_F_IP_CSUM |
2916 NETIF_F_IPV6_CSUM |
2917 NETIF_F_SG |
2918 NETIF_F_TSO |
2919 NETIF_F_TSO6 |
2920 NETIF_F_RXCSUM;
2960 netdev->features = NETIF_F_IP_CSUM | 2921 netdev->features = NETIF_F_IP_CSUM |
2961 NETIF_F_IPV6_CSUM | 2922 NETIF_F_IPV6_CSUM |
2962 NETIF_F_SG | 2923 NETIF_F_SG |
@@ -3040,8 +3001,9 @@ jme_init_one(struct pci_dev *pdev,
3040 jme->reg_txpfc = 0; 3001 jme->reg_txpfc = 0;
3041 jme->reg_pmcs = PMCS_MFEN; 3002 jme->reg_pmcs = PMCS_MFEN;
3042 jme->reg_gpreg1 = GPREG1_DEFAULT; 3003 jme->reg_gpreg1 = GPREG1_DEFAULT;
3043 set_bit(JME_FLAG_TXCSUM, &jme->flags); 3004
3044 set_bit(JME_FLAG_TSO, &jme->flags); 3005 if (jme->reg_rxmcs & RXMCS_CHECKSUM)
3006 netdev->features |= NETIF_F_RXCSUM;
3045 3007
3046 /* 3008 /*
3047 * Get Max Read Req Size from PCI Config Space 3009 * Get Max Read Req Size from PCI Config Space
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 8bf30451e821..e9aaeca96abc 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -468,8 +468,6 @@ struct jme_adapter {
468enum jme_flags_bits { 468enum jme_flags_bits {
469 JME_FLAG_MSI = 1, 469 JME_FLAG_MSI = 1,
470 JME_FLAG_SSET = 2, 470 JME_FLAG_SSET = 2,
471 JME_FLAG_TXCSUM = 3,
472 JME_FLAG_TSO = 4,
473 JME_FLAG_POLL = 5, 471 JME_FLAG_POLL = 5,
474 JME_FLAG_SHUTDOWN = 6, 472 JME_FLAG_SHUTDOWN = 6,
475}; 473};
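
The jme.c/jme.h changes above replace the per-offload ethtool hooks (set_tx_csum, set_tso, set_rx_csum and the JME_FLAG_TXCSUM/TSO bits) with ndo_fix_features/ndo_set_features, so jme_change_mtu() can simply call netdev_update_features(). A rough user-space model of that flow under assumed flag values; the real recalculation is done by the kernel's netdev_update_features(), not by driver code:

#include <stdio.h>
#include <stdint.h>

/* Illustrative feature bits; the real NETIF_F_* values live in the kernel. */
#define TOY_F_IP_CSUM  (1u << 0)
#define TOY_F_TSO      (1u << 1)
#define TOY_F_RXCSUM   (1u << 2)

struct toy_netdev {
	int mtu;
	uint32_t wanted_features;   /* what the user asked for */
	uint32_t features;          /* what is currently active */
};

/* Mirrors jme_fix_features(): frames above 1900 bytes lose csum/TSO. */
static uint32_t toy_fix_features(const struct toy_netdev *dev, uint32_t features)
{
	if (dev->mtu > 1900)
		features &= ~(TOY_F_IP_CSUM | TOY_F_TSO);
	return features;
}

/* Stand-in for jme_set_features(): program hardware from the new set. */
static void toy_set_features(uint32_t features)
{
	printf("programming hw: tx csum=%d tso=%d rx csum=%d\n",
	       !!(features & TOY_F_IP_CSUM),
	       !!(features & TOY_F_TSO),
	       !!(features & TOY_F_RXCSUM));
}

/* Rough model of what netdev_update_features() does on the driver's behalf:
 * recompute the effective set and apply it only if it changed. */
static void toy_update_features(struct toy_netdev *dev)
{
	uint32_t new_features = toy_fix_features(dev, dev->wanted_features);

	if (new_features != dev->features) {
		toy_set_features(new_features);
		dev->features = new_features;
	}
}

int main(void)
{
	struct toy_netdev dev = {
		.mtu = 1500,
		.wanted_features = TOY_F_IP_CSUM | TOY_F_TSO | TOY_F_RXCSUM,
		.features = TOY_F_IP_CSUM | TOY_F_TSO | TOY_F_RXCSUM,
	};

	dev.mtu = 9000;              /* like jme_change_mtu() raising the MTU */
	toy_update_features(&dev);
	printf("active features after MTU change: 0x%x\n", dev.features);
	return 0;
}
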
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7f7d5708a658..41ea5920c158 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -1221,7 +1221,6 @@ struct ksz_port_info {
1221#define LINK_INT_WORKING (1 << 0) 1221#define LINK_INT_WORKING (1 << 0)
1222#define SMALL_PACKET_TX_BUG (1 << 1) 1222#define SMALL_PACKET_TX_BUG (1 << 1)
1223#define HALF_DUPLEX_SIGNAL_BUG (1 << 2) 1223#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
1224#define IPV6_CSUM_GEN_HACK (1 << 3)
1225#define RX_HUGE_FRAME (1 << 4) 1224#define RX_HUGE_FRAME (1 << 4)
1226#define STP_SUPPORT (1 << 8) 1225#define STP_SUPPORT (1 << 8)
1227 1226
@@ -3748,7 +3747,6 @@ static int hw_init(struct ksz_hw *hw)
3748 if (1 == rc) 3747 if (1 == rc)
3749 hw->features |= HALF_DUPLEX_SIGNAL_BUG; 3748 hw->features |= HALF_DUPLEX_SIGNAL_BUG;
3750 } 3749 }
3751 hw->features |= IPV6_CSUM_GEN_HACK;
3752 return rc; 3750 return rc;
3753} 3751}
3754 3752
@@ -4887,8 +4885,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4887 left = hw_alloc_pkt(hw, skb->len, num); 4885 left = hw_alloc_pkt(hw, skb->len, num);
4888 if (left) { 4886 if (left) {
4889 if (left < num || 4887 if (left < num ||
4890 ((hw->features & IPV6_CSUM_GEN_HACK) && 4888 ((CHECKSUM_PARTIAL == skb->ip_summed) &&
4891 (CHECKSUM_PARTIAL == skb->ip_summed) &&
4892 (ETH_P_IPV6 == htons(skb->protocol)))) { 4889 (ETH_P_IPV6 == htons(skb->protocol)))) {
4893 struct sk_buff *org_skb = skb; 4890 struct sk_buff *org_skb = skb;
4894 4891
@@ -6001,6 +5998,7 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6001 struct dev_priv *priv = netdev_priv(dev); 5998 struct dev_priv *priv = netdev_priv(dev);
6002 struct dev_info *hw_priv = priv->adapter; 5999 struct dev_info *hw_priv = priv->adapter;
6003 struct ksz_port *port = &priv->port; 6000 struct ksz_port *port = &priv->port;
6001 u32 speed = ethtool_cmd_speed(cmd);
6004 int rc; 6002 int rc;
6005 6003
6006 /* 6004 /*
@@ -6009,11 +6007,11 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6009 */ 6007 */
6010 if (cmd->autoneg && priv->advertising == cmd->advertising) { 6008 if (cmd->autoneg && priv->advertising == cmd->advertising) {
6011 cmd->advertising |= ADVERTISED_ALL; 6009 cmd->advertising |= ADVERTISED_ALL;
6012 if (10 == cmd->speed) 6010 if (10 == speed)
6013 cmd->advertising &= 6011 cmd->advertising &=
6014 ~(ADVERTISED_100baseT_Full | 6012 ~(ADVERTISED_100baseT_Full |
6015 ADVERTISED_100baseT_Half); 6013 ADVERTISED_100baseT_Half);
6016 else if (100 == cmd->speed) 6014 else if (100 == speed)
6017 cmd->advertising &= 6015 cmd->advertising &=
6018 ~(ADVERTISED_10baseT_Full | 6016 ~(ADVERTISED_10baseT_Full |
6019 ADVERTISED_10baseT_Half); 6017 ADVERTISED_10baseT_Half);
@@ -6035,8 +6033,8 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6035 port->force_link = 0; 6033 port->force_link = 0;
6036 } else { 6034 } else {
6037 port->duplex = cmd->duplex + 1; 6035 port->duplex = cmd->duplex + 1;
6038 if (cmd->speed != 1000) 6036 if (1000 != speed)
6039 port->speed = cmd->speed; 6037 port->speed = speed;
6040 if (cmd->autoneg) 6038 if (cmd->autoneg)
6041 port->force_link = 0; 6039 port->force_link = 0;
6042 else 6040 else
@@ -6583,57 +6581,33 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
6583} 6581}
6584 6582
6585/** 6583/**
6586 * netdev_get_rx_csum - get receive checksum support 6584 * netdev_set_features - set receive checksum support
6587 * @dev: Network device. 6585 * @dev: Network device.
6588 * 6586 * @features: New device features (offloads).
6589 * This function gets receive checksum support setting.
6590 *
6591 * Return true if receive checksum is enabled; false otherwise.
6592 */
6593static u32 netdev_get_rx_csum(struct net_device *dev)
6594{
6595 struct dev_priv *priv = netdev_priv(dev);
6596 struct dev_info *hw_priv = priv->adapter;
6597 struct ksz_hw *hw = &hw_priv->hw;
6598
6599 return hw->rx_cfg &
6600 (DMA_RX_CSUM_UDP |
6601 DMA_RX_CSUM_TCP |
6602 DMA_RX_CSUM_IP);
6603}
6604
6605/**
6606 * netdev_set_rx_csum - set receive checksum support
6607 * @dev: Network device.
6608 * @data: Zero to disable receive checksum support.
6609 * 6587 *
6610 * This function sets receive checksum support setting. 6588 * This function sets receive checksum support setting.
6611 * 6589 *
6612 * Return 0 if successful; otherwise an error code. 6590 * Return 0 if successful; otherwise an error code.
6613 */ 6591 */
6614static int netdev_set_rx_csum(struct net_device *dev, u32 data) 6592static int netdev_set_features(struct net_device *dev, u32 features)
6615{ 6593{
6616 struct dev_priv *priv = netdev_priv(dev); 6594 struct dev_priv *priv = netdev_priv(dev);
6617 struct dev_info *hw_priv = priv->adapter; 6595 struct dev_info *hw_priv = priv->adapter;
6618 struct ksz_hw *hw = &hw_priv->hw; 6596 struct ksz_hw *hw = &hw_priv->hw;
6619 u32 new_setting = hw->rx_cfg;
6620 6597
6621 if (data)
6622 new_setting |=
6623 (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
6624 DMA_RX_CSUM_IP);
6625 else
6626 new_setting &=
6627 ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
6628 DMA_RX_CSUM_IP);
6629 new_setting &= ~DMA_RX_CSUM_UDP;
6630 mutex_lock(&hw_priv->lock); 6598 mutex_lock(&hw_priv->lock);
6631 if (new_setting != hw->rx_cfg) { 6599
6632 hw->rx_cfg = new_setting; 6600 /* see note in hw_setup() */
6633 if (hw->enabled) 6601 if (features & NETIF_F_RXCSUM)
6634 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); 6602 hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
6635 } 6603 else
6604 hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
6605
6606 if (hw->enabled)
6607 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
6608
6636 mutex_unlock(&hw_priv->lock); 6609 mutex_unlock(&hw_priv->lock);
6610
6637 return 0; 6611 return 0;
6638} 6612}
6639 6613
@@ -6658,12 +6632,6 @@ static struct ethtool_ops netdev_ethtool_ops = {
6658 .get_strings = netdev_get_strings, 6632 .get_strings = netdev_get_strings,
6659 .get_sset_count = netdev_get_sset_count, 6633 .get_sset_count = netdev_get_sset_count,
6660 .get_ethtool_stats = netdev_get_ethtool_stats, 6634 .get_ethtool_stats = netdev_get_ethtool_stats,
6661 .get_rx_csum = netdev_get_rx_csum,
6662 .set_rx_csum = netdev_set_rx_csum,
6663 .get_tx_csum = ethtool_op_get_tx_csum,
6664 .set_tx_csum = ethtool_op_set_tx_csum,
6665 .get_sg = ethtool_op_get_sg,
6666 .set_sg = ethtool_op_set_sg,
6667}; 6635};
6668 6636
6669/* 6637/*
@@ -6828,14 +6796,15 @@ static int __init netdev_init(struct net_device *dev)
6828 /* 500 ms timeout */ 6796 /* 500 ms timeout */
6829 dev->watchdog_timeo = HZ / 2; 6797 dev->watchdog_timeo = HZ / 2;
6830 6798
6831 dev->features |= NETIF_F_IP_CSUM; 6799 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
6832 6800
6833 /* 6801 /*
6834 * Hardware does not really support IPv6 checksum generation, but 6802 * Hardware does not really support IPv6 checksum generation, but
6835 * driver actually runs faster with this on. Refer IPV6_CSUM_GEN_HACK. 6803 * driver actually runs faster with this on.
6836 */ 6804 */
6837 dev->features |= NETIF_F_IPV6_CSUM; 6805 dev->hw_features |= NETIF_F_IPV6_CSUM;
6838 dev->features |= NETIF_F_SG; 6806
6807 dev->features |= dev->hw_features;
6839 6808
6840 sema_init(&priv->proc_sem, 1); 6809 sema_init(&priv->proc_sem, 1);
6841 6810
@@ -6860,6 +6829,7 @@ static const struct net_device_ops netdev_ops = {
6860 .ndo_start_xmit = netdev_tx, 6829 .ndo_start_xmit = netdev_tx,
6861 .ndo_tx_timeout = netdev_tx_timeout, 6830 .ndo_tx_timeout = netdev_tx_timeout,
6862 .ndo_change_mtu = netdev_change_mtu, 6831 .ndo_change_mtu = netdev_change_mtu,
6832 .ndo_set_features = netdev_set_features,
6863 .ndo_set_mac_address = netdev_set_mac_address, 6833 .ndo_set_mac_address = netdev_set_mac_address,
6864 .ndo_validate_addr = eth_validate_addr, 6834 .ndo_validate_addr = eth_validate_addr,
6865 .ndo_do_ioctl = netdev_ioctl, 6835 .ndo_do_ioctl = netdev_ioctl,
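
The hunks above are one instance of the pattern repeated throughout this series: the ethtool get/set_rx_csum, get/set_tx_csum and get/set_sg hooks are dropped, the toggleable offloads move into dev->hw_features, and a single ndo_set_features callback reprograms the hardware when userspace flips NETIF_F_RXCSUM. A minimal sketch of that shape follows; the register layout, EXAMPLE_RX_CSUM_BITS and the priv structure are illustrative, not taken from this driver, and locking around the register write (which the real netdev_set_features() does take) is omitted for brevity.

#include <linux/netdevice.h>
#include <linux/io.h>

struct example_priv {
        void __iomem *base;             /* MMIO base, illustrative */
        u32 rx_cfg;                     /* cached RX control word */
};

#define EXAMPLE_RX_CSUM_BITS    0x7     /* IP/TCP/UDP checksum enables, illustrative */

static int example_set_features(struct net_device *dev, u32 features)
{
        struct example_priv *priv = netdev_priv(dev);

        if (features & NETIF_F_RXCSUM)
                priv->rx_cfg |= EXAMPLE_RX_CSUM_BITS;
        else
                priv->rx_cfg &= ~EXAMPLE_RX_CSUM_BITS;

        /* push the cached configuration out to the RX control register */
        writel(priv->rx_cfg, priv->base);

        return 0;
}

At probe time the driver then advertises the bits the user may toggle and picks the defaults, e.g. dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; dev->features |= dev->hw_features; which is exactly what the netdev_init() hunk above does.
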
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index d70fb76edb77..4ce9e5f2c069 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -174,7 +174,8 @@ static void loopback_setup(struct net_device *dev)
174 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
175 | NETIF_F_LLTX 175 | NETIF_F_LLTX
176 | NETIF_F_NETNS_LOCAL 176 | NETIF_F_NETNS_LOCAL
177 | NETIF_F_VLAN_CHALLENGED; 177 | NETIF_F_VLAN_CHALLENGED
178 | NETIF_F_LOOPBACK;
178 dev->ethtool_ops = &loopback_ethtool_ops; 179 dev->ethtool_ops = &loopback_ethtool_ops;
179 dev->header_ops = &eth_header_ops; 180 dev->header_ops = &eth_header_ops;
180 dev->netdev_ops = &loopback_ops; 181 dev->netdev_ops = &loopback_ops;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 79ccb54ab00c..629bd2649c0c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -576,6 +576,11 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
576 * add that if/when we get our hands on a full-blown MII PHY. 576 * add that if/when we get our hands on a full-blown MII PHY.
577 */ 577 */
578 578
579 if (status & MACB_BIT(ISR_ROVR)) {
580 /* We missed at least one packet */
581 bp->hw_stats.rx_overruns++;
582 }
583
579 if (status & MACB_BIT(HRESP)) { 584 if (status & MACB_BIT(HRESP)) {
580 /* 585 /*
581 * TODO: Reset the hardware, and maybe move the printk 586 * TODO: Reset the hardware, and maybe move the printk
@@ -1024,7 +1029,8 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
1024 hwstat->rx_jabbers + 1029 hwstat->rx_jabbers +
1025 hwstat->rx_undersize_pkts + 1030 hwstat->rx_undersize_pkts +
1026 hwstat->rx_length_mismatch); 1031 hwstat->rx_length_mismatch);
1027 nstat->rx_over_errors = hwstat->rx_resource_errors; 1032 nstat->rx_over_errors = hwstat->rx_resource_errors +
1033 hwstat->rx_overruns;
1028 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 1034 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
1029 nstat->rx_frame_errors = hwstat->rx_align_errors; 1035 nstat->rx_frame_errors = hwstat->rx_align_errors;
1030 nstat->rx_fifo_errors = hwstat->rx_overruns; 1036 nstat->rx_fifo_errors = hwstat->rx_overruns;
@@ -1171,8 +1177,7 @@ static int __init macb_probe(struct platform_device *pdev)
1171 } 1177 }
1172 1178
1173 dev->irq = platform_get_irq(pdev, 0); 1179 dev->irq = platform_get_irq(pdev, 0);
1174 err = request_irq(dev->irq, macb_interrupt, IRQF_SAMPLE_RANDOM, 1180 err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
1175 dev->name, dev);
1176 if (err) { 1181 if (err) {
1177 printk(KERN_ERR 1182 printk(KERN_ERR
1178 "%s: Unable to request IRQ %d (error %d)\n", 1183 "%s: Unable to request IRQ %d (error %d)\n",
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 78e34e9e4f00..d7c0bc62da7f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,7 +415,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
415#define MACVLAN_FEATURES \ 415#define MACVLAN_FEATURES \
416 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 416 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
417 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ 417 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
418 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO) 418 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM)
419 419
420#define MACVLAN_STATE_MASK \ 420#define MACVLAN_STATE_MASK \
421 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 421 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -517,12 +517,6 @@ static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
517 snprintf(drvinfo->version, 32, "0.1"); 517 snprintf(drvinfo->version, 32, "0.1");
518} 518}
519 519
520static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
521{
522 const struct macvlan_dev *vlan = netdev_priv(dev);
523 return dev_ethtool_get_rx_csum(vlan->lowerdev);
524}
525
526static int macvlan_ethtool_get_settings(struct net_device *dev, 520static int macvlan_ethtool_get_settings(struct net_device *dev,
527 struct ethtool_cmd *cmd) 521 struct ethtool_cmd *cmd)
528{ 522{
@@ -530,18 +524,10 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
530 return dev_ethtool_get_settings(vlan->lowerdev, cmd); 524 return dev_ethtool_get_settings(vlan->lowerdev, cmd);
531} 525}
532 526
533static u32 macvlan_ethtool_get_flags(struct net_device *dev)
534{
535 const struct macvlan_dev *vlan = netdev_priv(dev);
536 return dev_ethtool_get_flags(vlan->lowerdev);
537}
538
539static const struct ethtool_ops macvlan_ethtool_ops = { 527static const struct ethtool_ops macvlan_ethtool_ops = {
540 .get_link = ethtool_op_get_link, 528 .get_link = ethtool_op_get_link,
541 .get_settings = macvlan_ethtool_get_settings, 529 .get_settings = macvlan_ethtool_get_settings,
542 .get_rx_csum = macvlan_ethtool_get_rx_csum,
543 .get_drvinfo = macvlan_ethtool_get_drvinfo, 530 .get_drvinfo = macvlan_ethtool_get_drvinfo,
544 .get_flags = macvlan_ethtool_get_flags,
545}; 531};
546 532
547static const struct net_device_ops macvlan_netdev_ops = { 533static const struct net_device_ops macvlan_netdev_ops = {
@@ -799,6 +785,7 @@ static int macvlan_device_event(struct notifier_block *unused,
799 struct net_device *dev = ptr; 785 struct net_device *dev = ptr;
800 struct macvlan_dev *vlan, *next; 786 struct macvlan_dev *vlan, *next;
801 struct macvlan_port *port; 787 struct macvlan_port *port;
788 LIST_HEAD(list_kill);
802 789
803 if (!macvlan_port_exists(dev)) 790 if (!macvlan_port_exists(dev))
804 return NOTIFY_DONE; 791 return NOTIFY_DONE;
@@ -824,7 +811,9 @@ static int macvlan_device_event(struct notifier_block *unused,
824 break; 811 break;
825 812
826 list_for_each_entry_safe(vlan, next, &port->vlans, list) 813 list_for_each_entry_safe(vlan, next, &port->vlans, list)
827 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL); 814 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
815 unregister_netdevice_many(&list_kill);
816 list_del(&list_kill);
828 break; 817 break;
829 case NETDEV_PRE_TYPE_CHANGE: 818 case NETDEV_PRE_TYPE_CHANGE:
830 /* Forbid underlaying device to change its type. */ 819 /* Forbid underlaying device to change its type. */
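
Besides dropping the pass-through get_rx_csum/get_flags ethtool hooks, the macvlan hunk changes how the NETDEV_UNREGISTER case tears down child devices: each dellink() now only queues the macvlan on a local list, and one unregister_netdevice_many() call finishes the job for the whole set. Restated with comments; the batching rationale is the usual one for unregister_netdevice_many() and is an inference, not something stated in the patch.

LIST_HEAD(list_kill);

list_for_each_entry_safe(vlan, next, &port->vlans, list)
        /* queues vlan->dev on list_kill instead of unregistering it now */
        vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);

/* one pass over the list: notifier callbacks and RCU grace periods are
 * shared by all the devices rather than paid once per macvlan */
unregister_netdevice_many(&list_kill);
list_del(&list_kill);
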
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index e85bf04cf813..16fbb11d92ac 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -176,6 +176,9 @@ static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr)
176 * @npage_adv: Modes currently advertised on next pages 176 * @npage_adv: Modes currently advertised on next pages
177 * @npage_lpa: Modes advertised by link partner on next pages 177 * @npage_lpa: Modes advertised by link partner on next pages
178 * 178 *
179 * The @ecmd parameter is expected to have been cleared before calling
180 * mdio45_ethtool_gset_npage().
181 *
179 * Since the CSRs for auto-negotiation using next pages are not fully 182 * Since the CSRs for auto-negotiation using next pages are not fully
180 * standardised, this function does not attempt to decode them. The 183 * standardised, this function does not attempt to decode them. The
181 * caller must pass them in. 184 * caller must pass them in.
@@ -185,6 +188,7 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
185 u32 npage_adv, u32 npage_lpa) 188 u32 npage_adv, u32 npage_lpa)
186{ 189{
187 int reg; 190 int reg;
191 u32 speed;
188 192
189 ecmd->transceiver = XCVR_INTERNAL; 193 ecmd->transceiver = XCVR_INTERNAL;
190 ecmd->phy_address = mdio->prtad; 194 ecmd->phy_address = mdio->prtad;
@@ -287,33 +291,36 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
287 if (modes & (ADVERTISED_10000baseT_Full | 291 if (modes & (ADVERTISED_10000baseT_Full |
288 ADVERTISED_10000baseKX4_Full | 292 ADVERTISED_10000baseKX4_Full |
289 ADVERTISED_10000baseKR_Full)) { 293 ADVERTISED_10000baseKR_Full)) {
290 ecmd->speed = SPEED_10000; 294 speed = SPEED_10000;
291 ecmd->duplex = DUPLEX_FULL; 295 ecmd->duplex = DUPLEX_FULL;
292 } else if (modes & (ADVERTISED_1000baseT_Full | 296 } else if (modes & (ADVERTISED_1000baseT_Full |
293 ADVERTISED_1000baseT_Half | 297 ADVERTISED_1000baseT_Half |
294 ADVERTISED_1000baseKX_Full)) { 298 ADVERTISED_1000baseKX_Full)) {
295 ecmd->speed = SPEED_1000; 299 speed = SPEED_1000;
296 ecmd->duplex = !(modes & ADVERTISED_1000baseT_Half); 300 ecmd->duplex = !(modes & ADVERTISED_1000baseT_Half);
297 } else if (modes & (ADVERTISED_100baseT_Full | 301 } else if (modes & (ADVERTISED_100baseT_Full |
298 ADVERTISED_100baseT_Half)) { 302 ADVERTISED_100baseT_Half)) {
299 ecmd->speed = SPEED_100; 303 speed = SPEED_100;
300 ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full); 304 ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
301 } else { 305 } else {
302 ecmd->speed = SPEED_10; 306 speed = SPEED_10;
303 ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full); 307 ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
304 } 308 }
305 } else { 309 } else {
306 /* Report forced settings */ 310 /* Report forced settings */
307 reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, 311 reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
308 MDIO_CTRL1); 312 MDIO_CTRL1);
309 ecmd->speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) * 313 speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1)
310 ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10)); 314 * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10));
311 ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX || 315 ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX ||
312 ecmd->speed == SPEED_10000); 316 speed == SPEED_10000);
313 } 317 }
314 318
319 ethtool_cmd_speed_set(ecmd, speed);
320
315 /* 10GBASE-T MDI/MDI-X */ 321 /* 10GBASE-T MDI/MDI-X */
316 if (ecmd->port == PORT_TP && ecmd->speed == SPEED_10000) { 322 if (ecmd->port == PORT_TP
323 && (ethtool_cmd_speed(ecmd) == SPEED_10000)) {
317 switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, 324 switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
318 MDIO_PMA_10GBT_SWAPPOL)) { 325 MDIO_PMA_10GBT_SWAPPOL)) {
319 case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: 326 case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
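
Both this mdio.c hunk and the mii.c one below stop reading and writing ecmd->speed directly and go through ethtool_cmd_speed()/ethtool_cmd_speed_set(). The reason is that link speed is no longer a plain 16-bit field: values above 65535 Mb/s are split across ecmd->speed and ecmd->speed_hi. From memory, the helpers in include/linux/ethtool.h of this era look roughly like the following; treat the header as authoritative.

static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
        ep->speed = (__u16)speed;
        ep->speed_hi = (__u16)(speed >> 16);
}

static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
{
        return (ep->speed_hi << 16) | ep->speed;
}

Drivers that keep assigning ecmd->speed directly silently truncate anything above 65535 Mb/s, which is what this tree-wide conversion guards against.
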
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index d4fc00b1ff93..c62e7816d548 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -62,6 +62,9 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
62 * @mii: MII interface 62 * @mii: MII interface
63 * @ecmd: requested ethtool_cmd 63 * @ecmd: requested ethtool_cmd
64 * 64 *
65 * The @ecmd parameter is expected to have been cleared before calling
66 * mii_ethtool_gset().
67 *
65 * Returns 0 for success, negative on error. 68 * Returns 0 for success, negative on error.
66 */ 69 */
67int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) 70int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
@@ -122,22 +125,25 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
122 125
123 if (nego & (ADVERTISED_1000baseT_Full | 126 if (nego & (ADVERTISED_1000baseT_Full |
124 ADVERTISED_1000baseT_Half)) { 127 ADVERTISED_1000baseT_Half)) {
125 ecmd->speed = SPEED_1000; 128 ethtool_cmd_speed_set(ecmd, SPEED_1000);
126 ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full); 129 ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full);
127 } else if (nego & (ADVERTISED_100baseT_Full | 130 } else if (nego & (ADVERTISED_100baseT_Full |
128 ADVERTISED_100baseT_Half)) { 131 ADVERTISED_100baseT_Half)) {
129 ecmd->speed = SPEED_100; 132 ethtool_cmd_speed_set(ecmd, SPEED_100);
130 ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full); 133 ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full);
131 } else { 134 } else {
132 ecmd->speed = SPEED_10; 135 ethtool_cmd_speed_set(ecmd, SPEED_10);
133 ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full); 136 ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full);
134 } 137 }
135 } else { 138 } else {
136 ecmd->autoneg = AUTONEG_DISABLE; 139 ecmd->autoneg = AUTONEG_DISABLE;
137 140
138 ecmd->speed = ((bmcr & BMCR_SPEED1000 && 141 ethtool_cmd_speed_set(ecmd,
139 (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : 142 ((bmcr & BMCR_SPEED1000 &&
140 (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10); 143 (bmcr & BMCR_SPEED100) == 0) ?
144 SPEED_1000 :
145 ((bmcr & BMCR_SPEED100) ?
146 SPEED_100 : SPEED_10)));
141 ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; 147 ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
142 } 148 }
143 149
@@ -158,10 +164,11 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
158int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) 164int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
159{ 165{
160 struct net_device *dev = mii->dev; 166 struct net_device *dev = mii->dev;
167 u32 speed = ethtool_cmd_speed(ecmd);
161 168
162 if (ecmd->speed != SPEED_10 && 169 if (speed != SPEED_10 &&
163 ecmd->speed != SPEED_100 && 170 speed != SPEED_100 &&
164 ecmd->speed != SPEED_1000) 171 speed != SPEED_1000)
165 return -EINVAL; 172 return -EINVAL;
166 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 173 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
167 return -EINVAL; 174 return -EINVAL;
@@ -173,7 +180,7 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
173 return -EINVAL; 180 return -EINVAL;
174 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) 181 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
175 return -EINVAL; 182 return -EINVAL;
176 if ((ecmd->speed == SPEED_1000) && (!mii->supports_gmii)) 183 if ((speed == SPEED_1000) && (!mii->supports_gmii))
177 return -EINVAL; 184 return -EINVAL;
178 185
179 /* ignore supported, maxtxpkt, maxrxpkt */ 186 /* ignore supported, maxtxpkt, maxrxpkt */
@@ -231,9 +238,9 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
231 bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); 238 bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
232 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | 239 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
233 BMCR_SPEED1000 | BMCR_FULLDPLX); 240 BMCR_SPEED1000 | BMCR_FULLDPLX);
234 if (ecmd->speed == SPEED_1000) 241 if (speed == SPEED_1000)
235 tmp |= BMCR_SPEED1000; 242 tmp |= BMCR_SPEED1000;
236 else if (ecmd->speed == SPEED_100) 243 else if (speed == SPEED_100)
237 tmp |= BMCR_SPEED100; 244 tmp |= BMCR_SPEED100;
238 if (ecmd->duplex == DUPLEX_FULL) { 245 if (ecmd->duplex == DUPLEX_FULL) {
239 tmp |= BMCR_FULLDPLX; 246 tmp |= BMCR_FULLDPLX;
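
The kerneldoc note added to mii_ethtool_gset() (and its twin in mdio.c above) makes an existing requirement explicit: the function only fills in the fields it knows about, so callers must hand it a zeroed ethtool_cmd. A hedged usage sketch; the surrounding function and the way the mii_if_info pointer is obtained are illustrative.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

static void example_report_link(struct mii_if_info *mii)
{
        struct ethtool_cmd ecmd;

        memset(&ecmd, 0, sizeof(ecmd));         /* required, per the new note */

        if (mii_ethtool_gset(mii, &ecmd) == 0)
                pr_info("link: %u Mb/s, %s duplex\n",
                        ethtool_cmd_speed(&ecmd),
                        ecmd.duplex == DUPLEX_FULL ? "full" : "half");
}
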
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index d54b7abf0225..2e858e4dcf4d 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -57,37 +57,6 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
57 drvinfo->eedump_len = 0; 57 drvinfo->eedump_len = 0;
58} 58}
59 59
60static u32 mlx4_en_get_tso(struct net_device *dev)
61{
62 return (dev->features & NETIF_F_TSO) != 0;
63}
64
65static int mlx4_en_set_tso(struct net_device *dev, u32 data)
66{
67 struct mlx4_en_priv *priv = netdev_priv(dev);
68
69 if (data) {
70 if (!priv->mdev->LSO_support)
71 return -EPERM;
72 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
73 } else
74 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
75 return 0;
76}
77
78static u32 mlx4_en_get_rx_csum(struct net_device *dev)
79{
80 struct mlx4_en_priv *priv = netdev_priv(dev);
81 return priv->rx_csum;
82}
83
84static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
85{
86 struct mlx4_en_priv *priv = netdev_priv(dev);
87 priv->rx_csum = (data != 0);
88 return 0;
89}
90
91static const char main_strings[][ETH_GSTRING_LEN] = { 60static const char main_strings[][ETH_GSTRING_LEN] = {
92 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 61 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
93 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 62 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -296,10 +265,10 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
296 265
297 trans_type = priv->port_state.transciver; 266 trans_type = priv->port_state.transciver;
298 if (netif_carrier_ok(dev)) { 267 if (netif_carrier_ok(dev)) {
299 cmd->speed = priv->port_state.link_speed; 268 ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
300 cmd->duplex = DUPLEX_FULL; 269 cmd->duplex = DUPLEX_FULL;
301 } else { 270 } else {
302 cmd->speed = -1; 271 ethtool_cmd_speed_set(cmd, -1);
303 cmd->duplex = -1; 272 cmd->duplex = -1;
304 } 273 }
305 274
@@ -323,7 +292,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
323static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 292static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
324{ 293{
325 if ((cmd->autoneg == AUTONEG_ENABLE) || 294 if ((cmd->autoneg == AUTONEG_ENABLE) ||
326 (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL)) 295 (ethtool_cmd_speed(cmd) != SPEED_10000) ||
296 (cmd->duplex != DUPLEX_FULL))
327 return -EINVAL; 297 return -EINVAL;
328 298
329 /* Nothing to change */ 299 /* Nothing to change */
@@ -483,17 +453,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
483 .get_drvinfo = mlx4_en_get_drvinfo, 453 .get_drvinfo = mlx4_en_get_drvinfo,
484 .get_settings = mlx4_en_get_settings, 454 .get_settings = mlx4_en_get_settings,
485 .set_settings = mlx4_en_set_settings, 455 .set_settings = mlx4_en_set_settings,
486#ifdef NETIF_F_TSO
487 .get_tso = mlx4_en_get_tso,
488 .set_tso = mlx4_en_set_tso,
489#endif
490 .get_sg = ethtool_op_get_sg,
491 .set_sg = ethtool_op_set_sg,
492 .get_link = ethtool_op_get_link, 456 .get_link = ethtool_op_get_link,
493 .get_rx_csum = mlx4_en_get_rx_csum,
494 .set_rx_csum = mlx4_en_set_rx_csum,
495 .get_tx_csum = ethtool_op_get_tx_csum,
496 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
497 .get_strings = mlx4_en_get_strings, 457 .get_strings = mlx4_en_get_strings,
498 .get_sset_count = mlx4_en_get_sset_count, 458 .get_sset_count = mlx4_en_get_sset_count,
499 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 459 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
@@ -508,7 +468,6 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
508 .set_pauseparam = mlx4_en_set_pauseparam, 468 .set_pauseparam = mlx4_en_set_pauseparam,
509 .get_ringparam = mlx4_en_get_ringparam, 469 .get_ringparam = mlx4_en_get_ringparam,
510 .set_ringparam = mlx4_en_set_ringparam, 470 .set_ringparam = mlx4_en_set_ringparam,
511 .get_flags = ethtool_op_get_flags,
512}; 471};
513 472
514 473
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 77063f91c564..61850adae6f7 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -1083,7 +1083,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1083 priv->prof = prof; 1083 priv->prof = prof;
1084 priv->port = port; 1084 priv->port = port;
1085 priv->port_up = false; 1085 priv->port_up = false;
1086 priv->rx_csum = 1;
1087 priv->flags = prof->flags; 1086 priv->flags = prof->flags;
1088 priv->tx_ring_num = prof->tx_ring_num; 1087 priv->tx_ring_num = prof->tx_ring_num;
1089 priv->rx_ring_num = prof->rx_ring_num; 1088 priv->rx_ring_num = prof->rx_ring_num;
@@ -1141,21 +1140,16 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1141 /* 1140 /*
1142 * Set driver features 1141 * Set driver features
1143 */ 1142 */
1144 dev->features |= NETIF_F_SG; 1143 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1145 dev->vlan_features |= NETIF_F_SG; 1144 if (mdev->LSO_support)
1146 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1145 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1147 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1146
1148 dev->features |= NETIF_F_HIGHDMA; 1147 dev->vlan_features = dev->hw_features;
1149 dev->features |= NETIF_F_HW_VLAN_TX | 1148
1150 NETIF_F_HW_VLAN_RX | 1149 dev->hw_features |= NETIF_F_RXCSUM;
1151 NETIF_F_HW_VLAN_FILTER; 1150 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
1152 dev->features |= NETIF_F_GRO; 1151 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1153 if (mdev->LSO_support) { 1152 NETIF_F_HW_VLAN_FILTER;
1154 dev->features |= NETIF_F_TSO;
1155 dev->features |= NETIF_F_TSO6;
1156 dev->vlan_features |= NETIF_F_TSO;
1157 dev->vlan_features |= NETIF_F_TSO6;
1158 }
1159 1153
1160 mdev->pndev[port] = dev; 1154 mdev->pndev[port] = dev;
1161 1155
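
The mlx4_en probe hunk above shows the other half of the conversion: everything the user may toggle goes into dev->hw_features, vlan_features gets the offloads that remain usable on VLAN devices, and dev->features is the default-enabled set. Roughly, as a hedged sketch with a generic capability flag (hw_supports_tso stands in for mdev->LSO_support):

dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (hw_supports_tso)
        dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

dev->vlan_features = dev->hw_features;          /* offloads kept on VLANs */

/* added after the vlan_features copy so it is not propagated there */
dev->hw_features |= NETIF_F_RXCSUM;
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                NETIF_F_HW_VLAN_FILTER;         /* enabled by default */

With that in place, the en_rx.c hunk below can test dev->features & NETIF_F_RXCSUM in the hot path instead of the now-removed priv->rx_csum flag.
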
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 62dd21b06df4..277215fb9d72 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -584,7 +584,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
584 ring->bytes += length; 584 ring->bytes += length;
585 ring->packets++; 585 ring->packets++;
586 586
587 if (likely(priv->rx_csum)) { 587 if (likely(dev->features & NETIF_F_RXCSUM)) {
588 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && 588 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
589 (cqe->checksum == cpu_to_be16(0xffff))) { 589 (cqe->checksum == cpu_to_be16(0xffff))) {
590 priv->port_stats.rx_chksum_good++; 590 priv->port_stats.rx_chksum_good++;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index e30f6099c0de..0b5150df0585 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -451,7 +451,6 @@ struct mlx4_en_priv {
451 int registered; 451 int registered;
452 int allocated; 452 int allocated;
453 int stride; 453 int stride;
454 int rx_csum;
455 u64 mac; 454 u64 mac;
456 int mac_index; 455 int mac_index;
457 unsigned max_mtu; 456 unsigned max_mtu;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 34425b94452f..a5d9b1c310b3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1444,13 +1444,13 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
1444 cmd->advertising = ADVERTISED_MII; 1444 cmd->advertising = ADVERTISED_MII;
1445 switch (port_status & PORT_SPEED_MASK) { 1445 switch (port_status & PORT_SPEED_MASK) {
1446 case PORT_SPEED_10: 1446 case PORT_SPEED_10:
1447 cmd->speed = SPEED_10; 1447 ethtool_cmd_speed_set(cmd, SPEED_10);
1448 break; 1448 break;
1449 case PORT_SPEED_100: 1449 case PORT_SPEED_100:
1450 cmd->speed = SPEED_100; 1450 ethtool_cmd_speed_set(cmd, SPEED_100);
1451 break; 1451 break;
1452 case PORT_SPEED_1000: 1452 case PORT_SPEED_1000:
1453 cmd->speed = SPEED_1000; 1453 ethtool_cmd_speed_set(cmd, SPEED_1000);
1454 break; 1454 break;
1455 default: 1455 default:
1456 cmd->speed = -1; 1456 cmd->speed = -1;
@@ -1575,18 +1575,12 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1575 return 0; 1575 return 0;
1576} 1576}
1577 1577
1578static u32
1579mv643xx_eth_get_rx_csum(struct net_device *dev)
1580{
1581 struct mv643xx_eth_private *mp = netdev_priv(dev);
1582
1583 return !!(rdlp(mp, PORT_CONFIG) & 0x02000000);
1584}
1585 1578
1586static int 1579static int
1587mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum) 1580mv643xx_eth_set_features(struct net_device *dev, u32 features)
1588{ 1581{
1589 struct mv643xx_eth_private *mp = netdev_priv(dev); 1582 struct mv643xx_eth_private *mp = netdev_priv(dev);
1583 u32 rx_csum = features & NETIF_F_RXCSUM;
1590 1584
1591 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); 1585 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1592 1586
@@ -1634,11 +1628,6 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1634 } 1628 }
1635} 1629}
1636 1630
1637static int mv643xx_eth_set_flags(struct net_device *dev, u32 data)
1638{
1639 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO);
1640}
1641
1642static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) 1631static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1643{ 1632{
1644 if (sset == ETH_SS_STATS) 1633 if (sset == ETH_SS_STATS)
@@ -1657,14 +1646,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1657 .set_coalesce = mv643xx_eth_set_coalesce, 1646 .set_coalesce = mv643xx_eth_set_coalesce,
1658 .get_ringparam = mv643xx_eth_get_ringparam, 1647 .get_ringparam = mv643xx_eth_get_ringparam,
1659 .set_ringparam = mv643xx_eth_set_ringparam, 1648 .set_ringparam = mv643xx_eth_set_ringparam,
1660 .get_rx_csum = mv643xx_eth_get_rx_csum,
1661 .set_rx_csum = mv643xx_eth_set_rx_csum,
1662 .set_tx_csum = ethtool_op_set_tx_csum,
1663 .set_sg = ethtool_op_set_sg,
1664 .get_strings = mv643xx_eth_get_strings, 1649 .get_strings = mv643xx_eth_get_strings,
1665 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1650 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
1666 .get_flags = ethtool_op_get_flags,
1667 .set_flags = mv643xx_eth_set_flags,
1668 .get_sset_count = mv643xx_eth_get_sset_count, 1651 .get_sset_count = mv643xx_eth_get_sset_count,
1669}; 1652};
1670 1653
@@ -2264,7 +2247,7 @@ static void port_start(struct mv643xx_eth_private *mp)
2264 * frames to RX queue #0, and include the pseudo-header when 2247 * frames to RX queue #0, and include the pseudo-header when
2265 * calculating receive checksums. 2248 * calculating receive checksums.
2266 */ 2249 */
2267 wrlp(mp, PORT_CONFIG, 0x02000000); 2250 mv643xx_eth_set_features(mp->dev, mp->dev->features);
2268 2251
2269 /* 2252 /*
2270 * Treat BPDUs as normal multicasts, and disable partition mode. 2253 * Treat BPDUs as normal multicasts, and disable partition mode.
@@ -2848,6 +2831,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
2848 .ndo_validate_addr = eth_validate_addr, 2831 .ndo_validate_addr = eth_validate_addr,
2849 .ndo_do_ioctl = mv643xx_eth_ioctl, 2832 .ndo_do_ioctl = mv643xx_eth_ioctl,
2850 .ndo_change_mtu = mv643xx_eth_change_mtu, 2833 .ndo_change_mtu = mv643xx_eth_change_mtu,
2834 .ndo_set_features = mv643xx_eth_set_features,
2851 .ndo_tx_timeout = mv643xx_eth_tx_timeout, 2835 .ndo_tx_timeout = mv643xx_eth_tx_timeout,
2852 .ndo_get_stats = mv643xx_eth_get_stats, 2836 .ndo_get_stats = mv643xx_eth_get_stats,
2853#ifdef CONFIG_NET_POLL_CONTROLLER 2837#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2930,7 +2914,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2930 dev->watchdog_timeo = 2 * HZ; 2914 dev->watchdog_timeo = 2 * HZ;
2931 dev->base_addr = 0; 2915 dev->base_addr = 0;
2932 2916
2933 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2917 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
2918 NETIF_F_RXCSUM | NETIF_F_LRO;
2919 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
2934 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2920 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2935 2921
2936 SET_NETDEV_DEV(dev, &pdev->dev); 2922 SET_NETDEV_DEV(dev, &pdev->dev);
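
A detail worth noting in the mv643xx_eth hunk above: port_start() no longer writes a hard-coded PORT_CONFIG value but calls mv643xx_eth_set_features(mp->dev, mp->dev->features), so the RX-checksum bit is re-derived from whatever ethtool last configured whenever the port is (re)started. The same idea can be expressed generically; the wrapper below is illustrative, not an existing helper.

static void example_reapply_features(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        /* re-program offload-related registers from the live feature set,
         * e.g. after a port restart or hardware reset */
        if (ops->ndo_set_features)
                ops->ndo_set_features(dev, dev->features);
}
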
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 1446de59ae53..b1358f79ba0a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -205,7 +205,6 @@ struct myri10ge_priv {
205 int tx_boundary; /* boundary transmits cannot cross */ 205 int tx_boundary; /* boundary transmits cannot cross */
206 int num_slices; 206 int num_slices;
207 int running; /* running? */ 207 int running; /* running? */
208 int csum_flag; /* rx_csums? */
209 int small_bytes; 208 int small_bytes;
210 int big_bytes; 209 int big_bytes;
211 int max_intr_slots; 210 int max_intr_slots;
@@ -1386,7 +1385,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
1386 skb->protocol = eth_type_trans(skb, dev); 1385 skb->protocol = eth_type_trans(skb, dev);
1387 skb_record_rx_queue(skb, ss - &mgp->ss[0]); 1386 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1388 1387
1389 if (mgp->csum_flag) { 1388 if (dev->features & NETIF_F_RXCSUM) {
1390 if ((skb->protocol == htons(ETH_P_IP)) || 1389 if ((skb->protocol == htons(ETH_P_IP)) ||
1391 (skb->protocol == htons(ETH_P_IPV6))) { 1390 (skb->protocol == htons(ETH_P_IPV6))) {
1392 skb->csum = csum; 1391 skb->csum = csum;
@@ -1645,7 +1644,7 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1645 int i; 1644 int i;
1646 1645
1647 cmd->autoneg = AUTONEG_DISABLE; 1646 cmd->autoneg = AUTONEG_DISABLE;
1648 cmd->speed = SPEED_10000; 1647 ethtool_cmd_speed_set(cmd, SPEED_10000);
1649 cmd->duplex = DUPLEX_FULL; 1648 cmd->duplex = DUPLEX_FULL;
1650 1649
1651 /* 1650 /*
@@ -1757,43 +1756,6 @@ myri10ge_get_ringparam(struct net_device *netdev,
1757 ring->tx_pending = ring->tx_max_pending; 1756 ring->tx_pending = ring->tx_max_pending;
1758} 1757}
1759 1758
1760static u32 myri10ge_get_rx_csum(struct net_device *netdev)
1761{
1762 struct myri10ge_priv *mgp = netdev_priv(netdev);
1763
1764 if (mgp->csum_flag)
1765 return 1;
1766 else
1767 return 0;
1768}
1769
1770static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
1771{
1772 struct myri10ge_priv *mgp = netdev_priv(netdev);
1773 int err = 0;
1774
1775 if (csum_enabled)
1776 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
1777 else {
1778 netdev->features &= ~NETIF_F_LRO;
1779 mgp->csum_flag = 0;
1780
1781 }
1782 return err;
1783}
1784
1785static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
1786{
1787 struct myri10ge_priv *mgp = netdev_priv(netdev);
1788 u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
1789
1790 if (tso_enabled)
1791 netdev->features |= flags;
1792 else
1793 netdev->features &= ~flags;
1794 return 0;
1795}
1796
1797static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { 1759static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1798 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 1760 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
1799 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 1761 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -1944,11 +1906,6 @@ static u32 myri10ge_get_msglevel(struct net_device *netdev)
1944 return mgp->msg_enable; 1906 return mgp->msg_enable;
1945} 1907}
1946 1908
1947static int myri10ge_set_flags(struct net_device *netdev, u32 value)
1948{
1949 return ethtool_op_set_flags(netdev, value, ETH_FLAG_LRO);
1950}
1951
1952static const struct ethtool_ops myri10ge_ethtool_ops = { 1909static const struct ethtool_ops myri10ge_ethtool_ops = {
1953 .get_settings = myri10ge_get_settings, 1910 .get_settings = myri10ge_get_settings,
1954 .get_drvinfo = myri10ge_get_drvinfo, 1911 .get_drvinfo = myri10ge_get_drvinfo,
@@ -1957,19 +1914,12 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
1957 .get_pauseparam = myri10ge_get_pauseparam, 1914 .get_pauseparam = myri10ge_get_pauseparam,
1958 .set_pauseparam = myri10ge_set_pauseparam, 1915 .set_pauseparam = myri10ge_set_pauseparam,
1959 .get_ringparam = myri10ge_get_ringparam, 1916 .get_ringparam = myri10ge_get_ringparam,
1960 .get_rx_csum = myri10ge_get_rx_csum,
1961 .set_rx_csum = myri10ge_set_rx_csum,
1962 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1963 .set_sg = ethtool_op_set_sg,
1964 .set_tso = myri10ge_set_tso,
1965 .get_link = ethtool_op_get_link, 1917 .get_link = ethtool_op_get_link,
1966 .get_strings = myri10ge_get_strings, 1918 .get_strings = myri10ge_get_strings,
1967 .get_sset_count = myri10ge_get_sset_count, 1919 .get_sset_count = myri10ge_get_sset_count,
1968 .get_ethtool_stats = myri10ge_get_ethtool_stats, 1920 .get_ethtool_stats = myri10ge_get_ethtool_stats,
1969 .set_msglevel = myri10ge_set_msglevel, 1921 .set_msglevel = myri10ge_set_msglevel,
1970 .get_msglevel = myri10ge_get_msglevel, 1922 .get_msglevel = myri10ge_get_msglevel,
1971 .get_flags = ethtool_op_get_flags,
1972 .set_flags = myri10ge_set_flags
1973}; 1923};
1974 1924
1975static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) 1925static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
@@ -3136,6 +3086,14 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
3136 return 0; 3086 return 0;
3137} 3087}
3138 3088
3089static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
3090{
3091 if (!(features & NETIF_F_RXCSUM))
3092 features &= ~NETIF_F_LRO;
3093
3094 return features;
3095}
3096
3139static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) 3097static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
3140{ 3098{
3141 struct myri10ge_priv *mgp = netdev_priv(dev); 3099 struct myri10ge_priv *mgp = netdev_priv(dev);
@@ -3834,6 +3792,7 @@ static const struct net_device_ops myri10ge_netdev_ops = {
3834 .ndo_get_stats = myri10ge_get_stats, 3792 .ndo_get_stats = myri10ge_get_stats,
3835 .ndo_validate_addr = eth_validate_addr, 3793 .ndo_validate_addr = eth_validate_addr,
3836 .ndo_change_mtu = myri10ge_change_mtu, 3794 .ndo_change_mtu = myri10ge_change_mtu,
3795 .ndo_fix_features = myri10ge_fix_features,
3837 .ndo_set_multicast_list = myri10ge_set_multicast_list, 3796 .ndo_set_multicast_list = myri10ge_set_multicast_list,
3838 .ndo_set_mac_address = myri10ge_set_mac_address, 3797 .ndo_set_mac_address = myri10ge_set_mac_address,
3839}; 3798};
@@ -3860,7 +3819,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3860 mgp = netdev_priv(netdev); 3819 mgp = netdev_priv(netdev);
3861 mgp->dev = netdev; 3820 mgp->dev = netdev;
3862 mgp->pdev = pdev; 3821 mgp->pdev = pdev;
3863 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
3864 mgp->pause = myri10ge_flow_control; 3822 mgp->pause = myri10ge_flow_control;
3865 mgp->intr_coal_delay = myri10ge_intr_coal_delay; 3823 mgp->intr_coal_delay = myri10ge_intr_coal_delay;
3866 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT); 3824 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
@@ -3976,11 +3934,11 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3976 netdev->netdev_ops = &myri10ge_netdev_ops; 3934 netdev->netdev_ops = &myri10ge_netdev_ops;
3977 netdev->mtu = myri10ge_initial_mtu; 3935 netdev->mtu = myri10ge_initial_mtu;
3978 netdev->base_addr = mgp->iomem_base; 3936 netdev->base_addr = mgp->iomem_base;
3979 netdev->features = mgp->features; 3937 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
3938 netdev->features = netdev->hw_features;
3980 3939
3981 if (dac_enabled) 3940 if (dac_enabled)
3982 netdev->features |= NETIF_F_HIGHDMA; 3941 netdev->features |= NETIF_F_HIGHDMA;
3983 netdev->features |= NETIF_F_LRO;
3984 3942
3985 netdev->vlan_features |= mgp->features; 3943 netdev->vlan_features |= mgp->features;
3986 if (mgp->fw_ver_tiny < 37) 3944 if (mgp->fw_ver_tiny < 37)
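
myri10ge also picks up an ndo_fix_features hook here; the core runs it before ndo_set_features, so feature dependencies (LRO needing RX checksum offload) are resolved in one place instead of inside the old set_rx_csum/set_flags handlers. The shape, restated as a hedged sketch:

static u32 example_fix_features(struct net_device *dev, u32 features)
{
        /* LRO depends on hardware checksum validation, so it is only
         * offered when RX checksum offload is on - the same rule the old
         * myri10ge_set_rx_csum() enforced by hand */
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_LRO;

        return features;
}

A driver that later needs the feature set recomputed (for instance after a firmware reload changes capabilities) can call netdev_update_features() under rtnl_lock(), which re-runs both hooks; that helper is part of the same offload rework, though it is not used in the hunks shown here.
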
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 1074231f0a0d..b78be088c4ad 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2820,7 +2820,7 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2820 u32 tmp; 2820 u32 tmp;
2821 2821
2822 ecmd->port = dev->if_port; 2822 ecmd->port = dev->if_port;
2823 ecmd->speed = np->speed; 2823 ethtool_cmd_speed_set(ecmd, np->speed);
2824 ecmd->duplex = np->duplex; 2824 ecmd->duplex = np->duplex;
2825 ecmd->autoneg = np->autoneg; 2825 ecmd->autoneg = np->autoneg;
2826 ecmd->advertising = 0; 2826 ecmd->advertising = 0;
@@ -2878,9 +2878,9 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2878 tmp = mii_nway_result( 2878 tmp = mii_nway_result(
2879 np->advertising & mdio_read(dev, MII_LPA)); 2879 np->advertising & mdio_read(dev, MII_LPA));
2880 if (tmp == LPA_100FULL || tmp == LPA_100HALF) 2880 if (tmp == LPA_100FULL || tmp == LPA_100HALF)
2881 ecmd->speed = SPEED_100; 2881 ethtool_cmd_speed_set(ecmd, SPEED_100);
2882 else 2882 else
2883 ecmd->speed = SPEED_10; 2883 ethtool_cmd_speed_set(ecmd, SPEED_10);
2884 if (tmp == LPA_100FULL || tmp == LPA_10FULL) 2884 if (tmp == LPA_100FULL || tmp == LPA_10FULL)
2885 ecmd->duplex = DUPLEX_FULL; 2885 ecmd->duplex = DUPLEX_FULL;
2886 else 2886 else
@@ -2908,7 +2908,8 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2908 return -EINVAL; 2908 return -EINVAL;
2909 } 2909 }
2910 } else if (ecmd->autoneg == AUTONEG_DISABLE) { 2910 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
2911 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) 2911 u32 speed = ethtool_cmd_speed(ecmd);
2912 if (speed != SPEED_10 && speed != SPEED_100)
2912 return -EINVAL; 2913 return -EINVAL;
2913 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 2914 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
2914 return -EINVAL; 2915 return -EINVAL;
@@ -2956,7 +2957,7 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2956 if (ecmd->advertising & ADVERTISED_100baseT_Full) 2957 if (ecmd->advertising & ADVERTISED_100baseT_Full)
2957 np->advertising |= ADVERTISE_100FULL; 2958 np->advertising |= ADVERTISE_100FULL;
2958 } else { 2959 } else {
2959 np->speed = ecmd->speed; 2960 np->speed = ethtool_cmd_speed(ecmd);
2960 np->duplex = ecmd->duplex; 2961 np->duplex = ecmd->duplex;
2961 /* user overriding the initial full duplex parm? */ 2962 /* user overriding the initial full duplex parm? */
2962 if (np->duplex == DUPLEX_HALF) 2963 if (np->duplex == DUPLEX_HALF)
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index 243ed2aee88e..e8984b0ca521 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -80,17 +80,20 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
80 80
81#define NE3210_DEBUG 0x0 81#define NE3210_DEBUG 0x0
82 82
83static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; 83static const unsigned char irq_map[] __devinitconst =
84static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; 84 { 15, 12, 11, 10, 9, 7, 5, 3 };
85static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"}; 85static const unsigned int shmem_map[] __devinitconst =
86static int ifmap_val[] __initdata = { 86 { 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 };
87static const char *const ifmap[] __devinitconst =
88 { "UTP", "?", "BNC", "AUI" };
89static const int ifmap_val[] __devinitconst = {
87 IF_PORT_10BASET, 90 IF_PORT_10BASET,
88 IF_PORT_UNKNOWN, 91 IF_PORT_UNKNOWN,
89 IF_PORT_10BASE2, 92 IF_PORT_10BASE2,
90 IF_PORT_AUI, 93 IF_PORT_AUI,
91}; 94};
92 95
93static int __init ne3210_eisa_probe (struct device *device) 96static int __devinit ne3210_eisa_probe (struct device *device)
94{ 97{
95 unsigned long ioaddr, phys_mem; 98 unsigned long ioaddr, phys_mem;
96 int i, retval, port_index; 99 int i, retval, port_index;
@@ -313,7 +316,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
313 memcpy_toio(shmem, buf, count); 316 memcpy_toio(shmem, buf, count);
314} 317}
315 318
316static struct eisa_device_id ne3210_ids[] = { 319static const struct eisa_device_id ne3210_ids[] __devinitconst = {
317 { "EGL0101" }, 320 { "EGL0101" },
318 { "NVL1801" }, 321 { "NVL1801" },
319 { "" }, 322 { "" },
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index eb41e44921e6..a83e101440fd 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -242,34 +242,6 @@ static struct netconsole_target *to_target(struct config_item *item)
242} 242}
243 243
244/* 244/*
245 * Wrapper over simple_strtol (base 10) with sanity and range checking.
246 * We return (signed) long only because we may want to return errors.
247 * Do not use this to convert numbers that are allowed to be negative.
248 */
249static long strtol10_check_range(const char *cp, long min, long max)
250{
251 long ret;
252 char *p = (char *) cp;
253
254 WARN_ON(min < 0);
255 WARN_ON(max < min);
256
257 ret = simple_strtol(p, &p, 10);
258
259 if (*p && (*p != '\n')) {
260 printk(KERN_ERR "netconsole: invalid input\n");
261 return -EINVAL;
262 }
263 if ((ret < min) || (ret > max)) {
264 printk(KERN_ERR "netconsole: input %ld must be between "
265 "%ld and %ld\n", ret, min, max);
266 return -EINVAL;
267 }
268
269 return ret;
270}
271
272/*
273 * Attribute operations for netconsole_target. 245 * Attribute operations for netconsole_target.
274 */ 246 */
275 247
@@ -327,12 +299,14 @@ static ssize_t store_enabled(struct netconsole_target *nt,
327 const char *buf, 299 const char *buf,
328 size_t count) 300 size_t count)
329{ 301{
302 int enabled;
330 int err; 303 int err;
331 long enabled;
332 304
333 enabled = strtol10_check_range(buf, 0, 1); 305 err = kstrtoint(buf, 10, &enabled);
334 if (enabled < 0) 306 if (err < 0)
335 return enabled; 307 return err;
308 if (enabled < 0 || enabled > 1)
309 return -EINVAL;
336 310
337 if (enabled) { /* 1 */ 311 if (enabled) { /* 1 */
338 312
@@ -384,8 +358,7 @@ static ssize_t store_local_port(struct netconsole_target *nt,
384 const char *buf, 358 const char *buf,
385 size_t count) 359 size_t count)
386{ 360{
387 long local_port; 361 int rv;
388#define __U16_MAX ((__u16) ~0U)
389 362
390 if (nt->enabled) { 363 if (nt->enabled) {
391 printk(KERN_ERR "netconsole: target (%s) is enabled, " 364 printk(KERN_ERR "netconsole: target (%s) is enabled, "
@@ -394,12 +367,9 @@ static ssize_t store_local_port(struct netconsole_target *nt,
394 return -EINVAL; 367 return -EINVAL;
395 } 368 }
396 369
397 local_port = strtol10_check_range(buf, 0, __U16_MAX); 370 rv = kstrtou16(buf, 10, &nt->np.local_port);
398 if (local_port < 0) 371 if (rv < 0)
399 return local_port; 372 return rv;
400
401 nt->np.local_port = local_port;
402
403 return strnlen(buf, count); 373 return strnlen(buf, count);
404} 374}
405 375
@@ -407,8 +377,7 @@ static ssize_t store_remote_port(struct netconsole_target *nt,
407 const char *buf, 377 const char *buf,
408 size_t count) 378 size_t count)
409{ 379{
410 long remote_port; 380 int rv;
411#define __U16_MAX ((__u16) ~0U)
412 381
413 if (nt->enabled) { 382 if (nt->enabled) {
414 printk(KERN_ERR "netconsole: target (%s) is enabled, " 383 printk(KERN_ERR "netconsole: target (%s) is enabled, "
@@ -417,12 +386,9 @@ static ssize_t store_remote_port(struct netconsole_target *nt,
417 return -EINVAL; 386 return -EINVAL;
418 } 387 }
419 388
420 remote_port = strtol10_check_range(buf, 0, __U16_MAX); 389 rv = kstrtou16(buf, 10, &nt->np.remote_port);
421 if (remote_port < 0) 390 if (rv < 0)
422 return remote_port; 391 return rv;
423
424 nt->np.remote_port = remote_port;
425
426 return strnlen(buf, count); 392 return strnlen(buf, count);
427} 393}
428 394
@@ -463,8 +429,6 @@ static ssize_t store_remote_mac(struct netconsole_target *nt,
463 size_t count) 429 size_t count)
464{ 430{
465 u8 remote_mac[ETH_ALEN]; 431 u8 remote_mac[ETH_ALEN];
466 char *p = (char *) buf;
467 int i;
468 432
469 if (nt->enabled) { 433 if (nt->enabled) {
470 printk(KERN_ERR "netconsole: target (%s) is enabled, " 434 printk(KERN_ERR "netconsole: target (%s) is enabled, "
@@ -473,23 +437,13 @@ static ssize_t store_remote_mac(struct netconsole_target *nt,
473 return -EINVAL; 437 return -EINVAL;
474 } 438 }
475 439
476 for (i = 0; i < ETH_ALEN - 1; i++) { 440 if (!mac_pton(buf, remote_mac))
477 remote_mac[i] = simple_strtoul(p, &p, 16); 441 return -EINVAL;
478 if (*p != ':') 442 if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n')
479 goto invalid; 443 return -EINVAL;
480 p++;
481 }
482 remote_mac[ETH_ALEN - 1] = simple_strtoul(p, &p, 16);
483 if (*p && (*p != '\n'))
484 goto invalid;
485
486 memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); 444 memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
487 445
488 return strnlen(buf, count); 446 return strnlen(buf, count);
489
490invalid:
491 printk(KERN_ERR "netconsole: invalid input\n");
492 return -EINVAL;
493} 447}
494 448
495/* 449/*
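
The netconsole changes above replace the driver-private strtol10_check_range() and the open-coded MAC parser with generic helpers: kstrtoint()/kstrtou16() do the range and trailing-garbage checking themselves (a single trailing newline is accepted), and mac_pton() parses the colon-separated address. A hedged usage sketch of the same helpers outside netconsole; the function, buffer names and error policy are illustrative.

#include <linux/kernel.h>
#include <linux/if_ether.h>

static int example_parse(const char *port_str, const char *mac_str)
{
        u16 port;
        u8 mac[ETH_ALEN];
        int err;

        err = kstrtou16(port_str, 10, &port);   /* "6666\n" -> 6666;
                                                   rejects junk and > 65535 */
        if (err)
                return err;

        if (!mac_pton(mac_str, mac))            /* "00:11:22:33:44:55" */
                return -EINVAL;

        return 0;
}

One wrinkle the patch keeps by hand: mac_pton() does not reject trailing characters, which is presumably why store_remote_mac() still checks buf[3 * ETH_ALEN - 1] after the call.
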
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 679dc8519c5b..77220687b92a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1177,7 +1177,7 @@ struct netxen_adapter {
1177 u8 max_sds_rings; 1177 u8 max_sds_rings;
1178 u8 driver_mismatch; 1178 u8 driver_mismatch;
1179 u8 msix_supported; 1179 u8 msix_supported;
1180 u8 rx_csum; 1180 u8 __pad;
1181 u8 pci_using_dac; 1181 u8 pci_using_dac;
1182 u8 portnum; 1182 u8 portnum;
1183 u8 physical_port; 1183 u8 physical_port;
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 3bdcc803ec68..b34fb74d07e3 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -117,7 +117,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
117 117
118 ecmd->port = PORT_TP; 118 ecmd->port = PORT_TP;
119 119
120 ecmd->speed = adapter->link_speed; 120 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
121 ecmd->duplex = adapter->link_duplex; 121 ecmd->duplex = adapter->link_duplex;
122 ecmd->autoneg = adapter->link_autoneg; 122 ecmd->autoneg = adapter->link_autoneg;
123 123
@@ -134,7 +134,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
134 } 134 }
135 135
136 if (netif_running(dev) && adapter->has_link_events) { 136 if (netif_running(dev) && adapter->has_link_events) {
137 ecmd->speed = adapter->link_speed; 137 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
138 ecmd->autoneg = adapter->link_autoneg; 138 ecmd->autoneg = adapter->link_autoneg;
139 ecmd->duplex = adapter->link_duplex; 139 ecmd->duplex = adapter->link_duplex;
140 goto skip; 140 goto skip;
@@ -146,10 +146,10 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
146 u16 pcifn = adapter->ahw.pci_func; 146 u16 pcifn = adapter->ahw.pci_func;
147 147
148 val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); 148 val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
149 ecmd->speed = P3_LINK_SPEED_MHZ * 149 ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ *
150 P3_LINK_SPEED_VAL(pcifn, val); 150 P3_LINK_SPEED_VAL(pcifn, val));
151 } else 151 } else
152 ecmd->speed = SPEED_10000; 152 ethtool_cmd_speed_set(ecmd, SPEED_10000);
153 153
154 ecmd->duplex = DUPLEX_FULL; 154 ecmd->duplex = DUPLEX_FULL;
155 ecmd->autoneg = AUTONEG_DISABLE; 155 ecmd->autoneg = AUTONEG_DISABLE;
@@ -251,6 +251,7 @@ static int
251netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 251netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
252{ 252{
253 struct netxen_adapter *adapter = netdev_priv(dev); 253 struct netxen_adapter *adapter = netdev_priv(dev);
254 u32 speed = ethtool_cmd_speed(ecmd);
254 int ret; 255 int ret;
255 256
256 if (adapter->ahw.port_type != NETXEN_NIC_GBE) 257 if (adapter->ahw.port_type != NETXEN_NIC_GBE)
@@ -259,14 +260,14 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
259 if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) 260 if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
260 return -EOPNOTSUPP; 261 return -EOPNOTSUPP;
261 262
262 ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex, 263 ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex,
263 ecmd->autoneg); 264 ecmd->autoneg);
264 if (ret == NX_RCODE_NOT_SUPPORTED) 265 if (ret == NX_RCODE_NOT_SUPPORTED)
265 return -EOPNOTSUPP; 266 return -EOPNOTSUPP;
266 else if (ret) 267 else if (ret)
267 return -EIO; 268 return -EIO;
268 269
269 adapter->link_speed = ecmd->speed; 270 adapter->link_speed = speed;
270 adapter->link_duplex = ecmd->duplex; 271 adapter->link_duplex = ecmd->duplex;
271 adapter->link_autoneg = ecmd->autoneg; 272 adapter->link_autoneg = ecmd->autoneg;
272 273
@@ -676,62 +677,6 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
676 } 677 }
677} 678}
678 679
679static u32 netxen_nic_get_tx_csum(struct net_device *dev)
680{
681 return dev->features & NETIF_F_IP_CSUM;
682}
683
684static u32 netxen_nic_get_rx_csum(struct net_device *dev)
685{
686 struct netxen_adapter *adapter = netdev_priv(dev);
687 return adapter->rx_csum;
688}
689
690static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data)
691{
692 struct netxen_adapter *adapter = netdev_priv(dev);
693
694 if (data) {
695 adapter->rx_csum = data;
696 return 0;
697 }
698
699 if (dev->features & NETIF_F_LRO) {
700 if (netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_DISABLED))
701 return -EIO;
702
703 dev->features &= ~NETIF_F_LRO;
704 netxen_send_lro_cleanup(adapter);
705 netdev_info(dev, "disabling LRO as rx_csum is off\n");
706 }
707 adapter->rx_csum = data;
708 return 0;
709}
710
711static u32 netxen_nic_get_tso(struct net_device *dev)
712{
713 struct netxen_adapter *adapter = netdev_priv(dev);
714
715 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
716 return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
717
718 return (dev->features & NETIF_F_TSO) != 0;
719}
720
721static int netxen_nic_set_tso(struct net_device *dev, u32 data)
722{
723 if (data) {
724 struct netxen_adapter *adapter = netdev_priv(dev);
725
726 dev->features |= NETIF_F_TSO;
727 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
728 dev->features |= NETIF_F_TSO6;
729 } else
730 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
731
732 return 0;
733}
734
735static void 680static void
736netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 681netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
737{ 682{
@@ -866,43 +811,6 @@ static int netxen_get_intr_coalesce(struct net_device *netdev,
866 return 0; 811 return 0;
867} 812}
868 813
869static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
870{
871 struct netxen_adapter *adapter = netdev_priv(netdev);
872 int hw_lro;
873
874 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
875 return -EINVAL;
876
877 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
878 return -EINVAL;
879
880 if (!adapter->rx_csum) {
881 netdev_info(netdev, "rx csum is off, cannot toggle LRO\n");
882 return -EINVAL;
883 }
884
885 if (!!(data & ETH_FLAG_LRO) == !!(netdev->features & NETIF_F_LRO))
886 return 0;
887
888 if (data & ETH_FLAG_LRO) {
889 hw_lro = NETXEN_NIC_LRO_ENABLED;
890 netdev->features |= NETIF_F_LRO;
891 } else {
892 hw_lro = NETXEN_NIC_LRO_DISABLED;
893 netdev->features &= ~NETIF_F_LRO;
894 }
895
896 if (netxen_config_hw_lro(adapter, hw_lro))
897 return -EIO;
898
899 if ((hw_lro == 0) && netxen_send_lro_cleanup(adapter))
900 return -EIO;
901
902
903 return 0;
904}
905
906const struct ethtool_ops netxen_nic_ethtool_ops = { 814const struct ethtool_ops netxen_nic_ethtool_ops = {
907 .get_settings = netxen_nic_get_settings, 815 .get_settings = netxen_nic_get_settings,
908 .set_settings = netxen_nic_set_settings, 816 .set_settings = netxen_nic_set_settings,
@@ -916,21 +824,12 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
916 .set_ringparam = netxen_nic_set_ringparam, 824 .set_ringparam = netxen_nic_set_ringparam,
917 .get_pauseparam = netxen_nic_get_pauseparam, 825 .get_pauseparam = netxen_nic_get_pauseparam,
918 .set_pauseparam = netxen_nic_set_pauseparam, 826 .set_pauseparam = netxen_nic_set_pauseparam,
919 .get_tx_csum = netxen_nic_get_tx_csum,
920 .set_tx_csum = ethtool_op_set_tx_csum,
921 .set_sg = ethtool_op_set_sg,
922 .get_tso = netxen_nic_get_tso,
923 .set_tso = netxen_nic_set_tso,
924 .get_wol = netxen_nic_get_wol, 827 .get_wol = netxen_nic_get_wol,
925 .set_wol = netxen_nic_set_wol, 828 .set_wol = netxen_nic_set_wol,
926 .self_test = netxen_nic_diag_test, 829 .self_test = netxen_nic_diag_test,
927 .get_strings = netxen_nic_get_strings, 830 .get_strings = netxen_nic_get_strings,
928 .get_ethtool_stats = netxen_nic_get_ethtool_stats, 831 .get_ethtool_stats = netxen_nic_get_ethtool_stats,
929 .get_sset_count = netxen_get_sset_count, 832 .get_sset_count = netxen_get_sset_count,
930 .get_rx_csum = netxen_nic_get_rx_csum,
931 .set_rx_csum = netxen_nic_set_rx_csum,
932 .get_coalesce = netxen_get_intr_coalesce, 833 .get_coalesce = netxen_get_intr_coalesce,
933 .set_coalesce = netxen_set_intr_coalesce, 834 .set_coalesce = netxen_set_intr_coalesce,
934 .get_flags = ethtool_op_get_flags,
935 .set_flags = netxen_nic_set_flags,
936}; 835};
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 731077d8d962..7f999671c7b2 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1483,7 +1483,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
1483 if (!skb) 1483 if (!skb)
1484 goto no_skb; 1484 goto no_skb;
1485 1485
1486 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { 1486 if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
1487 && cksum == STATUS_CKSUM_OK)) {
1487 adapter->stats.csummed++; 1488 adapter->stats.csummed++;
1488 skb->ip_summed = CHECKSUM_UNNECESSARY; 1489 skb->ip_summed = CHECKSUM_UNNECESSARY;
1489 } else 1490 } else
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index e8a4b6655999..b644383017f9 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -485,6 +485,37 @@ static void netxen_set_multicast_list(struct net_device *dev)
485 adapter->set_multi(dev); 485 adapter->set_multi(dev);
486} 486}
487 487
488static u32 netxen_fix_features(struct net_device *dev, u32 features)
489{
490 if (!(features & NETIF_F_RXCSUM)) {
491 netdev_info(dev, "disabling LRO as RXCSUM is off\n");
492
493 features &= ~NETIF_F_LRO;
494 }
495
496 return features;
497}
498
499static int netxen_set_features(struct net_device *dev, u32 features)
500{
501 struct netxen_adapter *adapter = netdev_priv(dev);
502 int hw_lro;
503
504 if (!((dev->features ^ features) & NETIF_F_LRO))
505 return 0;
506
507 hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
508 : NETXEN_NIC_LRO_DISABLED;
509
510 if (netxen_config_hw_lro(adapter, hw_lro))
511 return -EIO;
512
513 if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
514 return -EIO;
515
516 return 0;
517}
518
488static const struct net_device_ops netxen_netdev_ops = { 519static const struct net_device_ops netxen_netdev_ops = {
489 .ndo_open = netxen_nic_open, 520 .ndo_open = netxen_nic_open,
490 .ndo_stop = netxen_nic_close, 521 .ndo_stop = netxen_nic_close,
@@ -495,6 +526,8 @@ static const struct net_device_ops netxen_netdev_ops = {
495 .ndo_set_mac_address = netxen_nic_set_mac, 526 .ndo_set_mac_address = netxen_nic_set_mac,
496 .ndo_change_mtu = netxen_nic_change_mtu, 527 .ndo_change_mtu = netxen_nic_change_mtu,
497 .ndo_tx_timeout = netxen_tx_timeout, 528 .ndo_tx_timeout = netxen_tx_timeout,
529 .ndo_fix_features = netxen_fix_features,
530 .ndo_set_features = netxen_set_features,
498#ifdef CONFIG_NET_POLL_CONTROLLER 531#ifdef CONFIG_NET_POLL_CONTROLLER
499 .ndo_poll_controller = netxen_nic_poll_controller, 532 .ndo_poll_controller = netxen_nic_poll_controller,
500#endif 533#endif
@@ -905,7 +938,7 @@ netxen_nic_request_irq(struct netxen_adapter *adapter)
905 struct nx_host_sds_ring *sds_ring; 938 struct nx_host_sds_ring *sds_ring;
906 int err, ring; 939 int err, ring;
907 940
908 unsigned long flags = IRQF_SAMPLE_RANDOM; 941 unsigned long flags = 0;
909 struct net_device *netdev = adapter->netdev; 942 struct net_device *netdev = adapter->netdev;
910 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 943 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
911 944
@@ -1196,7 +1229,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1196 int err = 0; 1229 int err = 0;
1197 struct pci_dev *pdev = adapter->pdev; 1230 struct pci_dev *pdev = adapter->pdev;
1198 1231
1199 adapter->rx_csum = 1;
1200 adapter->mc_enabled = 0; 1232 adapter->mc_enabled = 0;
1201 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1233 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1202 adapter->max_mc_count = 38; 1234 adapter->max_mc_count = 38;
@@ -1210,14 +1242,13 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1210 1242
1211 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 1243 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
1212 1244
1213 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 1245 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1214 netdev->features |= (NETIF_F_GRO); 1246 NETIF_F_RXCSUM;
1215 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1216 1247
1217 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 1248 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
1218 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1249 netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
1219 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 1250
1220 } 1251 netdev->vlan_features |= netdev->hw_features;
1221 1252
1222 if (adapter->pci_using_dac) { 1253 if (adapter->pci_using_dac) {
1223 netdev->features |= NETIF_F_HIGHDMA; 1254 netdev->features |= NETIF_F_HIGHDMA;
@@ -1225,10 +1256,12 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1225 } 1256 }
1226 1257
1227 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) 1258 if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
1228 netdev->features |= (NETIF_F_HW_VLAN_TX); 1259 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1229 1260
1230 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) 1261 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
1231 netdev->features |= NETIF_F_LRO; 1262 netdev->hw_features |= NETIF_F_LRO;
1263
1264 netdev->features |= netdev->hw_features;
1232 1265
1233 netdev->irq = adapter->msix_entries[0].vector; 1266 netdev->irq = adapter->msix_entries[0].vector;
1234 1267
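
The netxen conversion above follows the netdev features model introduced around this time: hw_features lists what the user may toggle, features is what is currently enabled, ndo_fix_features lets the driver veto illegal combinations (here, LRO without RX checksumming), and ndo_set_features programs the hardware when a bit actually changes. A minimal sketch under those assumptions, with hypothetical foo_* names (at this point in history the feature masks were plain u32; later kernels use netdev_features_t):

static void foo_config_hw_lro(struct net_device *dev, bool on);	/* hypothetical hw hook */

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	/* LRO relies on valid hardware RX checksums; drop it when RXCSUM is off */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;
	return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_LRO)
		foo_config_hw_lro(dev, !!(features & NETIF_F_LRO));
	return 0;
}

static void foo_setup_features(struct net_device *dev)
{
	/* hw_features = user-toggleable set, features = currently enabled set */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_RXCSUM | NETIF_F_LRO;
	dev->features |= dev->hw_features;
}

Both callbacks are wired up through net_device_ops, exactly as the added .ndo_fix_features/.ndo_set_features lines do above; ethtool -K then works without any driver-specific ethtool ops.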
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 32678b6c6b39..cc25bff0bd3b 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -1233,7 +1233,7 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1233 1233
1234 bmsr = err; 1234 bmsr = err;
1235 if (bmsr & BMSR_LSTATUS) { 1235 if (bmsr & BMSR_LSTATUS) {
1236 u16 adv, lpa, common, estat; 1236 u16 adv, lpa;
1237 1237
1238 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1238 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1239 if (err < 0) 1239 if (err < 0)
@@ -1245,12 +1245,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1245 goto out; 1245 goto out;
1246 lpa = err; 1246 lpa = err;
1247 1247
1248 common = adv & lpa;
1249
1250 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1248 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1251 if (err < 0) 1249 if (err < 0)
1252 goto out; 1250 goto out;
1253 estat = err;
1254 link_up = 1; 1251 link_up = 1;
1255 current_speed = SPEED_1000; 1252 current_speed = SPEED_1000;
1256 current_duplex = DUPLEX_FULL; 1253 current_duplex = DUPLEX_FULL;
@@ -1650,7 +1647,7 @@ static int xcvr_init_10g(struct niu *np)
1650 break; 1647 break;
1651 } 1648 }
1652 1649
1653 return 0; 1650 return err;
1654} 1651}
1655 1652
1656static int mii_reset(struct niu *np) 1653static int mii_reset(struct niu *np)
@@ -2381,17 +2378,14 @@ static int serdes_init_10g_serdes(struct niu *np)
2381 struct niu_link_config *lp = &np->link_config; 2378 struct niu_link_config *lp = &np->link_config;
2382 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 2379 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2383 u64 ctrl_val, test_cfg_val, sig, mask, val; 2380 u64 ctrl_val, test_cfg_val, sig, mask, val;
2384 u64 reset_val;
2385 2381
2386 switch (np->port) { 2382 switch (np->port) {
2387 case 0: 2383 case 0:
2388 reset_val = ENET_SERDES_RESET_0;
2389 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 2384 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2390 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 2385 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2391 pll_cfg = ENET_SERDES_0_PLL_CFG; 2386 pll_cfg = ENET_SERDES_0_PLL_CFG;
2392 break; 2387 break;
2393 case 1: 2388 case 1:
2394 reset_val = ENET_SERDES_RESET_1;
2395 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 2389 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2396 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 2390 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2397 pll_cfg = ENET_SERDES_1_PLL_CFG; 2391 pll_cfg = ENET_SERDES_1_PLL_CFG;
@@ -6071,8 +6065,7 @@ static int niu_request_irq(struct niu *np)
6071 for (i = 0; i < np->num_ldg; i++) { 6065 for (i = 0; i < np->num_ldg; i++) {
6072 struct niu_ldg *lp = &np->ldg[i]; 6066 struct niu_ldg *lp = &np->ldg[i];
6073 6067
6074 err = request_irq(lp->irq, niu_interrupt, 6068 err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
6075 IRQF_SHARED | IRQF_SAMPLE_RANDOM,
6076 np->irq_name[i], lp); 6069 np->irq_name[i], lp);
6077 if (err) 6070 if (err)
6078 goto out_free_irqs; 6071 goto out_free_irqs;
@@ -6851,7 +6844,7 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6851 cmd->supported = lp->supported; 6844 cmd->supported = lp->supported;
6852 cmd->advertising = lp->active_advertising; 6845 cmd->advertising = lp->active_advertising;
6853 cmd->autoneg = lp->active_autoneg; 6846 cmd->autoneg = lp->active_autoneg;
6854 cmd->speed = lp->active_speed; 6847 ethtool_cmd_speed_set(cmd, lp->active_speed);
6855 cmd->duplex = lp->active_duplex; 6848 cmd->duplex = lp->active_duplex;
6856 cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; 6849 cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
6857 cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? 6850 cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
@@ -6866,7 +6859,7 @@ static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6866 struct niu_link_config *lp = &np->link_config; 6859 struct niu_link_config *lp = &np->link_config;
6867 6860
6868 lp->advertising = cmd->advertising; 6861 lp->advertising = cmd->advertising;
6869 lp->speed = cmd->speed; 6862 lp->speed = ethtool_cmd_speed(cmd);
6870 lp->duplex = cmd->duplex; 6863 lp->duplex = cmd->duplex;
6871 lp->autoneg = cmd->autoneg; 6864 lp->autoneg = cmd->autoneg;
6872 return niu_init_link(np); 6865 return niu_init_link(np);
@@ -7023,6 +7016,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
7023 case UDP_V4_FLOW: 7016 case UDP_V4_FLOW:
7024 *class = CLASS_CODE_UDP_IPV4; 7017 *class = CLASS_CODE_UDP_IPV4;
7025 break; 7018 break;
7019 case AH_ESP_V4_FLOW:
7026 case AH_V4_FLOW: 7020 case AH_V4_FLOW:
7027 case ESP_V4_FLOW: 7021 case ESP_V4_FLOW:
7028 *class = CLASS_CODE_AH_ESP_IPV4; 7022 *class = CLASS_CODE_AH_ESP_IPV4;
@@ -7036,6 +7030,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
7036 case UDP_V6_FLOW: 7030 case UDP_V6_FLOW:
7037 *class = CLASS_CODE_UDP_IPV6; 7031 *class = CLASS_CODE_UDP_IPV6;
7038 break; 7032 break;
7033 case AH_ESP_V6_FLOW:
7039 case AH_V6_FLOW: 7034 case AH_V6_FLOW:
7040 case ESP_V6_FLOW: 7035 case ESP_V6_FLOW:
7041 *class = CLASS_CODE_AH_ESP_IPV6; 7036 *class = CLASS_CODE_AH_ESP_IPV6;
@@ -7889,37 +7884,35 @@ static void niu_force_led(struct niu *np, int on)
7889 nw64_mac(reg, val); 7884 nw64_mac(reg, val);
7890} 7885}
7891 7886
7892static int niu_phys_id(struct net_device *dev, u32 data) 7887static int niu_set_phys_id(struct net_device *dev,
7888 enum ethtool_phys_id_state state)
7889
7893{ 7890{
7894 struct niu *np = netdev_priv(dev); 7891 struct niu *np = netdev_priv(dev);
7895 u64 orig_led_state;
7896 int i;
7897 7892
7898 if (!netif_running(dev)) 7893 if (!netif_running(dev))
7899 return -EAGAIN; 7894 return -EAGAIN;
7900 7895
7901 if (data == 0) 7896 switch (state) {
7902 data = 2; 7897 case ETHTOOL_ID_ACTIVE:
7898 np->orig_led_state = niu_led_state_save(np);
7899 return 1; /* cycle on/off once per second */
7903 7900
7904 orig_led_state = niu_led_state_save(np); 7901 case ETHTOOL_ID_ON:
7905 for (i = 0; i < (data * 2); i++) { 7902 niu_force_led(np, 1);
7906 int on = ((i % 2) == 0); 7903 break;
7907 7904
7908 niu_force_led(np, on); 7905 case ETHTOOL_ID_OFF:
7906 niu_force_led(np, 0);
7907 break;
7909 7908
7910 if (msleep_interruptible(500)) 7909 case ETHTOOL_ID_INACTIVE:
7911 break; 7910 niu_led_state_restore(np, np->orig_led_state);
7912 } 7911 }
7913 niu_led_state_restore(np, orig_led_state);
7914 7912
7915 return 0; 7913 return 0;
7916} 7914}
7917 7915
7918static int niu_set_flags(struct net_device *dev, u32 data)
7919{
7920 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7921}
7922
7923static const struct ethtool_ops niu_ethtool_ops = { 7916static const struct ethtool_ops niu_ethtool_ops = {
7924 .get_drvinfo = niu_get_drvinfo, 7917 .get_drvinfo = niu_get_drvinfo,
7925 .get_link = ethtool_op_get_link, 7918 .get_link = ethtool_op_get_link,
@@ -7933,11 +7926,9 @@ static const struct ethtool_ops niu_ethtool_ops = {
7933 .get_strings = niu_get_strings, 7926 .get_strings = niu_get_strings,
7934 .get_sset_count = niu_get_sset_count, 7927 .get_sset_count = niu_get_sset_count,
7935 .get_ethtool_stats = niu_get_ethtool_stats, 7928 .get_ethtool_stats = niu_get_ethtool_stats,
7936 .phys_id = niu_phys_id, 7929 .set_phys_id = niu_set_phys_id,
7937 .get_rxnfc = niu_get_nfc, 7930 .get_rxnfc = niu_get_nfc,
7938 .set_rxnfc = niu_set_nfc, 7931 .set_rxnfc = niu_set_nfc,
7939 .set_flags = niu_set_flags,
7940 .get_flags = ethtool_op_get_flags,
7941}; 7932};
7942 7933
7943static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7934static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -8131,7 +8122,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
8131 netif_printk(np, probe, KERN_DEBUG, np->dev, 8122 netif_printk(np, probe, KERN_DEBUG, np->dev,
8132 "VPD_SCAN: start[%x] end[%x]\n", start, end); 8123 "VPD_SCAN: start[%x] end[%x]\n", start, end);
8133 while (start < end) { 8124 while (start < end) {
8134 int len, err, instance, type, prop_len; 8125 int len, err, prop_len;
8135 char namebuf[64]; 8126 char namebuf[64];
8136 u8 *prop_buf; 8127 u8 *prop_buf;
8137 int max_len; 8128 int max_len;
@@ -8147,8 +8138,6 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
8147 len = err; 8138 len = err;
8148 start += 3; 8139 start += 3;
8149 8140
8150 instance = niu_pci_eeprom_read(np, start);
8151 type = niu_pci_eeprom_read(np, start + 3);
8152 prop_len = niu_pci_eeprom_read(np, start + 4); 8141 prop_len = niu_pci_eeprom_read(np, start + 4);
8153 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); 8142 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8154 if (err < 0) 8143 if (err < 0)
@@ -9768,8 +9757,8 @@ static void __devinit niu_device_announce(struct niu *np)
9768 9757
9769static void __devinit niu_set_basic_features(struct net_device *dev) 9758static void __devinit niu_set_basic_features(struct net_device *dev)
9770{ 9759{
9771 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | 9760 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
9772 NETIF_F_GRO | NETIF_F_RXHASH); 9761 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
9773} 9762}
9774 9763
9775static int __devinit niu_pci_init_one(struct pci_dev *pdev, 9764static int __devinit niu_pci_init_one(struct pci_dev *pdev,
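
The niu hunk above also illustrates the phys_id -> set_phys_id conversion: the old callback blinked the LED itself in a sleep loop, while the new one is a state machine driven by the ethtool core. ETHTOOL_ID_ACTIVE is called once and its positive return value tells the core how many on/off cycles per second to request (as the niu comment notes), ETHTOOL_ID_ON/ETHTOOL_ID_OFF do the actual toggling, and ETHTOOL_ID_INACTIVE restores the original state, which must therefore be kept in the driver-private structure (hence the new orig_led_state field in niu.h). A generic sketch with hypothetical foo_* helpers:

struct foo_priv {
	u32 saved_led;		/* LED state saved across set_phys_id() calls */
};

static int foo_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct foo_priv *p = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		p->saved_led = foo_led_save(dev);	/* hypothetical helper */
		return 1;	/* ask the core to cycle on/off once per second */
	case ETHTOOL_ID_ON:
		foo_led_force(dev, true);		/* hypothetical helper */
		break;
	case ETHTOOL_ID_OFF:
		foo_led_force(dev, false);
		break;
	case ETHTOOL_ID_INACTIVE:
		foo_led_restore(dev, p->saved_led);	/* hypothetical helper */
		break;
	}
	return 0;
}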
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index a41fa8ebe05f..51e177e1860d 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3279,6 +3279,7 @@ struct niu {
3279 unsigned long xpcs_off; 3279 unsigned long xpcs_off;
3280 3280
3281 struct timer_list timer; 3281 struct timer_list timer;
3282 u64 orig_led_state;
3282 const struct niu_phy_ops *phy_ops; 3283 const struct niu_phy_ops *phy_ops;
3283 int phy_addr; 3284 int phy_addr;
3284 3285
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 6667e0667a88..3e4040f2f3cb 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1251,7 +1251,7 @@ static int ns83820_get_settings(struct net_device *ndev,
1251 /* 1251 /*
1252 * Here's the list of available ethtool commands from other drivers: 1252 * Here's the list of available ethtool commands from other drivers:
1253 * cmd->advertising = 1253 * cmd->advertising =
1254 * cmd->speed = 1254 * ethtool_cmd_speed_set(cmd, ...)
1255 * cmd->duplex = 1255 * cmd->duplex =
1256 * cmd->port = 0; 1256 * cmd->port = 0;
1257 * cmd->phy_address = 1257 * cmd->phy_address =
@@ -1289,13 +1289,13 @@ static int ns83820_get_settings(struct net_device *ndev,
1289 cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; 1289 cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
1290 switch (cfg / CFG_SPDSTS0 & 3) { 1290 switch (cfg / CFG_SPDSTS0 & 3) {
1291 case 2: 1291 case 2:
1292 cmd->speed = SPEED_1000; 1292 ethtool_cmd_speed_set(cmd, SPEED_1000);
1293 break; 1293 break;
1294 case 1: 1294 case 1:
1295 cmd->speed = SPEED_100; 1295 ethtool_cmd_speed_set(cmd, SPEED_100);
1296 break; 1296 break;
1297 default: 1297 default:
1298 cmd->speed = SPEED_10; 1298 ethtool_cmd_speed_set(cmd, SPEED_10);
1299 break; 1299 break;
1300 } 1300 }
1301 cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) 1301 cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
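
The ns83820 change, like the niu and phy.c ones, stops writing cmd->speed directly and goes through the ethtool_cmd speed accessors. struct ethtool_cmd stores the speed in two 16-bit fields (speed and speed_hi) so that values above 65535 Mb/s fit, and the accessors hide the split. Approximately as defined in include/linux/ethtool.h of this era (reconstructed here, so the in-tree definitions may differ in detail):

static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
	ep->speed = (__u16)speed;		/* low 16 bits */
	ep->speed_hi = (__u16)(speed >> 16);	/* high 16 bits */
}

static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
{
	return (ep->speed_hi << 16) | ep->speed;
}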
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index bf126e76fabf..59fac77d0dbb 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -597,8 +597,6 @@ struct pch_gbe_hw_stats {
597 * @rx_ring: Pointer of Rx descriptor ring structure 597 * @rx_ring: Pointer of Rx descriptor ring structure
598 * @rx_buffer_len: Receive buffer length 598 * @rx_buffer_len: Receive buffer length
599 * @tx_queue_len: Transmit queue length 599 * @tx_queue_len: Transmit queue length
600 * @rx_csum: Receive TCP/IP checksum enable/disable
601 * @tx_csum: Transmit TCP/IP checksum enable/disable
602 * @have_msi: PCI MSI mode flag 600 * @have_msi: PCI MSI mode flag
603 */ 601 */
604 602
@@ -623,8 +621,6 @@ struct pch_gbe_adapter {
623 struct pch_gbe_rx_ring *rx_ring; 621 struct pch_gbe_rx_ring *rx_ring;
624 unsigned long rx_buffer_len; 622 unsigned long rx_buffer_len;
625 unsigned long tx_queue_len; 623 unsigned long tx_queue_len;
626 bool rx_csum;
627 bool tx_csum;
628 bool have_msi; 624 bool have_msi;
629}; 625};
630 626
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index d2174a40d708..ea2d8e41887a 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -92,7 +92,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
92 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); 92 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
93 93
94 if (!netif_carrier_ok(adapter->netdev)) 94 if (!netif_carrier_ok(adapter->netdev))
95 ecmd->speed = -1; 95 ethtool_cmd_speed_set(ecmd, -1);
96 return ret; 96 return ret;
97} 97}
98 98
@@ -109,12 +109,15 @@ static int pch_gbe_set_settings(struct net_device *netdev,
109{ 109{
110 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 110 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
111 struct pch_gbe_hw *hw = &adapter->hw; 111 struct pch_gbe_hw *hw = &adapter->hw;
112 u32 speed = ethtool_cmd_speed(ecmd);
112 int ret; 113 int ret;
113 114
114 pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); 115 pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
115 116
116 if (ecmd->speed == USHRT_MAX) { 117 /* when set_settings() is called with a ethtool_cmd previously
116 if (ecmd->speed == USHRT_MAX) { 117 /* when set_settings() is called with an ethtool_cmd previously
117 ecmd->speed = SPEED_1000; 118 * filled by get_settings() on a down link, speed is -1: */
119 if (speed == UINT_MAX) {
120 speed = SPEED_1000;
118 ecmd->duplex = DUPLEX_FULL; 121 ecmd->duplex = DUPLEX_FULL;
119 } 122 }
120 ret = mii_ethtool_sset(&adapter->mii, ecmd); 123 ret = mii_ethtool_sset(&adapter->mii, ecmd);
@@ -122,7 +125,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
122 pr_err("Error: mii_ethtool_sset\n"); 125 pr_err("Error: mii_ethtool_sset\n");
123 return ret; 126 return ret;
124 } 127 }
125 hw->mac.link_speed = ecmd->speed; 128 hw->mac.link_speed = speed;
126 hw->mac.link_duplex = ecmd->duplex; 129 hw->mac.link_duplex = ecmd->duplex;
127 hw->phy.autoneg_advertised = ecmd->advertising; 130 hw->phy.autoneg_advertised = ecmd->advertising;
128 hw->mac.autoneg = ecmd->autoneg; 131 hw->mac.autoneg = ecmd->autoneg;
@@ -434,57 +437,6 @@ static int pch_gbe_set_pauseparam(struct net_device *netdev,
434} 437}
435 438
436/** 439/**
437 * pch_gbe_get_rx_csum - Report whether receive checksums are turned on or off
438 * @netdev: Network interface device structure
439 * Returns
440 * true(1): Checksum On
441 * false(0): Checksum Off
442 */
443static u32 pch_gbe_get_rx_csum(struct net_device *netdev)
444{
445 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
446
447 return adapter->rx_csum;
448}
449
450/**
451 * pch_gbe_set_rx_csum - Turn receive checksum on or off
452 * @netdev: Network interface device structure
453 * @data: Checksum On[true] or Off[false]
454 * Returns
455 * 0: Successful.
456 * Negative value: Failed.
457 */
458static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
459{
460 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
461
462 adapter->rx_csum = data;
463 if ((netif_running(netdev)))
464 pch_gbe_reinit_locked(adapter);
465 else
466 pch_gbe_reset(adapter);
467
468 return 0;
469}
470
471/**
472 * pch_gbe_set_tx_csum - Turn transmit checksums on or off
473 * @netdev: Network interface device structure
474 * @data: Checksum on[true] or off[false]
475 * Returns
476 * 0: Successful.
477 * Negative value: Failed.
478 */
479static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
480{
481 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
482
483 adapter->tx_csum = data;
484 return ethtool_op_set_tx_ipv6_csum(netdev, data);
485}
486
487/**
488 * pch_gbe_get_strings - Return a set of strings that describe the requested 440 * pch_gbe_get_strings - Return a set of strings that describe the requested
489 * objects 441 * objects
490 * @netdev: Network interface device structure 442 * @netdev: Network interface device structure
@@ -554,9 +506,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
554 .set_ringparam = pch_gbe_set_ringparam, 506 .set_ringparam = pch_gbe_set_ringparam,
555 .get_pauseparam = pch_gbe_get_pauseparam, 507 .get_pauseparam = pch_gbe_get_pauseparam,
556 .set_pauseparam = pch_gbe_set_pauseparam, 508 .set_pauseparam = pch_gbe_set_pauseparam,
557 .get_rx_csum = pch_gbe_get_rx_csum,
558 .set_rx_csum = pch_gbe_set_rx_csum,
559 .set_tx_csum = pch_gbe_set_tx_csum,
560 .get_strings = pch_gbe_get_strings, 509 .get_strings = pch_gbe_get_strings,
561 .get_ethtool_stats = pch_gbe_get_ethtool_stats, 510 .get_ethtool_stats = pch_gbe_get_ethtool_stats,
562 .get_sset_count = pch_gbe_get_sset_count, 511 .get_sset_count = pch_gbe_get_sset_count,
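
The sentinel comparison in pch_gbe_set_settings changes from USHRT_MAX to UINT_MAX because get_settings now stores -1 through ethtool_cmd_speed_set(), which fills both 16-bit halves; read back through ethtool_cmd_speed() that is 0xffffffff, not the 0xffff the raw 16-bit field used to hold. A small illustration, assuming the accessor behaviour sketched earlier:

struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

ethtool_cmd_speed_set(&ecmd, -1);	/* link-down sentinel from get_settings() */
/* ecmd.speed == 0xffff and ecmd.speed_hi == 0xffff, so: */
WARN_ON(ethtool_cmd_speed(&ecmd) != UINT_MAX);
/* the old 16-bit test against USHRT_MAX would no longer match */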
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 56d049a472da..c2476fd96573 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -659,6 +659,7 @@ static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
659 */ 659 */
660static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) 660static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
661{ 661{
662 struct net_device *netdev = adapter->netdev;
662 struct pch_gbe_hw *hw = &adapter->hw; 663 struct pch_gbe_hw *hw = &adapter->hw;
663 u32 rx_mode, tcpip; 664 u32 rx_mode, tcpip;
664 665
@@ -669,7 +670,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
669 670
670 tcpip = ioread32(&hw->reg->TCPIP_ACC); 671 tcpip = ioread32(&hw->reg->TCPIP_ACC);
671 672
672 if (adapter->rx_csum) { 673 if (netdev->features & NETIF_F_RXCSUM) {
673 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; 674 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
674 tcpip |= PCH_GBE_RX_TCPIPACC_EN; 675 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
675 } else { 676 } else {
@@ -890,12 +891,12 @@ static void pch_gbe_watchdog(unsigned long data)
890 struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data; 891 struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
891 struct net_device *netdev = adapter->netdev; 892 struct net_device *netdev = adapter->netdev;
892 struct pch_gbe_hw *hw = &adapter->hw; 893 struct pch_gbe_hw *hw = &adapter->hw;
893 struct ethtool_cmd cmd;
894 894
895 pr_debug("right now = %ld\n", jiffies); 895 pr_debug("right now = %ld\n", jiffies);
896 896
897 pch_gbe_update_stats(adapter); 897 pch_gbe_update_stats(adapter);
898 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) { 898 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
899 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
899 netdev->tx_queue_len = adapter->tx_queue_len; 900 netdev->tx_queue_len = adapter->tx_queue_len;
900 /* mii library handles link maintenance tasks */ 901 /* mii library handles link maintenance tasks */
901 if (mii_ethtool_gset(&adapter->mii, &cmd)) { 902 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
@@ -905,7 +906,7 @@ static void pch_gbe_watchdog(unsigned long data)
905 PCH_GBE_WATCHDOG_PERIOD)); 906 PCH_GBE_WATCHDOG_PERIOD));
906 return; 907 return;
907 } 908 }
908 hw->mac.link_speed = cmd.speed; 909 hw->mac.link_speed = ethtool_cmd_speed(&cmd);
909 hw->mac.link_duplex = cmd.duplex; 910 hw->mac.link_duplex = cmd.duplex;
910 /* Set the RGMII control. */ 911 /* Set the RGMII control. */
911 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed, 912 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
@@ -915,7 +916,7 @@ static void pch_gbe_watchdog(unsigned long data)
915 hw->mac.link_duplex); 916 hw->mac.link_duplex);
916 netdev_dbg(netdev, 917 netdev_dbg(netdev,
917 "Link is Up %d Mbps %s-Duplex\n", 918 "Link is Up %d Mbps %s-Duplex\n",
918 cmd.speed, 919 hw->mac.link_speed,
919 cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); 920 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
920 netif_carrier_on(netdev); 921 netif_carrier_on(netdev);
921 netif_wake_queue(netdev); 922 netif_wake_queue(netdev);
@@ -953,7 +954,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
953 frame_ctrl = 0; 954 frame_ctrl = 0;
954 if (unlikely(skb->len < PCH_GBE_SHORT_PKT)) 955 if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
955 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD; 956 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
956 if (unlikely(!adapter->tx_csum)) 957 if (skb->ip_summed == CHECKSUM_NONE)
957 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF; 958 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
958 959
959 /* Performs checksum processing */ 960 /* Performs checksum processing */
@@ -961,7 +962,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
961 * It is because the hardware accelerator does not support a checksum, 962 * It is because the hardware accelerator does not support a checksum,
962 * when the received data size is less than 64 bytes. 963 * when the received data size is less than 64 bytes.
963 */ 964 */
964 if ((skb->len < PCH_GBE_SHORT_PKT) && (adapter->tx_csum)) { 965 if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
965 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD | 966 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
966 PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF; 967 PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
967 if (skb->protocol == htons(ETH_P_IP)) { 968 if (skb->protocol == htons(ETH_P_IP)) {
@@ -1429,7 +1430,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1429 length = (rx_desc->rx_words_eob) - 3; 1430 length = (rx_desc->rx_words_eob) - 3;
1430 1431
1431 /* Decide the data conversion method */ 1432 /* Decide the data conversion method */
1432 if (!adapter->rx_csum) { 1433 if (!(netdev->features & NETIF_F_RXCSUM)) {
1433 /* [Header:14][payload] */ 1434 /* [Header:14][payload] */
1434 if (NET_IP_ALIGN) { 1435 if (NET_IP_ALIGN) {
1435 /* Because alignment differs, 1436 /* Because alignment differs,
@@ -2032,6 +2033,29 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2032} 2033}
2033 2034
2034/** 2035/**
2036 * pch_gbe_set_features - Reset device after features changed
2037 * @netdev: Network interface device structure
2038 * @features: New features
2039 * Returns
2040 * 0: HW state updated successfully
2041 */
2042static int pch_gbe_set_features(struct net_device *netdev, u32 features)
2043{
2044 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2045 u32 changed = features ^ netdev->features;
2046
2047 if (!(changed & NETIF_F_RXCSUM))
2048 return 0;
2049
2050 if (netif_running(netdev))
2051 pch_gbe_reinit_locked(adapter);
2052 else
2053 pch_gbe_reset(adapter);
2054
2055 return 0;
2056}
2057
2058/**
2035 * pch_gbe_ioctl - Controls register through a MII interface 2059 * pch_gbe_ioctl - Controls register through a MII interface
2036 * @netdev: Network interface device structure 2060 * @netdev: Network interface device structure
2037 * @ifr: Pointer to ifr structure 2061 * @ifr: Pointer to ifr structure
@@ -2131,6 +2155,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
2131 .ndo_set_mac_address = pch_gbe_set_mac, 2155 .ndo_set_mac_address = pch_gbe_set_mac,
2132 .ndo_tx_timeout = pch_gbe_tx_timeout, 2156 .ndo_tx_timeout = pch_gbe_tx_timeout,
2133 .ndo_change_mtu = pch_gbe_change_mtu, 2157 .ndo_change_mtu = pch_gbe_change_mtu,
2158 .ndo_set_features = pch_gbe_set_features,
2134 .ndo_do_ioctl = pch_gbe_ioctl, 2159 .ndo_do_ioctl = pch_gbe_ioctl,
2135 .ndo_set_multicast_list = &pch_gbe_set_multi, 2160 .ndo_set_multicast_list = &pch_gbe_set_multi,
2136#ifdef CONFIG_NET_POLL_CONTROLLER 2161#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2336,7 +2361,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2336 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2361 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2337 netif_napi_add(netdev, &adapter->napi, 2362 netif_napi_add(netdev, &adapter->napi,
2338 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT); 2363 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2339 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; 2364 netdev->hw_features = NETIF_F_RXCSUM |
2365 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2366 netdev->features = netdev->hw_features;
2340 pch_gbe_set_ethtool_ops(netdev); 2367 pch_gbe_set_ethtool_ops(netdev);
2341 2368
2342 pch_gbe_mac_load_mac_addr(&adapter->hw); 2369 pch_gbe_mac_load_mac_addr(&adapter->hw);
@@ -2375,11 +2402,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2375 2402
2376 pch_gbe_check_options(adapter); 2403 pch_gbe_check_options(adapter);
2377 2404
2378 if (adapter->tx_csum)
2379 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2380 else
2381 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2382
2383 /* initialize the wol settings based on the eeprom settings */ 2405 /* initialize the wol settings based on the eeprom settings */
2384 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING; 2406 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2385 dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr); 2407 dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
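
pch_gbe_set_features above resets the MAC only when the RXCSUM bit actually flips; XOR-ing the requested feature word against netdev->features is the usual way to ignore ndo_set_features calls triggered by unrelated bits. This works because, as far as I can tell, the core updates dev->features only after the callback returns successfully, so inside the callback it still holds the previous set. A trimmed sketch with a hypothetical reinit helper:

static int foo_set_features(struct net_device *netdev, u32 features)
{
	u32 changed = features ^ netdev->features;	/* bits that differ from current state */

	if (!(changed & NETIF_F_RXCSUM))
		return 0;	/* nothing we program in hardware changed */

	foo_reinit(netdev);	/* hypothetical: reprogram the RX checksum engine */
	return 0;
}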
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index ef0996a0eaaa..5b5d90a47e29 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -426,6 +426,8 @@ full_duplex_only:
426void pch_gbe_check_options(struct pch_gbe_adapter *adapter) 426void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
427{ 427{
428 struct pch_gbe_hw *hw = &adapter->hw; 428 struct pch_gbe_hw *hw = &adapter->hw;
429 struct net_device *dev = adapter->netdev;
430 int val;
429 431
430 { /* Transmit Descriptor Count */ 432 { /* Transmit Descriptor Count */
431 static const struct pch_gbe_option opt = { 433 static const struct pch_gbe_option opt = {
@@ -466,9 +468,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
466 .err = "defaulting to Enabled", 468 .err = "defaulting to Enabled",
467 .def = PCH_GBE_DEFAULT_RX_CSUM 469 .def = PCH_GBE_DEFAULT_RX_CSUM
468 }; 470 };
469 adapter->rx_csum = XsumRX; 471 val = XsumRX;
470 pch_gbe_validate_option((int *)(&adapter->rx_csum), 472 pch_gbe_validate_option(&val, &opt, adapter);
471 &opt, adapter); 473 if (!val)
474 dev->features &= ~NETIF_F_RXCSUM;
472 } 475 }
473 { /* Checksum Offload Enable/Disable */ 476 { /* Checksum Offload Enable/Disable */
474 static const struct pch_gbe_option opt = { 477 static const struct pch_gbe_option opt = {
@@ -477,9 +480,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
477 .err = "defaulting to Enabled", 480 .err = "defaulting to Enabled",
478 .def = PCH_GBE_DEFAULT_TX_CSUM 481 .def = PCH_GBE_DEFAULT_TX_CSUM
479 }; 482 };
480 adapter->tx_csum = XsumTX; 483 val = XsumTX;
481 pch_gbe_validate_option((int *)(&adapter->tx_csum), 484 pch_gbe_validate_option(&val, &opt, adapter);
482 &opt, adapter); 485 if (!val)
486 dev->features &= ~NETIF_F_ALL_CSUM;
483 } 487 }
484 { /* Flow Control */ 488 { /* Flow Control */
485 static const struct pch_gbe_option opt = { 489 static const struct pch_gbe_option opt = {
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.c b/drivers/net/pch_gbe/pch_gbe_phy.c
index 923a687acd30..28bb9603d736 100644
--- a/drivers/net/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/pch_gbe/pch_gbe_phy.c
@@ -247,7 +247,7 @@ inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
247void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) 247void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
248{ 248{
249 struct pch_gbe_adapter *adapter; 249 struct pch_gbe_adapter *adapter;
250 struct ethtool_cmd cmd; 250 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
251 int ret; 251 int ret;
252 u16 mii_reg; 252 u16 mii_reg;
253 253
@@ -256,7 +256,7 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
256 if (ret) 256 if (ret)
257 pr_err("Error: mii_ethtool_gset\n"); 257 pr_err("Error: mii_ethtool_gset\n");
258 258
259 cmd.speed = hw->mac.link_speed; 259 ethtool_cmd_speed_set(&cmd, hw->mac.link_speed);
260 cmd.duplex = hw->mac.link_duplex; 260 cmd.duplex = hw->mac.link_duplex;
261 cmd.advertising = hw->phy.autoneg_advertised; 261 cmd.advertising = hw->phy.autoneg_advertised;
262 cmd.autoneg = hw->mac.autoneg; 262 cmd.autoneg = hw->mac.autoneg;
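
The on-stack ethtool_cmd in pch_gbe (and in pcnet32 below) now starts as { .cmd = ETHTOOL_GSET }; a designated initializer zeroes every other member, which matters once the speed is read back with ethtool_cmd_speed(): stack garbage left in a field the MII helpers do not write, such as speed_hi, would otherwise corrupt the combined 32-bit value. That is my reading of the change, not a rationale stated in the patch. A condensed sketch of the call sequence:

struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };	/* remaining fields zeroed */
u32 speed;
int ret;

ret = mii_ethtool_gset(&adapter->mii, &cmd);	/* fills the MII-visible fields */
if (ret)
	pr_err("Error: mii_ethtool_gset\n");
speed = ethtool_cmd_speed(&cmd);		/* speed_hi is 0, not stack garbage */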
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 108591756440..288e4f1317ee 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1860,7 +1860,7 @@ static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1860 tmp = inw(ioaddr + CONFIG); 1860 tmp = inw(ioaddr + CONFIG);
1861 ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP; 1861 ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
1862 ecmd->transceiver = XCVR_INTERNAL; 1862 ecmd->transceiver = XCVR_INTERNAL;
1863 ecmd->speed = SPEED_10; 1863 ethtool_cmd_speed_set(ecmd, SPEED_10);
1864 ecmd->phy_address = ioaddr + MGMT; 1864 ecmd->phy_address = ioaddr + MGMT;
1865 1865
1866 SMC_SELECT_BANK(0); 1866 SMC_SELECT_BANK(0);
@@ -1875,8 +1875,8 @@ static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1875 u16 tmp; 1875 u16 tmp;
1876 unsigned int ioaddr = dev->base_addr; 1876 unsigned int ioaddr = dev->base_addr;
1877 1877
1878 if (ecmd->speed != SPEED_10) 1878 if (ethtool_cmd_speed(ecmd) != SPEED_10)
1879 return -EINVAL; 1879 return -EINVAL;
1880 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 1880 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1881 return -EINVAL; 1881 return -EINVAL;
1882 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI) 1882 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 768037602dff..b48aba9e4227 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -295,12 +295,14 @@ struct pcnet32_private {
295 struct net_device *next; 295 struct net_device *next;
296 struct mii_if_info mii_if; 296 struct mii_if_info mii_if;
297 struct timer_list watchdog_timer; 297 struct timer_list watchdog_timer;
298 struct timer_list blink_timer;
299 u32 msg_enable; /* debug message level */ 298 u32 msg_enable; /* debug message level */
300 299
301 /* each bit indicates an available PHY */ 300 /* each bit indicates an available PHY */
302 u32 phymask; 301 u32 phymask;
303 unsigned short chip_version; /* which variant this is */ 302 unsigned short chip_version; /* which variant this is */
303
304 /* saved registers during ethtool blink */
305 u16 save_regs[4];
304}; 306};
305 307
306static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); 308static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
@@ -324,8 +326,6 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
324static void pcnet32_ethtool_test(struct net_device *dev, 326static void pcnet32_ethtool_test(struct net_device *dev,
325 struct ethtool_test *eth_test, u64 * data); 327 struct ethtool_test *eth_test, u64 * data);
326static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); 328static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
327static int pcnet32_phys_id(struct net_device *dev, u32 data);
328static void pcnet32_led_blink_callback(struct net_device *dev);
329static int pcnet32_get_regs_len(struct net_device *dev); 329static int pcnet32_get_regs_len(struct net_device *dev);
330static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 330static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
331 void *ptr); 331 void *ptr);
@@ -1022,7 +1022,8 @@ clean_up:
1022 return rc; 1022 return rc;
1023} /* end pcnet32_loopback_test */ 1023} /* end pcnet32_loopback_test */
1024 1024
1025static void pcnet32_led_blink_callback(struct net_device *dev) 1025static int pcnet32_set_phys_id(struct net_device *dev,
1026 enum ethtool_phys_id_state state)
1026{ 1027{
1027 struct pcnet32_private *lp = netdev_priv(dev); 1028 struct pcnet32_private *lp = netdev_priv(dev);
1028 struct pcnet32_access *a = &lp->a; 1029 struct pcnet32_access *a = &lp->a;
@@ -1030,50 +1031,31 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
1030 unsigned long flags; 1031 unsigned long flags;
1031 int i; 1032 int i;
1032 1033
1033 spin_lock_irqsave(&lp->lock, flags); 1034 switch (state) {
1034 for (i = 4; i < 8; i++) 1035 case ETHTOOL_ID_ACTIVE:
1035 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); 1036 /* Save the current value of the bcrs */
1036 spin_unlock_irqrestore(&lp->lock, flags); 1037 spin_lock_irqsave(&lp->lock, flags);
1037 1038 for (i = 4; i < 8; i++)
1038 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); 1039 lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
1039} 1040 spin_unlock_irqrestore(&lp->lock, flags);
1041 return 2; /* cycle on/off twice per second */
1040 1042
1041static int pcnet32_phys_id(struct net_device *dev, u32 data) 1043 case ETHTOOL_ID_ON:
1042{ 1044 case ETHTOOL_ID_OFF:
1043 struct pcnet32_private *lp = netdev_priv(dev); 1045 /* Blink the led */
1044 struct pcnet32_access *a = &lp->a; 1046 spin_lock_irqsave(&lp->lock, flags);
1045 ulong ioaddr = dev->base_addr; 1047 for (i = 4; i < 8; i++)
1046 unsigned long flags; 1048 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
1047 int i, regs[4]; 1049 spin_unlock_irqrestore(&lp->lock, flags);
1050 break;
1048 1051
1049 if (!lp->blink_timer.function) { 1052 case ETHTOOL_ID_INACTIVE:
1050 init_timer(&lp->blink_timer); 1053 /* Restore the original value of the bcrs */
1051 lp->blink_timer.function = (void *)pcnet32_led_blink_callback; 1054 spin_lock_irqsave(&lp->lock, flags);
1052 lp->blink_timer.data = (unsigned long)dev; 1055 for (i = 4; i < 8; i++)
1056 a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
1057 spin_unlock_irqrestore(&lp->lock, flags);
1053 } 1058 }
1054
1055 /* Save the current value of the bcrs */
1056 spin_lock_irqsave(&lp->lock, flags);
1057 for (i = 4; i < 8; i++)
1058 regs[i - 4] = a->read_bcr(ioaddr, i);
1059 spin_unlock_irqrestore(&lp->lock, flags);
1060
1061 mod_timer(&lp->blink_timer, jiffies);
1062 set_current_state(TASK_INTERRUPTIBLE);
1063
1064 /* AV: the limit here makes no sense whatsoever */
1065 if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
1066 data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
1067
1068 msleep_interruptible(data * 1000);
1069 del_timer_sync(&lp->blink_timer);
1070
1071 /* Restore the original value of the bcrs */
1072 spin_lock_irqsave(&lp->lock, flags);
1073 for (i = 4; i < 8; i++)
1074 a->write_bcr(ioaddr, i, regs[i - 4]);
1075 spin_unlock_irqrestore(&lp->lock, flags);
1076
1077 return 0; 1059 return 0;
1078} 1060}
1079 1061
@@ -1450,7 +1432,7 @@ static const struct ethtool_ops pcnet32_ethtool_ops = {
1450 .set_ringparam = pcnet32_set_ringparam, 1432 .set_ringparam = pcnet32_set_ringparam,
1451 .get_strings = pcnet32_get_strings, 1433 .get_strings = pcnet32_get_strings,
1452 .self_test = pcnet32_ethtool_test, 1434 .self_test = pcnet32_ethtool_test,
1453 .phys_id = pcnet32_phys_id, 1435 .set_phys_id = pcnet32_set_phys_id,
1454 .get_regs_len = pcnet32_get_regs_len, 1436 .get_regs_len = pcnet32_get_regs_len,
1455 .get_regs = pcnet32_get_regs, 1437 .get_regs = pcnet32_get_regs,
1456 .get_sset_count = pcnet32_get_sset_count, 1438 .get_sset_count = pcnet32_get_sset_count,
@@ -2117,7 +2099,7 @@ static int pcnet32_open(struct net_device *dev)
2117 int first_phy = -1; 2099 int first_phy = -1;
2118 u16 bmcr; 2100 u16 bmcr;
2119 u32 bcr9; 2101 u32 bcr9;
2120 struct ethtool_cmd ecmd; 2102 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
2121 2103
2122 /* 2104 /*
2123 * There is really no good other way to handle multiple PHYs 2105 * There is really no good other way to handle multiple PHYs
@@ -2133,9 +2115,9 @@ static int pcnet32_open(struct net_device *dev)
2133 ecmd.port = PORT_MII; 2115 ecmd.port = PORT_MII;
2134 ecmd.transceiver = XCVR_INTERNAL; 2116 ecmd.transceiver = XCVR_INTERNAL;
2135 ecmd.autoneg = AUTONEG_DISABLE; 2117 ecmd.autoneg = AUTONEG_DISABLE;
2136 ecmd.speed = 2118 ethtool_cmd_speed_set(&ecmd,
2137 lp-> 2119 (lp->options & PCNET32_PORT_100) ?
2138 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10; 2120 SPEED_100 : SPEED_10);
2139 bcr9 = lp->a.read_bcr(ioaddr, 9); 2121 bcr9 = lp->a.read_bcr(ioaddr, 9);
2140 2122
2141 if (lp->options & PCNET32_PORT_FD) { 2123 if (lp->options & PCNET32_PORT_FD) {
@@ -2781,11 +2763,11 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
2781 netif_carrier_on(dev); 2763 netif_carrier_on(dev);
2782 if (lp->mii) { 2764 if (lp->mii) {
2783 if (netif_msg_link(lp)) { 2765 if (netif_msg_link(lp)) {
2784 struct ethtool_cmd ecmd; 2766 struct ethtool_cmd ecmd = {
2767 .cmd = ETHTOOL_GSET };
2785 mii_ethtool_gset(&lp->mii_if, &ecmd); 2768 mii_ethtool_gset(&lp->mii_if, &ecmd);
2786 netdev_info(dev, "link up, %sMbps, %s-duplex\n", 2769 netdev_info(dev, "link up, %uMbps, %s-duplex\n",
2787 (ecmd.speed == SPEED_100) 2770 ethtool_cmd_speed(&ecmd),
2788 ? "100" : "10",
2789 (ecmd.duplex == DUPLEX_FULL) 2771 (ecmd.duplex == DUPLEX_FULL)
2790 ? "full" : "half"); 2772 ? "full" : "half");
2791 } 2773 }
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f7670330f988..a47595760751 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -238,6 +238,8 @@ static void phy_sanitize_settings(struct phy_device *phydev)
238 */ 238 */
239int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) 239int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
240{ 240{
241 u32 speed = ethtool_cmd_speed(cmd);
242
241 if (cmd->phy_address != phydev->addr) 243 if (cmd->phy_address != phydev->addr)
242 return -EINVAL; 244 return -EINVAL;
243 245
@@ -253,16 +255,16 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
253 return -EINVAL; 255 return -EINVAL;
254 256
255 if (cmd->autoneg == AUTONEG_DISABLE && 257 if (cmd->autoneg == AUTONEG_DISABLE &&
256 ((cmd->speed != SPEED_1000 && 258 ((speed != SPEED_1000 &&
257 cmd->speed != SPEED_100 && 259 speed != SPEED_100 &&
258 cmd->speed != SPEED_10) || 260 speed != SPEED_10) ||
259 (cmd->duplex != DUPLEX_HALF && 261 (cmd->duplex != DUPLEX_HALF &&
260 cmd->duplex != DUPLEX_FULL))) 262 cmd->duplex != DUPLEX_FULL)))
261 return -EINVAL; 263 return -EINVAL;
262 264
263 phydev->autoneg = cmd->autoneg; 265 phydev->autoneg = cmd->autoneg;
264 266
265 phydev->speed = cmd->speed; 267 phydev->speed = speed;
266 268
267 phydev->advertising = cmd->advertising; 269 phydev->advertising = cmd->advertising;
268 270
@@ -286,7 +288,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
286 288
287 cmd->advertising = phydev->advertising; 289 cmd->advertising = phydev->advertising;
288 290
289 cmd->speed = phydev->speed; 291 ethtool_cmd_speed_set(cmd, phydev->speed);
290 cmd->duplex = phydev->duplex; 292 cmd->duplex = phydev->duplex;
291 cmd->port = PORT_MII; 293 cmd->port = PORT_MII;
292 cmd->phy_address = phydev->addr; 294 cmd->phy_address = phydev->addr;
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 51dfcf8023c7..1286fe212dc4 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -175,6 +175,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
175 struct pptp_opt *opt = &po->proto.pptp; 175 struct pptp_opt *opt = &po->proto.pptp;
176 struct pptp_gre_header *hdr; 176 struct pptp_gre_header *hdr;
177 unsigned int header_len = sizeof(*hdr); 177 unsigned int header_len = sizeof(*hdr);
178 struct flowi4 fl4;
178 int islcp; 179 int islcp;
179 int len; 180 int len;
180 unsigned char *data; 181 unsigned char *data;
@@ -189,7 +190,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
189 if (sk_pppox(po)->sk_state & PPPOX_DEAD) 190 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
190 goto tx_error; 191 goto tx_error;
191 192
192 rt = ip_route_output_ports(&init_net, NULL, 193 rt = ip_route_output_ports(&init_net, &fl4, NULL,
193 opt->dst_addr.sin_addr.s_addr, 194 opt->dst_addr.sin_addr.s_addr,
194 opt->src_addr.sin_addr.s_addr, 195 opt->src_addr.sin_addr.s_addr,
195 0, 0, IPPROTO_GRE, 196 0, 0, IPPROTO_GRE,
@@ -270,8 +271,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
270 iph->frag_off = 0; 271 iph->frag_off = 0;
271 iph->protocol = IPPROTO_GRE; 272 iph->protocol = IPPROTO_GRE;
272 iph->tos = 0; 273 iph->tos = 0;
273 iph->daddr = rt->rt_dst; 274 iph->daddr = fl4.daddr;
274 iph->saddr = rt->rt_src; 275 iph->saddr = fl4.saddr;
275 iph->ttl = ip4_dst_hoplimit(&rt->dst); 276 iph->ttl = ip4_dst_hoplimit(&rt->dst);
276 iph->tot_len = htons(skb->len); 277 iph->tot_len = htons(skb->len);
277 278
@@ -434,6 +435,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
434 struct pppox_sock *po = pppox_sk(sk); 435 struct pppox_sock *po = pppox_sk(sk);
435 struct pptp_opt *opt = &po->proto.pptp; 436 struct pptp_opt *opt = &po->proto.pptp;
436 struct rtable *rt; 437 struct rtable *rt;
438 struct flowi4 fl4;
437 int error = 0; 439 int error = 0;
438 440
439 if (sp->sa_protocol != PX_PROTO_PPTP) 441 if (sp->sa_protocol != PX_PROTO_PPTP)
@@ -463,7 +465,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
463 po->chan.private = sk; 465 po->chan.private = sk;
464 po->chan.ops = &pptp_chan_ops; 466 po->chan.ops = &pptp_chan_ops;
465 467
466 rt = ip_route_output_ports(&init_net, sk, 468 rt = ip_route_output_ports(&init_net, &fl4, sk,
467 opt->dst_addr.sin_addr.s_addr, 469 opt->dst_addr.sin_addr.s_addr,
468 opt->src_addr.sin_addr.s_addr, 470 opt->src_addr.sin_addr.s_addr,
469 0, 0, 471 0, 0,
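
In the pptp hunks the route lookup now takes a caller-supplied struct flowi4, and the source/destination actually chosen by the lookup come back in fl4.saddr/fl4.daddr instead of being read from rt->rt_src/rt->rt_dst. A condensed, hypothetical sketch of the pptp_xmit pattern; the real call's trailing tos/oif arguments fall outside the hunk shown above, so plain zeros stand in for them here:

struct flowi4 fl4;
struct rtable *rt;

rt = ip_route_output_ports(&init_net, &fl4, NULL,
			   opt->dst_addr.sin_addr.s_addr,
			   opt->src_addr.sin_addr.s_addr,
			   0, 0, IPPROTO_GRE, 0, 0);
if (IS_ERR(rt))
	goto tx_error;

/* build the outer IPv4 header from what the lookup chose */
iph->daddr = fl4.daddr;
iph->saddr = fl4.saddr;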
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index ffdf7349ef7a..b1f251da1535 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -951,7 +951,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
951 skb->protocol = eth_type_trans(skb, netdev); 951 skb->protocol = eth_type_trans(skb, netdev);
952 952
953 /* checksum offload */ 953 /* checksum offload */
954 if (card->rx_csum) { 954 if (netdev->features & NETIF_F_RXCSUM) {
955 if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) && 955 if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) &&
956 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK))) 956 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
957 skb->ip_summed = CHECKSUM_UNNECESSARY; 957 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1243,17 +1243,17 @@ static int gelic_ether_get_settings(struct net_device *netdev,
1243 1243
1244 switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) { 1244 switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
1245 case GELIC_LV1_ETHER_SPEED_10: 1245 case GELIC_LV1_ETHER_SPEED_10:
1246 cmd->speed = SPEED_10; 1246 ethtool_cmd_speed_set(cmd, SPEED_10);
1247 break; 1247 break;
1248 case GELIC_LV1_ETHER_SPEED_100: 1248 case GELIC_LV1_ETHER_SPEED_100:
1249 cmd->speed = SPEED_100; 1249 ethtool_cmd_speed_set(cmd, SPEED_100);
1250 break; 1250 break;
1251 case GELIC_LV1_ETHER_SPEED_1000: 1251 case GELIC_LV1_ETHER_SPEED_1000:
1252 cmd->speed = SPEED_1000; 1252 ethtool_cmd_speed_set(cmd, SPEED_1000);
1253 break; 1253 break;
1254 default: 1254 default:
1255 pr_info("%s: speed unknown\n", __func__); 1255 pr_info("%s: speed unknown\n", __func__);
1256 cmd->speed = SPEED_10; 1256 ethtool_cmd_speed_set(cmd, SPEED_10);
1257 break; 1257 break;
1258 } 1258 }
1259 1259
@@ -1312,21 +1312,6 @@ static int gelic_ether_set_settings(struct net_device *netdev,
1312 return 0; 1312 return 0;
1313} 1313}
1314 1314
1315u32 gelic_net_get_rx_csum(struct net_device *netdev)
1316{
1317 struct gelic_card *card = netdev_card(netdev);
1318
1319 return card->rx_csum;
1320}
1321
1322int gelic_net_set_rx_csum(struct net_device *netdev, u32 data)
1323{
1324 struct gelic_card *card = netdev_card(netdev);
1325
1326 card->rx_csum = data;
1327 return 0;
1328}
1329
1330static void gelic_net_get_wol(struct net_device *netdev, 1315static void gelic_net_get_wol(struct net_device *netdev,
1331 struct ethtool_wolinfo *wol) 1316 struct ethtool_wolinfo *wol)
1332{ 1317{
@@ -1411,10 +1396,6 @@ static const struct ethtool_ops gelic_ether_ethtool_ops = {
1411 .get_settings = gelic_ether_get_settings, 1396 .get_settings = gelic_ether_get_settings,
1412 .set_settings = gelic_ether_set_settings, 1397 .set_settings = gelic_ether_set_settings,
1413 .get_link = ethtool_op_get_link, 1398 .get_link = ethtool_op_get_link,
1414 .get_tx_csum = ethtool_op_get_tx_csum,
1415 .set_tx_csum = ethtool_op_set_tx_csum,
1416 .get_rx_csum = gelic_net_get_rx_csum,
1417 .set_rx_csum = gelic_net_set_rx_csum,
1418 .get_wol = gelic_net_get_wol, 1399 .get_wol = gelic_net_get_wol,
1419 .set_wol = gelic_net_set_wol, 1400 .set_wol = gelic_net_set_wol,
1420}; 1401};
@@ -1512,7 +1493,11 @@ int __devinit gelic_net_setup_netdev(struct net_device *netdev,
1512 int status; 1493 int status;
1513 u64 v1, v2; 1494 u64 v1, v2;
1514 1495
1496 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1497
1515 netdev->features = NETIF_F_IP_CSUM; 1498 netdev->features = NETIF_F_IP_CSUM;
1499 if (GELIC_CARD_RX_CSUM_DEFAULT)
1500 netdev->features |= NETIF_F_RXCSUM;
1516 1501
1517 status = lv1_net_control(bus_id(card), dev_id(card), 1502 status = lv1_net_control(bus_id(card), dev_id(card),
1518 GELIC_LV1_GET_MAC_ADDRESS, 1503 GELIC_LV1_GET_MAC_ADDRESS,
@@ -1756,7 +1741,6 @@ static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
1756 /* setup card structure */ 1741 /* setup card structure */
1757 card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT | 1742 card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT |
1758 GELIC_CARD_PORT_STATUS_CHANGED; 1743 GELIC_CARD_PORT_STATUS_CHANGED;
1759 card->rx_csum = GELIC_CARD_RX_CSUM_DEFAULT;
1760 1744
1761 1745
1762 if (gelic_card_init_chain(card, &card->tx_chain, 1746 if (gelic_card_init_chain(card, &card->tx_chain,
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index fadadf9097a3..d9a55b93898b 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -290,7 +290,6 @@ struct gelic_card {
290 struct gelic_descr_chain tx_chain; 290 struct gelic_descr_chain tx_chain;
291 struct gelic_descr_chain rx_chain; 291 struct gelic_descr_chain rx_chain;
292 int rx_dma_restart_required; 292 int rx_dma_restart_required;
293 int rx_csum;
294 /* 293 /*
295 * tx_lock guards tx descriptor list and 294 * tx_lock guards tx descriptor list and
296 * tx_dma_progress. 295 * tx_dma_progress.
@@ -377,8 +376,6 @@ extern int gelic_net_setup_netdev(struct net_device *netdev,
377/* shared ethtool ops */ 376/* shared ethtool ops */
378extern void gelic_net_get_drvinfo(struct net_device *netdev, 377extern void gelic_net_get_drvinfo(struct net_device *netdev,
379 struct ethtool_drvinfo *info); 378 struct ethtool_drvinfo *info);
380extern u32 gelic_net_get_rx_csum(struct net_device *netdev);
381extern int gelic_net_set_rx_csum(struct net_device *netdev, u32 data);
382extern void gelic_net_poll_controller(struct net_device *netdev); 379extern void gelic_net_poll_controller(struct net_device *netdev);
383 380
384#endif /* _GELIC_NET_H */ 381#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index b5ae29d20f2e..2e62938c0f82 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2581,10 +2581,6 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
2581static const struct ethtool_ops gelic_wl_ethtool_ops = { 2581static const struct ethtool_ops gelic_wl_ethtool_ops = {
2582 .get_drvinfo = gelic_net_get_drvinfo, 2582 .get_drvinfo = gelic_net_get_drvinfo,
2583 .get_link = gelic_wl_get_link, 2583 .get_link = gelic_wl_get_link,
2584 .get_tx_csum = ethtool_op_get_tx_csum,
2585 .set_tx_csum = ethtool_op_set_tx_csum,
2586 .get_rx_csum = gelic_net_get_rx_csum,
2587 .set_rx_csum = gelic_net_set_rx_csum,
2588}; 2584};
2589 2585
2590static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev) 2586static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 348b4f1367c9..d495a6859fd9 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1725,7 +1725,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1725 } 1725 }
1726 ecmd->advertising = ql_supported_modes(qdev); 1726 ecmd->advertising = ql_supported_modes(qdev);
1727 ecmd->autoneg = ql_get_auto_cfg_status(qdev); 1727 ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1728 ecmd->speed = ql_get_speed(qdev); 1728 ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
1729 ecmd->duplex = ql_get_full_dup(qdev); 1729 ecmd->duplex = ql_get_full_dup(qdev);
1730 return 0; 1730 return 0;
1731} 1731}
@@ -3468,7 +3468,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
3468{ 3468{
3469 struct net_device *ndev = qdev->ndev; 3469 struct net_device *ndev = qdev->ndev;
3470 int err; 3470 int err;
3471 unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED; 3471 unsigned long irq_flags = IRQF_SHARED;
3472 unsigned long hw_flags; 3472 unsigned long hw_flags;
3473 3473
3474 if (ql_alloc_mem_resources(qdev)) { 3474 if (ql_alloc_mem_resources(qdev)) {
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index b0dead00b2d1..480ef5cb6ef9 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -29,13 +29,15 @@
29 29
30#include <linux/io.h> 30#include <linux/io.h>
31#include <asm/byteorder.h> 31#include <asm/byteorder.h>
32#include <linux/bitops.h>
33#include <linux/if_vlan.h>
32 34
33#include "qlcnic_hdr.h" 35#include "qlcnic_hdr.h"
34 36
35#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
36#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
37#define _QLCNIC_LINUX_SUBVERSION 15 39#define _QLCNIC_LINUX_SUBVERSION 18
38#define QLCNIC_LINUX_VERSIONID "5.0.15" 40#define QLCNIC_LINUX_VERSIONID "5.0.18"
39#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -93,8 +95,6 @@
93#define TX_IP_PKT 0x04 95#define TX_IP_PKT 0x04
94#define TX_TCP_LSO 0x05 96#define TX_TCP_LSO 0x05
95#define TX_TCP_LSO6 0x06 97#define TX_TCP_LSO6 0x06
96#define TX_IPSEC 0x07
97#define TX_IPSEC_CMD 0x0a
98#define TX_TCPV6_PKT 0x0b 98#define TX_TCPV6_PKT 0x0b
99#define TX_UDPV6_PKT 0x0c 99#define TX_UDPV6_PKT 0x0c
100 100
@@ -118,7 +118,6 @@
118#define PHAN_PEG_RCV_INITIALIZED 0xff01 118#define PHAN_PEG_RCV_INITIALIZED 0xff01
119 119
120#define NUM_RCV_DESC_RINGS 3 120#define NUM_RCV_DESC_RINGS 3
121#define NUM_STS_DESC_RINGS 4
122 121
123#define RCV_RING_NORMAL 0 122#define RCV_RING_NORMAL 0
124#define RCV_RING_JUMBO 1 123#define RCV_RING_JUMBO 1
@@ -201,7 +200,7 @@ struct rcv_desc {
201 __le16 reserved; 200 __le16 reserved;
202 __le32 buffer_length; /* allocated buffer length (usually 2K) */ 201 __le32 buffer_length; /* allocated buffer length (usually 2K) */
203 __le64 addr_buffer; 202 __le64 addr_buffer;
204}; 203} __packed;
205 204
206/* opcode field in status_desc */ 205/* opcode field in status_desc */
207#define QLCNIC_SYN_OFFLOAD 0x03 206#define QLCNIC_SYN_OFFLOAD 0x03
@@ -293,6 +292,7 @@ struct uni_data_desc{
293/* Flash Defines and Structures */ 292/* Flash Defines and Structures */
294#define QLCNIC_FLT_LOCATION 0x3F1000 293#define QLCNIC_FLT_LOCATION 0x3F1000
295#define QLCNIC_FW_IMAGE_REGION 0x74 294#define QLCNIC_FW_IMAGE_REGION 0x74
295#define QLCNIC_BOOTLD_REGION 0X72
296struct qlcnic_flt_header { 296struct qlcnic_flt_header {
297 u16 version; 297 u16 version;
298 u16 len; 298 u16 len;
@@ -307,7 +307,7 @@ struct qlcnic_flt_entry {
307 u8 reserved1; 307 u8 reserved1;
308 u32 size; 308 u32 size;
309 u32 start_addr; 309 u32 start_addr;
310 u32 end_add; 310 u32 end_addr;
311}; 311};
312 312
313/* Magic number to let user know flash is programmed */ 313/* Magic number to let user know flash is programmed */
@@ -366,12 +366,6 @@ struct qlcnic_skb_frag {
366 u64 length; 366 u64 length;
367}; 367};
368 368
369struct qlcnic_recv_crb {
370 u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
371 u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
372 u32 sw_int_mask[NUM_STS_DESC_RINGS];
373};
374
375/* Following defines are for the state of the buffers */ 369/* Following defines are for the state of the buffers */
376#define QLCNIC_BUFFER_FREE 0 370#define QLCNIC_BUFFER_FREE 0
377#define QLCNIC_BUFFER_BUSY 1 371#define QLCNIC_BUFFER_BUSY 1
@@ -388,10 +382,10 @@ struct qlcnic_cmd_buffer {
388 382
389/* In rx_buffer, we do not need multiple fragments as is a single buffer */ 383/* In rx_buffer, we do not need multiple fragments as is a single buffer */
390struct qlcnic_rx_buffer { 384struct qlcnic_rx_buffer {
391 struct list_head list; 385 u16 ref_handle;
392 struct sk_buff *skb; 386 struct sk_buff *skb;
387 struct list_head list;
393 u64 dma; 388 u64 dma;
394 u16 ref_handle;
395}; 389};
396 390
397/* Board types */ 391/* Board types */
@@ -399,6 +393,48 @@ struct qlcnic_rx_buffer {
399#define QLCNIC_XGBE 0x02 393#define QLCNIC_XGBE 0x02
400 394
401/* 395/*
396 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
397 * adjusted based on configured MTU.
398 */
399#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
400#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
401
402#define QLCNIC_INTR_DEFAULT 0x04
403#define QLCNIC_CONFIG_INTR_COALESCE 3
404
405struct qlcnic_nic_intr_coalesce {
406 u8 type;
407 u8 sts_ring_mask;
408 u16 rx_packets;
409 u16 rx_time_us;
410 u16 flag;
411 u32 timer_out;
412};
413
414struct qlcnic_dump_template_hdr {
415 __le32 type;
416 __le32 offset;
417 __le32 size;
418 __le32 cap_mask;
419 __le32 num_entries;
420 __le32 version;
421 __le32 timestamp;
422 __le32 checksum;
423 __le32 drv_cap_mask;
424 __le32 sys_info[3];
425 __le32 saved_state[16];
426 __le32 cap_sizes[8];
427 __le32 rsvd[0];
428};
429
430struct qlcnic_fw_dump {
431 u8 clr; /* flag to indicate if dump is cleared */
432 u32 size; /* total size of the dump */
433 void *data; /* dump data area */
434 struct qlcnic_dump_template_hdr *tmpl_hdr;
435};
436
437/*
402 * One hardware_context{} per adapter 438 * One hardware_context{} per adapter
403 * contains interrupt info as well shared hardware info. 439 * contains interrupt info as well shared hardware info.
404 */ 440 */
@@ -416,6 +452,9 @@ struct qlcnic_hardware_context {
416 u8 linkup; 452 u8 linkup;
417 u16 port_type; 453 u16 port_type;
418 u16 board_type; 454 u16 board_type;
455
456 struct qlcnic_nic_intr_coalesce coal;
457 struct qlcnic_fw_dump fw_dump;
419}; 458};
420 459
421struct qlcnic_adapter_stats { 460struct qlcnic_adapter_stats {
@@ -443,50 +482,49 @@ struct qlcnic_adapter_stats {
443 * be one Rcv Descriptor for normal packets, one for jumbo and may be others. 482 * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
444 */ 483 */
445struct qlcnic_host_rds_ring { 484struct qlcnic_host_rds_ring {
446 u32 producer; 485 void __iomem *crb_rcv_producer;
486 struct rcv_desc *desc_head;
487 struct qlcnic_rx_buffer *rx_buf_arr;
447 u32 num_desc; 488 u32 num_desc;
489 u32 producer;
448 u32 dma_size; 490 u32 dma_size;
449 u32 skb_size; 491 u32 skb_size;
450 u32 flags; 492 u32 flags;
451 void __iomem *crb_rcv_producer;
452 struct rcv_desc *desc_head;
453 struct qlcnic_rx_buffer *rx_buf_arr;
454 struct list_head free_list; 493 struct list_head free_list;
455 spinlock_t lock; 494 spinlock_t lock;
456 dma_addr_t phys_addr; 495 dma_addr_t phys_addr;
457}; 496} ____cacheline_internodealigned_in_smp;
458 497
459struct qlcnic_host_sds_ring { 498struct qlcnic_host_sds_ring {
460 u32 consumer; 499 u32 consumer;
461 u32 num_desc; 500 u32 num_desc;
462 void __iomem *crb_sts_consumer; 501 void __iomem *crb_sts_consumer;
463 void __iomem *crb_intr_mask;
464 502
465 struct status_desc *desc_head; 503 struct status_desc *desc_head;
466 struct qlcnic_adapter *adapter; 504 struct qlcnic_adapter *adapter;
467 struct napi_struct napi; 505 struct napi_struct napi;
468 struct list_head free_list[NUM_RCV_DESC_RINGS]; 506 struct list_head free_list[NUM_RCV_DESC_RINGS];
469 507
508 void __iomem *crb_intr_mask;
470 int irq; 509 int irq;
471 510
472 dma_addr_t phys_addr; 511 dma_addr_t phys_addr;
473 char name[IFNAMSIZ+4]; 512 char name[IFNAMSIZ+4];
474}; 513} ____cacheline_internodealigned_in_smp;
475 514
476struct qlcnic_host_tx_ring { 515struct qlcnic_host_tx_ring {
477 u32 producer; 516 u32 producer;
478 __le32 *hw_consumer;
479 u32 sw_consumer; 517 u32 sw_consumer;
480 void __iomem *crb_cmd_producer;
481 u32 num_desc; 518 u32 num_desc;
482 519 void __iomem *crb_cmd_producer;
483 struct netdev_queue *txq;
484
485 struct qlcnic_cmd_buffer *cmd_buf_arr;
486 struct cmd_desc_type0 *desc_head; 520 struct cmd_desc_type0 *desc_head;
521 struct qlcnic_cmd_buffer *cmd_buf_arr;
522 __le32 *hw_consumer;
523
487 dma_addr_t phys_addr; 524 dma_addr_t phys_addr;
488 dma_addr_t hw_cons_phys_addr; 525 dma_addr_t hw_cons_phys_addr;
489}; 526 struct netdev_queue *txq;
527} ____cacheline_internodealigned_in_smp;
490 528
491/* 529/*
492 * Receive context. There is one such structure per instance of the 530 * Receive context. There is one such structure per instance of the
@@ -495,12 +533,12 @@ struct qlcnic_host_tx_ring {
495 * present elsewhere. 533 * present elsewhere.
496 */ 534 */
497struct qlcnic_recv_context { 535struct qlcnic_recv_context {
536 struct qlcnic_host_rds_ring *rds_rings;
537 struct qlcnic_host_sds_ring *sds_rings;
498 u32 state; 538 u32 state;
499 u16 context_id; 539 u16 context_id;
500 u16 virt_port; 540 u16 virt_port;
501 541
502 struct qlcnic_host_rds_ring *rds_rings;
503 struct qlcnic_host_sds_ring *sds_rings;
504}; 542};
505 543
506/* HW context creation */ 544/* HW context creation */
@@ -539,9 +577,6 @@ struct qlcnic_recv_context {
539#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008 577#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
540#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009 578#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
541#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a 579#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
542#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
543#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
544#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
545#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012 580#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
546#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013 581#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
547#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014 582#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
@@ -550,17 +585,11 @@ struct qlcnic_recv_context {
550#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017 585#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
551#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018 586#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
552#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019 587#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
553#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
554#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
555#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
556#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
557#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
558#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f 588#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
559 589
560#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020 590#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
561#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021 591#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
562#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022 592#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
563#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023
564#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024 593#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
565#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025 594#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
566#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 595#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
@@ -568,8 +597,12 @@ struct qlcnic_recv_context {
568#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 597#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
569#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029 598#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
570#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a 599#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
600#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
601#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
602#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
571 603
572#define QLCNIC_RCODE_SUCCESS 0 604#define QLCNIC_RCODE_SUCCESS 0
605#define QLCNIC_RCODE_NOT_SUPPORTED 9
573#define QLCNIC_RCODE_TIMEOUT 17 606#define QLCNIC_RCODE_TIMEOUT 17
574#define QLCNIC_DESTROY_CTX_RESET 0 607#define QLCNIC_DESTROY_CTX_RESET 0
575 608
@@ -598,14 +631,14 @@ struct qlcnic_hostrq_sds_ring {
598 __le32 ring_size; /* Ring entries */ 631 __le32 ring_size; /* Ring entries */
599 __le16 msi_index; 632 __le16 msi_index;
600 __le16 rsvd; /* Padding */ 633 __le16 rsvd; /* Padding */
601}; 634} __packed;
602 635
603struct qlcnic_hostrq_rds_ring { 636struct qlcnic_hostrq_rds_ring {
604 __le64 host_phys_addr; /* Ring base addr */ 637 __le64 host_phys_addr; /* Ring base addr */
605 __le64 buff_size; /* Packet buffer size */ 638 __le64 buff_size; /* Packet buffer size */
606 __le32 ring_size; /* Ring entries */ 639 __le32 ring_size; /* Ring entries */
607 __le32 ring_kind; /* Class of ring */ 640 __le32 ring_kind; /* Class of ring */
608}; 641} __packed;
609 642
610struct qlcnic_hostrq_rx_ctx { 643struct qlcnic_hostrq_rx_ctx {
611 __le64 host_rsp_dma_addr; /* Response dma'd here */ 644 __le64 host_rsp_dma_addr; /* Response dma'd here */
@@ -626,17 +659,17 @@ struct qlcnic_hostrq_rx_ctx {
626 - N hostrq_rds_rings 659 - N hostrq_rds_rings
627 - N hostrq_sds_rings */ 660 - N hostrq_sds_rings */
628 char data[0]; 661 char data[0];
629}; 662} __packed;
630 663
631struct qlcnic_cardrsp_rds_ring{ 664struct qlcnic_cardrsp_rds_ring{
632 __le32 host_producer_crb; /* Crb to use */ 665 __le32 host_producer_crb; /* Crb to use */
633 __le32 rsvd1; /* Padding */ 666 __le32 rsvd1; /* Padding */
634}; 667} __packed;
635 668
636struct qlcnic_cardrsp_sds_ring { 669struct qlcnic_cardrsp_sds_ring {
637 __le32 host_consumer_crb; /* Crb to use */ 670 __le32 host_consumer_crb; /* Crb to use */
638 __le32 interrupt_crb; /* Crb to use */ 671 __le32 interrupt_crb; /* Crb to use */
639}; 672} __packed;
640 673
641struct qlcnic_cardrsp_rx_ctx { 674struct qlcnic_cardrsp_rx_ctx {
642 /* These ring offsets are relative to data[0] below */ 675 /* These ring offsets are relative to data[0] below */
@@ -655,7 +688,7 @@ struct qlcnic_cardrsp_rx_ctx {
655 - N cardrsp_rds_rings 688 - N cardrsp_rds_rings
656 - N cardrsp_sds_rings */ 689 - N cardrsp_sds_rings */
657 char data[0]; 690 char data[0];
658}; 691} __packed;
659 692
660#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ 693#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
661 (sizeof(HOSTRQ_RX) + \ 694 (sizeof(HOSTRQ_RX) + \
@@ -675,7 +708,7 @@ struct qlcnic_hostrq_cds_ring {
675 __le64 host_phys_addr; /* Ring base addr */ 708 __le64 host_phys_addr; /* Ring base addr */
676 __le32 ring_size; /* Ring entries */ 709 __le32 ring_size; /* Ring entries */
677 __le32 rsvd; /* Padding */ 710 __le32 rsvd; /* Padding */
678}; 711} __packed;
679 712
680struct qlcnic_hostrq_tx_ctx { 713struct qlcnic_hostrq_tx_ctx {
681 __le64 host_rsp_dma_addr; /* Response dma'd here */ 714 __le64 host_rsp_dma_addr; /* Response dma'd here */
@@ -690,12 +723,12 @@ struct qlcnic_hostrq_tx_ctx {
690 __le16 rsvd3; /* Padding */ 723 __le16 rsvd3; /* Padding */
691 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */ 724 struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
692 u8 reserved[128]; /* future expansion */ 725 u8 reserved[128]; /* future expansion */
693}; 726} __packed;
694 727
695struct qlcnic_cardrsp_cds_ring { 728struct qlcnic_cardrsp_cds_ring {
696 __le32 host_producer_crb; /* Crb to use */ 729 __le32 host_producer_crb; /* Crb to use */
697 __le32 interrupt_crb; /* Crb to use */ 730 __le32 interrupt_crb; /* Crb to use */
698}; 731} __packed;
699 732
700struct qlcnic_cardrsp_tx_ctx { 733struct qlcnic_cardrsp_tx_ctx {
701 __le32 host_ctx_state; /* Starting state */ 734 __le32 host_ctx_state; /* Starting state */
@@ -704,7 +737,7 @@ struct qlcnic_cardrsp_tx_ctx {
704 u8 virt_port; /* Virtual/Logical id of port */ 737 u8 virt_port; /* Virtual/Logical id of port */
705 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */ 738 struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
706 u8 reserved[128]; /* future expansion */ 739 u8 reserved[128]; /* future expansion */
707}; 740} __packed;
708 741
709#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) 742#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
710#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) 743#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
@@ -738,40 +771,6 @@ struct qlcnic_mac_list_s {
738 uint8_t mac_addr[ETH_ALEN+2]; 771 uint8_t mac_addr[ETH_ALEN+2];
739}; 772};
740 773
741/*
742 * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
743 * adjusted based on configured MTU.
744 */
745#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
746#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
747#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
748#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
749
750#define QLCNIC_INTR_DEFAULT 0x04
751
752union qlcnic_nic_intr_coalesce_data {
753 struct {
754 u16 rx_packets;
755 u16 rx_time_us;
756 u16 tx_packets;
757 u16 tx_time_us;
758 } data;
759 u64 word;
760};
761
762struct qlcnic_nic_intr_coalesce {
763 u16 stats_time_us;
764 u16 rate_sample_time;
765 u16 flags;
766 u16 rsvd_1;
767 u32 low_threshold;
768 u32 high_threshold;
769 union qlcnic_nic_intr_coalesce_data normal;
770 union qlcnic_nic_intr_coalesce_data low;
771 union qlcnic_nic_intr_coalesce_data high;
772 union qlcnic_nic_intr_coalesce_data irq;
773};
774
775#define QLCNIC_HOST_REQUEST 0x13 774#define QLCNIC_HOST_REQUEST 0x13
776#define QLCNIC_REQUEST 0x14 775#define QLCNIC_REQUEST 0x14
777 776
@@ -783,50 +782,20 @@ struct qlcnic_nic_intr_coalesce {
783/* 782/*
784 * Driver --> Firmware 783 * Driver --> Firmware
785 */ 784 */
786#define QLCNIC_H2C_OPCODE_START 0 785#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
787#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1 786#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
788#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2 787#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
789#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3 788#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
790#define QLCNIC_H2C_OPCODE_CONFIG_LED 4 789#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
791#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5 790#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
792#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6 791#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
793#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7 792#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
794#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8 793#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
795#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
796#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
797#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
798#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
799#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
800#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
801#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
802#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
803#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
804#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
805#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
806#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
807#define QLCNIC_C2C_OPCODE 22
808#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
809#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
810#define QLCNIC_H2C_OPCODE_LAST 25
811/* 794/*
812 * Firmware --> Driver 795 * Firmware --> Driver
813 */ 796 */
814 797
815#define QLCNIC_C2H_OPCODE_START 128
816#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
817#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
818#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
819#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
820#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
821#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
822#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
823#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
824#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
825#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
826#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
827#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
828#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 798#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
829#define QLCNIC_C2H_OPCODE_LAST 142
830 799
831#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ 800#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
832#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ 801#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -895,7 +864,7 @@ struct qlcnic_nic_req {
895 __le64 qhdr; 864 __le64 qhdr;
896 __le64 req_hdr; 865 __le64 req_hdr;
897 __le64 words[6]; 866 __le64 words[6];
898}; 867} __packed;
899 868
900struct qlcnic_mac_req { 869struct qlcnic_mac_req {
901 u8 op; 870 u8 op;
@@ -906,7 +875,7 @@ struct qlcnic_mac_req {
906struct qlcnic_vlan_req { 875struct qlcnic_vlan_req {
907 __le16 vlan_id; 876 __le16 vlan_id;
908 __le16 rsvd[3]; 877 __le16 rsvd[3];
909}; 878} __packed;
910 879
911struct qlcnic_ipaddr { 880struct qlcnic_ipaddr {
912 __be32 ipv4; 881 __be32 ipv4;
@@ -929,7 +898,8 @@ struct qlcnic_ipaddr {
929#define QLCNIC_IS_MSI_FAMILY(adapter) \ 898#define QLCNIC_IS_MSI_FAMILY(adapter) \
930 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 899 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
931 900
932#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS 901#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
902#define QLCNIC_MIN_NUM_RSS_RINGS 2
933#define QLCNIC_MSIX_TBL_SPACE 8192 903#define QLCNIC_MSIX_TBL_SPACE 8192
934#define QLCNIC_PCI_REG_MSIX_TBL 0x44 904#define QLCNIC_PCI_REG_MSIX_TBL 0x44
935#define QLCNIC_MSIX_TBL_PGSIZE 4096 905#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -942,6 +912,7 @@ struct qlcnic_ipaddr {
942#define __QLCNIC_RESETTING 2 912#define __QLCNIC_RESETTING 2
943#define __QLCNIC_START_FW 4 913#define __QLCNIC_START_FW 4
944#define __QLCNIC_AER 5 914#define __QLCNIC_AER 5
915#define __QLCNIC_DIAG_RES_ALLOC 6
945 916
946#define QLCNIC_INTERRUPT_TEST 1 917#define QLCNIC_INTERRUPT_TEST 1
947#define QLCNIC_LOOPBACK_TEST 2 918#define QLCNIC_LOOPBACK_TEST 2
@@ -965,14 +936,14 @@ struct qlcnic_filter_hash {
965}; 936};
966 937
967struct qlcnic_adapter { 938struct qlcnic_adapter {
968 struct qlcnic_hardware_context ahw; 939 struct qlcnic_hardware_context *ahw;
969 940 struct qlcnic_recv_context *recv_ctx;
941 struct qlcnic_host_tx_ring *tx_ring;
970 struct net_device *netdev; 942 struct net_device *netdev;
971 struct pci_dev *pdev; 943 struct pci_dev *pdev;
972 struct list_head mac_list;
973 944
974 spinlock_t tx_clean_lock; 945 unsigned long state;
975 spinlock_t mac_learn_lock; 946 u32 flags;
976 947
977 u16 num_txd; 948 u16 num_txd;
978 u16 num_rxd; 949 u16 num_rxd;
@@ -983,14 +954,12 @@ struct qlcnic_adapter {
983 u8 max_rds_rings; 954 u8 max_rds_rings;
984 u8 max_sds_rings; 955 u8 max_sds_rings;
985 u8 msix_supported; 956 u8 msix_supported;
986 u8 rx_csum;
987 u8 portnum; 957 u8 portnum;
988 u8 physical_port; 958 u8 physical_port;
989 u8 reset_context; 959 u8 reset_context;
990 960
991 u8 mc_enabled; 961 u8 mc_enabled;
992 u8 max_mc_count; 962 u8 max_mc_count;
993 u8 rss_supported;
994 u8 fw_wait_cnt; 963 u8 fw_wait_cnt;
995 u8 fw_fail_cnt; 964 u8 fw_fail_cnt;
996 u8 tx_timeo_cnt; 965 u8 tx_timeo_cnt;
@@ -1015,7 +984,6 @@ struct qlcnic_adapter {
1015 984
1016 u32 fw_hal_version; 985 u32 fw_hal_version;
1017 u32 capabilities; 986 u32 capabilities;
1018 u32 flags;
1019 u32 irq; 987 u32 irq;
1020 u32 temp; 988 u32 temp;
1021 989
@@ -1033,31 +1001,29 @@ struct qlcnic_adapter {
1033 u8 mac_addr[ETH_ALEN]; 1001 u8 mac_addr[ETH_ALEN];
1034 1002
1035 u64 dev_rst_time; 1003 u64 dev_rst_time;
1004 unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
1036 1005
1037 struct vlan_group *vlgrp;
1038 struct qlcnic_npar_info *npars; 1006 struct qlcnic_npar_info *npars;
1039 struct qlcnic_eswitch *eswitch; 1007 struct qlcnic_eswitch *eswitch;
1040 struct qlcnic_nic_template *nic_ops; 1008 struct qlcnic_nic_template *nic_ops;
1041 1009
1042 struct qlcnic_adapter_stats stats; 1010 struct qlcnic_adapter_stats stats;
1043 1011 struct list_head mac_list;
1044 struct qlcnic_recv_context recv_ctx;
1045 struct qlcnic_host_tx_ring *tx_ring;
1046 1012
1047 void __iomem *tgt_mask_reg; 1013 void __iomem *tgt_mask_reg;
1048 void __iomem *tgt_status_reg; 1014 void __iomem *tgt_status_reg;
1049 void __iomem *crb_int_state_reg; 1015 void __iomem *crb_int_state_reg;
1050 void __iomem *isr_int_vec; 1016 void __iomem *isr_int_vec;
1051 1017
1052 struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; 1018 struct msix_entry *msix_entries;
1053 1019
1054 struct delayed_work fw_work; 1020 struct delayed_work fw_work;
1055 1021
1056 struct qlcnic_nic_intr_coalesce coal;
1057 1022
1058 struct qlcnic_filter_hash fhash; 1023 struct qlcnic_filter_hash fhash;
1059 1024
1060 unsigned long state; 1025 spinlock_t tx_clean_lock;
1026 spinlock_t mac_learn_lock;
1061 __le32 file_prd_off; /*File fw product offset*/ 1027 __le32 file_prd_off; /*File fw product offset*/
1062 u32 fw_version; 1028 u32 fw_version;
1063 const struct firmware *fw; 1029 const struct firmware *fw;
@@ -1079,7 +1045,7 @@ struct qlcnic_info {
1079 __le16 min_tx_bw; 1045 __le16 min_tx_bw;
1080 __le16 max_tx_bw; 1046 __le16 max_tx_bw;
1081 u8 reserved2[104]; 1047 u8 reserved2[104];
1082}; 1048} __packed;
1083 1049
1084struct qlcnic_pci_info { 1050struct qlcnic_pci_info {
1085 __le16 id; /* pci function id */ 1051 __le16 id; /* pci function id */
@@ -1093,7 +1059,7 @@ struct qlcnic_pci_info {
1093 1059
1094 u8 mac[ETH_ALEN]; 1060 u8 mac[ETH_ALEN];
1095 u8 reserved2[106]; 1061 u8 reserved2[106];
1096}; 1062} __packed;
1097 1063
1098struct qlcnic_npar_info { 1064struct qlcnic_npar_info {
1099 u16 pvid; 1065 u16 pvid;
@@ -1210,15 +1176,160 @@ struct __qlcnic_esw_statistics {
1210 __le64 local_frames; 1176 __le64 local_frames;
1211 __le64 numbytes; 1177 __le64 numbytes;
1212 __le64 rsvd[3]; 1178 __le64 rsvd[3];
1213}; 1179} __packed;
1214 1180
1215struct qlcnic_esw_statistics { 1181struct qlcnic_esw_statistics {
1216 struct __qlcnic_esw_statistics rx; 1182 struct __qlcnic_esw_statistics rx;
1217 struct __qlcnic_esw_statistics tx; 1183 struct __qlcnic_esw_statistics tx;
1218}; 1184};
1219 1185
1220int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); 1186struct qlcnic_common_entry_hdr {
1221int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val); 1187 __le32 type;
1188 __le32 offset;
1189 __le32 cap_size;
1190 u8 mask;
1191 u8 rsvd[2];
1192 u8 flags;
1193} __packed;
1194
1195struct __crb {
1196 __le32 addr;
1197 u8 stride;
1198 u8 rsvd1[3];
1199 __le32 data_size;
1200 __le32 no_ops;
1201 __le32 rsvd2[4];
1202} __packed;
1203
1204struct __ctrl {
1205 __le32 addr;
1206 u8 stride;
1207 u8 index_a;
1208 __le16 timeout;
1209 __le32 data_size;
1210 __le32 no_ops;
1211 u8 opcode;
1212 u8 index_v;
1213 u8 shl_val;
1214 u8 shr_val;
1215 __le32 val1;
1216 __le32 val2;
1217 __le32 val3;
1218} __packed;
1219
1220struct __cache {
1221 __le32 addr;
1222 u8 stride;
1223 u8 rsvd;
1224 __le16 init_tag_val;
1225 __le32 size;
1226 __le32 no_ops;
1227 __le32 ctrl_addr;
1228 __le32 ctrl_val;
1229 __le32 read_addr;
1230 u8 read_addr_stride;
1231 u8 read_addr_num;
1232 u8 rsvd1[2];
1233} __packed;
1234
1235struct __ocm {
1236 u8 rsvd[8];
1237 __le32 size;
1238 __le32 no_ops;
1239 u8 rsvd1[8];
1240 __le32 read_addr;
1241 __le32 read_addr_stride;
1242} __packed;
1243
1244struct __mem {
1245 u8 rsvd[24];
1246 __le32 addr;
1247 __le32 size;
1248} __packed;
1249
1250struct __mux {
1251 __le32 addr;
1252 u8 rsvd[4];
1253 __le32 size;
1254 __le32 no_ops;
1255 __le32 val;
1256 __le32 val_stride;
1257 __le32 read_addr;
1258 u8 rsvd2[4];
1259} __packed;
1260
1261struct __queue {
1262 __le32 sel_addr;
1263 __le16 stride;
1264 u8 rsvd[2];
1265 __le32 size;
1266 __le32 no_ops;
1267 u8 rsvd2[8];
1268 __le32 read_addr;
1269 u8 read_addr_stride;
1270 u8 read_addr_cnt;
1271 u8 rsvd3[2];
1272} __packed;
1273
1274struct qlcnic_dump_entry {
1275 struct qlcnic_common_entry_hdr hdr;
1276 union {
1277 struct __crb crb;
1278 struct __cache cache;
1279 struct __ocm ocm;
1280 struct __mem mem;
1281 struct __mux mux;
1282 struct __queue que;
1283 struct __ctrl ctrl;
1284 } region;
1285} __packed;
1286
1287enum op_codes {
1288 QLCNIC_DUMP_NOP = 0,
1289 QLCNIC_DUMP_READ_CRB = 1,
1290 QLCNIC_DUMP_READ_MUX = 2,
1291 QLCNIC_DUMP_QUEUE = 3,
1292 QLCNIC_DUMP_BRD_CONFIG = 4,
1293 QLCNIC_DUMP_READ_OCM = 6,
1294 QLCNIC_DUMP_PEG_REG = 7,
1295 QLCNIC_DUMP_L1_DTAG = 8,
1296 QLCNIC_DUMP_L1_ITAG = 9,
1297 QLCNIC_DUMP_L1_DATA = 11,
1298 QLCNIC_DUMP_L1_INST = 12,
1299 QLCNIC_DUMP_L2_DTAG = 21,
1300 QLCNIC_DUMP_L2_ITAG = 22,
1301 QLCNIC_DUMP_L2_DATA = 23,
1302 QLCNIC_DUMP_L2_INST = 24,
1303 QLCNIC_DUMP_READ_ROM = 71,
1304 QLCNIC_DUMP_READ_MEM = 72,
1305 QLCNIC_DUMP_READ_CTRL = 98,
1306 QLCNIC_DUMP_TLHDR = 99,
1307 QLCNIC_DUMP_RDEND = 255
1308};
1309
1310#define QLCNIC_DUMP_WCRB BIT_0
1311#define QLCNIC_DUMP_RWCRB BIT_1
1312#define QLCNIC_DUMP_ANDCRB BIT_2
1313#define QLCNIC_DUMP_ORCRB BIT_3
1314#define QLCNIC_DUMP_POLLCRB BIT_4
1315#define QLCNIC_DUMP_RD_SAVE BIT_5
1316#define QLCNIC_DUMP_WRT_SAVED BIT_6
1317#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
1318#define QLCNIC_DUMP_SKIP BIT_7
1319
1320#define QLCNIC_DUMP_MASK_MIN 3
1321#define QLCNIC_DUMP_MASK_DEF 0x0f
1322#define QLCNIC_DUMP_MASK_MAX 0xff
1323#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
1324
1325struct qlcnic_dump_operations {
1326 enum op_codes opcode;
1327 u32 (*handler)(struct qlcnic_adapter *,
1328 struct qlcnic_dump_entry *, u32 *);
1329};
1330
1331int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
1332int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
1222 1333
1223u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off); 1334u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
1224int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); 1335int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
@@ -1264,6 +1375,7 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1264int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); 1375int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1265void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); 1376void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
1266void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); 1377void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
1378int qlcnic_dump_fw(struct qlcnic_adapter *);
1267 1379
1268/* Functions from qlcnic_init.c */ 1380/* Functions from qlcnic_init.c */
1269int qlcnic_load_firmware(struct qlcnic_adapter *adapter); 1381int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
@@ -1274,7 +1386,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1274int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); 1386int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
1275int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter); 1387int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
1276 1388
1277int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp); 1389int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
1278int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, 1390int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1279 u8 *bytes, size_t size); 1391 u8 *bytes, size_t size);
1280int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter); 1392int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
@@ -1294,7 +1406,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1294 1406
1295int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); 1407int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1296void qlcnic_watchdog_task(struct work_struct *work); 1408void qlcnic_watchdog_task(struct work_struct *work);
1297void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid, 1409void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1298 struct qlcnic_host_rds_ring *rds_ring); 1410 struct qlcnic_host_rds_ring *rds_ring);
1299int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); 1411int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1300void qlcnic_set_multi(struct net_device *netdev); 1412void qlcnic_set_multi(struct net_device *netdev);
@@ -1308,6 +1420,8 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1308 1420
1309int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); 1421int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
1310int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); 1422int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
1423u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
1424int qlcnic_set_features(struct net_device *netdev, u32 features);
1311int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); 1425int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1312int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); 1426int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1313int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1427int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
@@ -1322,6 +1436,9 @@ u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1322void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); 1436void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1323int qlcnic_diag_alloc_res(struct net_device *netdev, int test); 1437int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1324netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1438netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1439int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
1440int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
1441void qlcnic_dev_request_reset(struct qlcnic_adapter *);
1325 1442
1326/* Management functions */ 1443/* Management functions */
1327int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); 1444int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1379,8 +1496,7 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1379 1496
1380static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) 1497static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1381{ 1498{
1382 smp_mb(); 1499 if (likely(tx_ring->producer < tx_ring->sw_consumer))
1383 if (tx_ring->producer < tx_ring->sw_consumer)
1384 return tx_ring->sw_consumer - tx_ring->producer; 1500 return tx_ring->sw_consumer - tx_ring->producer;
1385 else 1501 else
1386 return tx_ring->sw_consumer + tx_ring->num_desc - 1502 return tx_ring->sw_consumer + tx_ring->num_desc -
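For context (not part of the patch): the qlcnic_tx_avail() hunk above drops the smp_mb() barrier and adds a likely() hint, but the ring arithmetic itself is unchanged. A standalone sketch of that circular-buffer free-space calculation, with plain integers standing in for the tx_ring fields:

#include <assert.h>

/* Free entries in a ring of num_desc descriptors, mirroring the
 * producer/sw_consumer comparison in qlcnic_tx_avail() above. */
static unsigned int ring_avail(unsigned int producer,
			       unsigned int sw_consumer,
			       unsigned int num_desc)
{
	if (producer < sw_consumer)
		return sw_consumer - producer;
	return sw_consumer + num_desc - producer;	/* producer has wrapped */
}

int main(void)
{
	assert(ring_avail(10, 40, 256) == 30);	/* producer behind consumer */
	assert(ring_avail(250, 10, 256) == 16);	/* producer wrapped past end */
	return 0;
}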
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 27631f23b3fd..bab041a5c758 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -64,14 +64,105 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
64 return rcode; 64 return rcode;
65} 65}
66 66
67static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
68{
69 uint64_t sum = 0;
70 int count = temp_size / sizeof(uint32_t);
71 while (count-- > 0)
72 sum += *temp_buffer++;
73 while (sum >> 32)
74 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
75 return ~sum;
76}
77
78int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
79{
80 int err, i;
81 u16 temp_size;
82 void *tmp_addr;
83 u32 version, csum, *template, *tmp_buf;
84 struct qlcnic_hardware_context *ahw;
85 struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
86 dma_addr_t tmp_addr_t = 0;
87
88 ahw = adapter->ahw;
89 err = qlcnic_issue_cmd(adapter,
90 adapter->ahw->pci_func,
91 adapter->fw_hal_version,
92 0,
93 0,
94 0,
95 QLCNIC_CDRP_CMD_TEMP_SIZE);
96 if (err != QLCNIC_RCODE_SUCCESS) {
97 err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
98 dev_err(&adapter->pdev->dev,
99 "Failed to get template size %d\n", err);
100 err = -EIO;
101 return err;
102 }
103 version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET);
104 temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
105 if (!temp_size)
106 return -EIO;
107
108 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
109 &tmp_addr_t, GFP_KERNEL);
110 if (!tmp_addr) {
111 dev_err(&adapter->pdev->dev,
112 "Can't get memory for FW dump template\n");
113 return -ENOMEM;
114 }
115 err = qlcnic_issue_cmd(adapter,
116 adapter->ahw->pci_func,
117 adapter->fw_hal_version,
118 LSD(tmp_addr_t),
119 MSD(tmp_addr_t),
120 temp_size,
121 QLCNIC_CDRP_CMD_GET_TEMP_HDR);
122
123 if (err != QLCNIC_RCODE_SUCCESS) {
124 dev_err(&adapter->pdev->dev,
125 "Failed to get mini dump template header %d\n", err);
126 err = -EIO;
127 goto error;
128 }
129 tmp_tmpl = (struct qlcnic_dump_template_hdr *) tmp_addr;
130 csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
131 if (csum) {
132 dev_err(&adapter->pdev->dev,
133 "Template header checksum validation failed\n");
134 err = -EIO;
135 goto error;
136 }
137 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
138 if (!ahw->fw_dump.tmpl_hdr) {
139 err = -EIO;
140 goto error;
141 }
142 tmp_buf = (u32 *) tmp_addr;
143 template = (u32 *) ahw->fw_dump.tmpl_hdr;
144 for (i = 0; i < temp_size/sizeof(u32); i++)
145 *template++ = __le32_to_cpu(*tmp_buf++);
146
147 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
148 if (tmpl_hdr->cap_mask > QLCNIC_DUMP_MASK_DEF &&
149 tmpl_hdr->cap_mask <= QLCNIC_DUMP_MASK_MAX)
150 tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
151 else
152 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
153error:
154 dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
155 return err;
156}
157
67int 158int
68qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) 159qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
69{ 160{
70 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 161 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
71 162
72 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) { 163 if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
73 if (qlcnic_issue_cmd(adapter, 164 if (qlcnic_issue_cmd(adapter,
74 adapter->ahw.pci_func, 165 adapter->ahw->pci_func,
75 adapter->fw_hal_version, 166 adapter->fw_hal_version,
76 recv_ctx->context_id, 167 recv_ctx->context_id,
77 mtu, 168 mtu,
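For context (not part of the patch): qlcnic_temp_checksum() added above is a 32-bit sum with end-around-carry folding, so a template whose stored checksum word is consistent folds to all-ones and the final complement comes out zero. A standalone sketch of the same arithmetic on a toy buffer (the self-checking main() is only for illustration; in the driver the firmware supplies the template and its checksum):

#include <stdint.h>
#include <stdio.h>

/* Same fold-and-complement scheme as qlcnic_temp_checksum() above. */
static uint32_t temp_checksum(const uint32_t *buf, uint16_t size)
{
	uint64_t sum = 0;
	int count = size / sizeof(uint32_t);

	while (count-- > 0)
		sum += *buf++;
	while (sum >> 32)			/* fold carries back in */
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~sum;
}

int main(void)
{
	uint32_t words[4] = { 0x11111111, 0x22222222, 0x33333333, 0 };

	/* Store the complement of the running sum as the checksum word... */
	words[3] = temp_checksum(words, sizeof(words));
	/* ...then re-running the checksum over the whole buffer yields 0. */
	printf("verify: 0x%08x\n", temp_checksum(words, sizeof(words)));
	return 0;
}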
@@ -102,12 +193,12 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
102 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; 193 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
103 u64 phys_addr; 194 u64 phys_addr;
104 195
105 int i, nrds_rings, nsds_rings; 196 u8 i, nrds_rings, nsds_rings;
106 size_t rq_size, rsp_size; 197 size_t rq_size, rsp_size;
107 u32 cap, reg, val, reg2; 198 u32 cap, reg, val, reg2;
108 int err; 199 int err;
109 200
110 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 201 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
111 202
112 nrds_rings = adapter->max_rds_rings; 203 nrds_rings = adapter->max_rds_rings;
113 nsds_rings = adapter->max_sds_rings; 204 nsds_rings = adapter->max_sds_rings;
@@ -119,14 +210,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
119 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, 210 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
120 nsds_rings); 211 nsds_rings);
121 212
122 addr = pci_alloc_consistent(adapter->pdev, 213 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
123 rq_size, &hostrq_phys_addr); 214 &hostrq_phys_addr, GFP_KERNEL);
124 if (addr == NULL) 215 if (addr == NULL)
125 return -ENOMEM; 216 return -ENOMEM;
126 prq = (struct qlcnic_hostrq_rx_ctx *)addr; 217 prq = (struct qlcnic_hostrq_rx_ctx *)addr;
127 218
128 addr = pci_alloc_consistent(adapter->pdev, 219 addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
129 rsp_size, &cardrsp_phys_addr); 220 &cardrsp_phys_addr, GFP_KERNEL);
130 if (addr == NULL) { 221 if (addr == NULL) {
131 err = -ENOMEM; 222 err = -ENOMEM;
132 goto out_free_rq; 223 goto out_free_rq;
@@ -151,7 +242,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
151 242
152 prq->num_rds_rings = cpu_to_le16(nrds_rings); 243 prq->num_rds_rings = cpu_to_le16(nrds_rings);
153 prq->num_sds_rings = cpu_to_le16(nsds_rings); 244 prq->num_sds_rings = cpu_to_le16(nsds_rings);
154 prq->rds_ring_offset = cpu_to_le32(0); 245 prq->rds_ring_offset = 0;
155 246
156 val = le32_to_cpu(prq->rds_ring_offset) + 247 val = le32_to_cpu(prq->rds_ring_offset) +
157 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); 248 (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
@@ -187,7 +278,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
187 278
188 phys_addr = hostrq_phys_addr; 279 phys_addr = hostrq_phys_addr;
189 err = qlcnic_issue_cmd(adapter, 280 err = qlcnic_issue_cmd(adapter,
190 adapter->ahw.pci_func, 281 adapter->ahw->pci_func,
191 adapter->fw_hal_version, 282 adapter->fw_hal_version,
192 (u32)(phys_addr >> 32), 283 (u32)(phys_addr >> 32),
193 (u32)(phys_addr & 0xffffffff), 284 (u32)(phys_addr & 0xffffffff),
@@ -207,7 +298,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
207 rds_ring = &recv_ctx->rds_rings[i]; 298 rds_ring = &recv_ctx->rds_rings[i];
208 299
209 reg = le32_to_cpu(prsp_rds[i].host_producer_crb); 300 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
210 rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg; 301 rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
211 } 302 }
212 303
213 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) 304 prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -219,8 +310,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
219 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); 310 reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
220 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); 311 reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
221 312
222 sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg; 313 sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
223 sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2; 314 sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
224 } 315 }
225 316
226 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); 317 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -228,19 +319,20 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
228 recv_ctx->virt_port = prsp->virt_port; 319 recv_ctx->virt_port = prsp->virt_port;
229 320
230out_free_rsp: 321out_free_rsp:
231 pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); 322 dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
323 cardrsp_phys_addr);
232out_free_rq: 324out_free_rq:
233 pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); 325 dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
234 return err; 326 return err;
235} 327}
236 328
237static void 329static void
238qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) 330qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
239{ 331{
240 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 332 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
241 333
242 if (qlcnic_issue_cmd(adapter, 334 if (qlcnic_issue_cmd(adapter,
243 adapter->ahw.pci_func, 335 adapter->ahw->pci_func,
244 adapter->fw_hal_version, 336 adapter->fw_hal_version,
245 recv_ctx->context_id, 337 recv_ctx->context_id,
246 QLCNIC_DESTROY_CTX_RESET, 338 QLCNIC_DESTROY_CTX_RESET,
@@ -274,14 +366,14 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
274 *(tx_ring->hw_consumer) = 0; 366 *(tx_ring->hw_consumer) = 0;
275 367
276 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 368 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
277 rq_addr = pci_alloc_consistent(adapter->pdev, 369 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
278 rq_size, &rq_phys_addr); 370 &rq_phys_addr, GFP_KERNEL);
279 if (!rq_addr) 371 if (!rq_addr)
280 return -ENOMEM; 372 return -ENOMEM;
281 373
282 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 374 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
283 rsp_addr = pci_alloc_consistent(adapter->pdev, 375 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
284 rsp_size, &rsp_phys_addr); 376 &rsp_phys_addr, GFP_KERNEL);
285 if (!rsp_addr) { 377 if (!rsp_addr) {
286 err = -ENOMEM; 378 err = -ENOMEM;
287 goto out_free_rq; 379 goto out_free_rq;
@@ -313,7 +405,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
313 405
314 phys_addr = rq_phys_addr; 406 phys_addr = rq_phys_addr;
315 err = qlcnic_issue_cmd(adapter, 407 err = qlcnic_issue_cmd(adapter,
316 adapter->ahw.pci_func, 408 adapter->ahw->pci_func,
317 adapter->fw_hal_version, 409 adapter->fw_hal_version,
318 (u32)(phys_addr >> 32), 410 (u32)(phys_addr >> 32),
319 ((u32)phys_addr & 0xffffffff), 411 ((u32)phys_addr & 0xffffffff),
@@ -322,7 +414,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
322 414
323 if (err == QLCNIC_RCODE_SUCCESS) { 415 if (err == QLCNIC_RCODE_SUCCESS) {
324 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 416 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
325 tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp; 417 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
326 418
327 adapter->tx_context_id = 419 adapter->tx_context_id =
328 le16_to_cpu(prsp->context_id); 420 le16_to_cpu(prsp->context_id);
@@ -332,10 +424,11 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
332 err = -EIO; 424 err = -EIO;
333 } 425 }
334 426
335 pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); 427 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
428 rsp_phys_addr);
336 429
337out_free_rq: 430out_free_rq:
338 pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); 431 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
339 432
340 return err; 433 return err;
341} 434}
@@ -344,7 +437,7 @@ static void
344qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter) 437qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
345{ 438{
346 if (qlcnic_issue_cmd(adapter, 439 if (qlcnic_issue_cmd(adapter,
347 adapter->ahw.pci_func, 440 adapter->ahw->pci_func,
348 adapter->fw_hal_version, 441 adapter->fw_hal_version,
349 adapter->tx_context_id, 442 adapter->tx_context_id,
350 QLCNIC_DESTROY_CTX_RESET, 443 QLCNIC_DESTROY_CTX_RESET,
@@ -357,33 +450,15 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
357} 450}
358 451
359int 452int
360qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val) 453qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
361{
362
363 if (qlcnic_issue_cmd(adapter,
364 adapter->ahw.pci_func,
365 adapter->fw_hal_version,
366 reg,
367 0,
368 0,
369 QLCNIC_CDRP_CMD_READ_PHY)) {
370
371 return -EIO;
372 }
373
374 return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
375}
376
377int
378qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
379{ 454{
380 return qlcnic_issue_cmd(adapter, 455 return qlcnic_issue_cmd(adapter,
381 adapter->ahw.pci_func, 456 adapter->ahw->pci_func,
382 adapter->fw_hal_version, 457 adapter->fw_hal_version,
383 reg, 458 config,
384 val, 459 0,
385 0, 460 0,
386 QLCNIC_CDRP_CMD_WRITE_PHY); 461 QLCNIC_CDRP_CMD_CONFIG_PORT);
387} 462}
388 463
389int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) 464int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
@@ -398,20 +473,19 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
398 473
399 struct pci_dev *pdev = adapter->pdev; 474 struct pci_dev *pdev = adapter->pdev;
400 475
401 recv_ctx = &adapter->recv_ctx; 476 recv_ctx = adapter->recv_ctx;
402 tx_ring = adapter->tx_ring; 477 tx_ring = adapter->tx_ring;
403 478
404 tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32), 479 tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
405 &tx_ring->hw_cons_phys_addr); 480 sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
406 if (tx_ring->hw_consumer == NULL) { 481 if (tx_ring->hw_consumer == NULL) {
407 dev_err(&pdev->dev, "failed to allocate tx consumer\n"); 482 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
408 return -ENOMEM; 483 return -ENOMEM;
409 } 484 }
410 *(tx_ring->hw_consumer) = 0;
411 485
412 /* cmd desc ring */ 486 /* cmd desc ring */
413 addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring), 487 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
414 &tx_ring->phys_addr); 488 &tx_ring->phys_addr, GFP_KERNEL);
415 489
416 if (addr == NULL) { 490 if (addr == NULL) {
417 dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); 491 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
@@ -423,9 +497,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
423 497
424 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 498 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
425 rds_ring = &recv_ctx->rds_rings[ring]; 499 rds_ring = &recv_ctx->rds_rings[ring];
426 addr = pci_alloc_consistent(adapter->pdev, 500 addr = dma_alloc_coherent(&adapter->pdev->dev,
427 RCV_DESC_RINGSIZE(rds_ring), 501 RCV_DESC_RINGSIZE(rds_ring),
428 &rds_ring->phys_addr); 502 &rds_ring->phys_addr, GFP_KERNEL);
429 if (addr == NULL) { 503 if (addr == NULL) {
430 dev_err(&pdev->dev, 504 dev_err(&pdev->dev,
431 "failed to allocate rds ring [%d]\n", ring); 505 "failed to allocate rds ring [%d]\n", ring);
@@ -439,9 +513,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
439 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 513 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
440 sds_ring = &recv_ctx->sds_rings[ring]; 514 sds_ring = &recv_ctx->sds_rings[ring];
441 515
442 addr = pci_alloc_consistent(adapter->pdev, 516 addr = dma_alloc_coherent(&adapter->pdev->dev,
443 STATUS_DESC_RINGSIZE(sds_ring), 517 STATUS_DESC_RINGSIZE(sds_ring),
444 &sds_ring->phys_addr); 518 &sds_ring->phys_addr, GFP_KERNEL);
445 if (addr == NULL) { 519 if (addr == NULL) {
446 dev_err(&pdev->dev, 520 dev_err(&pdev->dev,
447 "failed to allocate sds ring [%d]\n", ring); 521 "failed to allocate sds ring [%d]\n", ring);
@@ -501,11 +575,11 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
501 struct qlcnic_host_tx_ring *tx_ring; 575 struct qlcnic_host_tx_ring *tx_ring;
502 int ring; 576 int ring;
503 577
504 recv_ctx = &adapter->recv_ctx; 578 recv_ctx = adapter->recv_ctx;
505 579
506 tx_ring = adapter->tx_ring; 580 tx_ring = adapter->tx_ring;
507 if (tx_ring->hw_consumer != NULL) { 581 if (tx_ring->hw_consumer != NULL) {
508 pci_free_consistent(adapter->pdev, 582 dma_free_coherent(&adapter->pdev->dev,
509 sizeof(u32), 583 sizeof(u32),
510 tx_ring->hw_consumer, 584 tx_ring->hw_consumer,
511 tx_ring->hw_cons_phys_addr); 585 tx_ring->hw_cons_phys_addr);
@@ -513,7 +587,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
513 } 587 }
514 588
515 if (tx_ring->desc_head != NULL) { 589 if (tx_ring->desc_head != NULL) {
516 pci_free_consistent(adapter->pdev, 590 dma_free_coherent(&adapter->pdev->dev,
517 TX_DESC_RINGSIZE(tx_ring), 591 TX_DESC_RINGSIZE(tx_ring),
518 tx_ring->desc_head, tx_ring->phys_addr); 592 tx_ring->desc_head, tx_ring->phys_addr);
519 tx_ring->desc_head = NULL; 593 tx_ring->desc_head = NULL;
@@ -523,7 +597,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
523 rds_ring = &recv_ctx->rds_rings[ring]; 597 rds_ring = &recv_ctx->rds_rings[ring];
524 598
525 if (rds_ring->desc_head != NULL) { 599 if (rds_ring->desc_head != NULL) {
526 pci_free_consistent(adapter->pdev, 600 dma_free_coherent(&adapter->pdev->dev,
527 RCV_DESC_RINGSIZE(rds_ring), 601 RCV_DESC_RINGSIZE(rds_ring),
528 rds_ring->desc_head, 602 rds_ring->desc_head,
529 rds_ring->phys_addr); 603 rds_ring->phys_addr);
@@ -535,7 +609,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
535 sds_ring = &recv_ctx->sds_rings[ring]; 609 sds_ring = &recv_ctx->sds_rings[ring];
536 610
537 if (sds_ring->desc_head != NULL) { 611 if (sds_ring->desc_head != NULL) {
538 pci_free_consistent(adapter->pdev, 612 dma_free_coherent(&adapter->pdev->dev,
539 STATUS_DESC_RINGSIZE(sds_ring), 613 STATUS_DESC_RINGSIZE(sds_ring),
540 sds_ring->desc_head, 614 sds_ring->desc_head,
541 sds_ring->phys_addr); 615 sds_ring->phys_addr);
@@ -551,9 +625,9 @@ int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
551 int err; 625 int err;
552 u32 arg1; 626 u32 arg1;
553 627
554 arg1 = adapter->ahw.pci_func | BIT_8; 628 arg1 = adapter->ahw->pci_func | BIT_8;
555 err = qlcnic_issue_cmd(adapter, 629 err = qlcnic_issue_cmd(adapter,
556 adapter->ahw.pci_func, 630 adapter->ahw->pci_func,
557 adapter->fw_hal_version, 631 adapter->fw_hal_version,
558 arg1, 632 arg1,
559 0, 633 0,
@@ -582,15 +656,15 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
582 void *nic_info_addr; 656 void *nic_info_addr;
583 size_t nic_size = sizeof(struct qlcnic_info); 657 size_t nic_size = sizeof(struct qlcnic_info);
584 658
585 nic_info_addr = pci_alloc_consistent(adapter->pdev, 659 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
586 nic_size, &nic_dma_t); 660 &nic_dma_t, GFP_KERNEL);
587 if (!nic_info_addr) 661 if (!nic_info_addr)
588 return -ENOMEM; 662 return -ENOMEM;
589 memset(nic_info_addr, 0, nic_size); 663 memset(nic_info_addr, 0, nic_size);
590 664
591 nic_info = (struct qlcnic_info *) nic_info_addr; 665 nic_info = (struct qlcnic_info *) nic_info_addr;
592 err = qlcnic_issue_cmd(adapter, 666 err = qlcnic_issue_cmd(adapter,
593 adapter->ahw.pci_func, 667 adapter->ahw->pci_func,
594 adapter->fw_hal_version, 668 adapter->fw_hal_version,
595 MSD(nic_dma_t), 669 MSD(nic_dma_t),
596 LSD(nic_dma_t), 670 LSD(nic_dma_t),
@@ -623,7 +697,8 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
623 err = -EIO; 697 err = -EIO;
624 } 698 }
625 699
626 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t); 700 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
701 nic_dma_t);
627 return err; 702 return err;
628} 703}
629 704
@@ -639,8 +714,8 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
639 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 714 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
640 return err; 715 return err;
641 716
642 nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size, 717 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
643 &nic_dma_t); 718 &nic_dma_t, GFP_KERNEL);
644 if (!nic_info_addr) 719 if (!nic_info_addr)
645 return -ENOMEM; 720 return -ENOMEM;
646 721
@@ -659,7 +734,7 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
659 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); 734 nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
660 735
661 err = qlcnic_issue_cmd(adapter, 736 err = qlcnic_issue_cmd(adapter,
662 adapter->ahw.pci_func, 737 adapter->ahw->pci_func,
663 adapter->fw_hal_version, 738 adapter->fw_hal_version,
664 MSD(nic_dma_t), 739 MSD(nic_dma_t),
665 LSD(nic_dma_t), 740 LSD(nic_dma_t),
@@ -672,7 +747,8 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
672 err = -EIO; 747 err = -EIO;
673 } 748 }
674 749
675 pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t); 750 dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
751 nic_dma_t);
676 return err; 752 return err;
677} 753}
678 754
@@ -687,15 +763,15 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
687 size_t npar_size = sizeof(struct qlcnic_pci_info); 763 size_t npar_size = sizeof(struct qlcnic_pci_info);
688 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; 764 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
689 765
690 pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size, 766 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
691 &pci_info_dma_t); 767 &pci_info_dma_t, GFP_KERNEL);
692 if (!pci_info_addr) 768 if (!pci_info_addr)
693 return -ENOMEM; 769 return -ENOMEM;
694 memset(pci_info_addr, 0, pci_size); 770 memset(pci_info_addr, 0, pci_size);
695 771
696 npar = (struct qlcnic_pci_info *) pci_info_addr; 772 npar = (struct qlcnic_pci_info *) pci_info_addr;
697 err = qlcnic_issue_cmd(adapter, 773 err = qlcnic_issue_cmd(adapter,
698 adapter->ahw.pci_func, 774 adapter->ahw->pci_func,
699 adapter->fw_hal_version, 775 adapter->fw_hal_version,
700 MSD(pci_info_dma_t), 776 MSD(pci_info_dma_t),
701 LSD(pci_info_dma_t), 777 LSD(pci_info_dma_t),
@@ -721,7 +797,7 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
721 err = -EIO; 797 err = -EIO;
722 } 798 }
723 799
724 pci_free_consistent(adapter->pdev, pci_size, pci_info_addr, 800 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
725 pci_info_dma_t); 801 pci_info_dma_t);
726 return err; 802 return err;
727} 803}
@@ -741,7 +817,7 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
741 arg1 |= pci_func << 8; 817 arg1 |= pci_func << 8;
742 818
743 err = qlcnic_issue_cmd(adapter, 819 err = qlcnic_issue_cmd(adapter,
744 adapter->ahw.pci_func, 820 adapter->ahw->pci_func,
745 adapter->fw_hal_version, 821 adapter->fw_hal_version,
746 arg1, 822 arg1,
747 0, 823 0,
@@ -775,14 +851,14 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
775 return -ENOMEM; 851 return -ENOMEM;
776 852
777 if (adapter->op_mode != QLCNIC_MGMT_FUNC && 853 if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
778 func != adapter->ahw.pci_func) { 854 func != adapter->ahw->pci_func) {
779 dev_err(&adapter->pdev->dev, 855 dev_err(&adapter->pdev->dev,
780 "Not privileged to query stats for func=%d", func); 856
781 return -EIO; 857 return -EIO;
782 } 858 }
783 859
784 stats_addr = pci_alloc_consistent(adapter->pdev, stats_size, 860 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
785 &stats_dma_t); 861 &stats_dma_t, GFP_KERNEL);
786 if (!stats_addr) { 862 if (!stats_addr) {
787 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n"); 863 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
788 return -ENOMEM; 864 return -ENOMEM;
@@ -793,7 +869,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
793 arg1 |= rx_tx << 15 | stats_size << 16; 869 arg1 |= rx_tx << 15 | stats_size << 16;
794 870
795 err = qlcnic_issue_cmd(adapter, 871 err = qlcnic_issue_cmd(adapter,
796 adapter->ahw.pci_func, 872 adapter->ahw->pci_func,
797 adapter->fw_hal_version, 873 adapter->fw_hal_version,
798 arg1, 874 arg1,
799 MSD(stats_dma_t), 875 MSD(stats_dma_t),
@@ -816,7 +892,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
816 esw_stats->numbytes = le64_to_cpu(stats->numbytes); 892 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
817 } 893 }
818 894
819 pci_free_consistent(adapter->pdev, stats_size, stats_addr, 895 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
820 stats_dma_t); 896 stats_dma_t);
821 return err; 897 return err;
822} 898}
@@ -900,7 +976,7 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
900 arg1 |= BIT_14 | rx_tx << 15; 976 arg1 |= BIT_14 | rx_tx << 15;
901 977
902 return qlcnic_issue_cmd(adapter, 978 return qlcnic_issue_cmd(adapter,
903 adapter->ahw.pci_func, 979 adapter->ahw->pci_func,
904 adapter->fw_hal_version, 980 adapter->fw_hal_version,
905 arg1, 981 arg1,
906 0, 982 0,
@@ -921,7 +997,7 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
921 u8 pci_func; 997 u8 pci_func;
922 pci_func = (*arg1 >> 8); 998 pci_func = (*arg1 >> 8);
923 err = qlcnic_issue_cmd(adapter, 999 err = qlcnic_issue_cmd(adapter,
924 adapter->ahw.pci_func, 1000 adapter->ahw->pci_func,
925 adapter->fw_hal_version, 1001 adapter->fw_hal_version,
926 *arg1, 1002 *arg1,
927 0, 1003 0,
@@ -999,7 +1075,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
999 } 1075 }
1000 1076
1001 err = qlcnic_issue_cmd(adapter, 1077 err = qlcnic_issue_cmd(adapter,
1002 adapter->ahw.pci_func, 1078 adapter->ahw->pci_func,
1003 adapter->fw_hal_version, 1079 adapter->fw_hal_version,
1004 arg1, 1080 arg1,
1005 arg2, 1081 arg2,
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 45b2755d6cba..9efc690a289f 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -150,10 +150,10 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
150{ 150{
151 struct qlcnic_adapter *adapter = netdev_priv(dev); 151 struct qlcnic_adapter *adapter = netdev_priv(dev);
152 int check_sfp_module = 0; 152 int check_sfp_module = 0;
153 u16 pcifn = adapter->ahw.pci_func; 153 u16 pcifn = adapter->ahw->pci_func;
154 154
155 /* read which mode */ 155 /* read which mode */
156 if (adapter->ahw.port_type == QLCNIC_GBE) { 156 if (adapter->ahw->port_type == QLCNIC_GBE) {
157 ecmd->supported = (SUPPORTED_10baseT_Half | 157 ecmd->supported = (SUPPORTED_10baseT_Half |
158 SUPPORTED_10baseT_Full | 158 SUPPORTED_10baseT_Full |
159 SUPPORTED_100baseT_Half | 159 SUPPORTED_100baseT_Half |
@@ -166,11 +166,11 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
166 ADVERTISED_1000baseT_Half | 166 ADVERTISED_1000baseT_Half |
167 ADVERTISED_1000baseT_Full); 167 ADVERTISED_1000baseT_Full);
168 168
169 ecmd->speed = adapter->link_speed; 169 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
170 ecmd->duplex = adapter->link_duplex; 170 ecmd->duplex = adapter->link_duplex;
171 ecmd->autoneg = adapter->link_autoneg; 171 ecmd->autoneg = adapter->link_autoneg;
172 172
173 } else if (adapter->ahw.port_type == QLCNIC_XGBE) { 173 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
174 u32 val; 174 u32 val;
175 175
176 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); 176 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
@@ -183,15 +183,15 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
183 } 183 }
184 184
185 if (netif_running(dev) && adapter->has_link_events) { 185 if (netif_running(dev) && adapter->has_link_events) {
186 ecmd->speed = adapter->link_speed; 186 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
187 ecmd->autoneg = adapter->link_autoneg; 187 ecmd->autoneg = adapter->link_autoneg;
188 ecmd->duplex = adapter->link_duplex; 188 ecmd->duplex = adapter->link_duplex;
189 goto skip; 189 goto skip;
190 } 190 }
191 191
192 val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); 192 val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
193 ecmd->speed = P3P_LINK_SPEED_MHZ * 193 ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ *
194 P3P_LINK_SPEED_VAL(pcifn, val); 194 P3P_LINK_SPEED_VAL(pcifn, val));
195 ecmd->duplex = DUPLEX_FULL; 195 ecmd->duplex = DUPLEX_FULL;
196 ecmd->autoneg = AUTONEG_DISABLE; 196 ecmd->autoneg = AUTONEG_DISABLE;
197 } else 197 } else
@@ -201,7 +201,7 @@ skip:
201 ecmd->phy_address = adapter->physical_port; 201 ecmd->phy_address = adapter->physical_port;
202 ecmd->transceiver = XCVR_EXTERNAL; 202 ecmd->transceiver = XCVR_EXTERNAL;
203 203
204 switch (adapter->ahw.board_type) { 204 switch (adapter->ahw->board_type) {
205 case QLCNIC_BRDTYPE_P3P_REF_QG: 205 case QLCNIC_BRDTYPE_P3P_REF_QG:
206 case QLCNIC_BRDTYPE_P3P_4_GB: 206 case QLCNIC_BRDTYPE_P3P_4_GB:
207 case QLCNIC_BRDTYPE_P3P_4_GB_MM: 207 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
@@ -238,7 +238,7 @@ skip:
238 ecmd->autoneg = AUTONEG_DISABLE; 238 ecmd->autoneg = AUTONEG_DISABLE;
239 break; 239 break;
240 case QLCNIC_BRDTYPE_P3P_10G_TP: 240 case QLCNIC_BRDTYPE_P3P_10G_TP:
241 if (adapter->ahw.port_type == QLCNIC_XGBE) { 241 if (adapter->ahw->port_type == QLCNIC_XGBE) {
242 ecmd->autoneg = AUTONEG_DISABLE; 242 ecmd->autoneg = AUTONEG_DISABLE;
243 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); 243 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
244 ecmd->advertising |= 244 ecmd->advertising |=
@@ -256,7 +256,7 @@ skip:
256 break; 256 break;
257 default: 257 default:
258 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", 258 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
259 adapter->ahw.board_type); 259 adapter->ahw->board_type);
260 return -EIO; 260 return -EIO;
261 } 261 }
262 262
@@ -284,50 +284,44 @@ skip:
284static int 284static int
285qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 285qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
286{ 286{
287 u32 config = 0;
288 u32 ret = 0;
287 struct qlcnic_adapter *adapter = netdev_priv(dev); 289 struct qlcnic_adapter *adapter = netdev_priv(dev);
288 __u32 status; 290
291 if (adapter->ahw->port_type != QLCNIC_GBE)
292 return -EOPNOTSUPP;
289 293
290 /* read which mode */ 294 /* read which mode */
291 if (adapter->ahw.port_type == QLCNIC_GBE) { 295 if (ecmd->duplex)
292 /* autonegotiation */ 296 config |= 0x1;
293 if (qlcnic_fw_cmd_set_phy(adapter,
294 QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
295 ecmd->autoneg) != 0)
296 return -EIO;
297 else
298 adapter->link_autoneg = ecmd->autoneg;
299 297
300 if (qlcnic_fw_cmd_query_phy(adapter, 298 if (ecmd->autoneg)
301 QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 299 config |= 0x2;
302 &status) != 0)
303 return -EIO;
304 300
305 switch (ecmd->speed) { 301 switch (ethtool_cmd_speed(ecmd)) {
306 case SPEED_10: 302 case SPEED_10:
307 qlcnic_set_phy_speed(status, 0); 303 config |= (0 << 8);
308 break; 304 break;
309 case SPEED_100: 305 case SPEED_100:
310 qlcnic_set_phy_speed(status, 1); 306 config |= (1 << 8);
311 break; 307 break;
312 case SPEED_1000: 308 case SPEED_1000:
313 qlcnic_set_phy_speed(status, 2); 309 config |= (10 << 8);
314 break; 310 break;
315 } 311 default:
312 return -EIO;
313 }
316 314
317 if (ecmd->duplex == DUPLEX_HALF) 315 ret = qlcnic_fw_cmd_set_port(adapter, config);
318 qlcnic_clear_phy_duplex(status); 316
319 if (ecmd->duplex == DUPLEX_FULL) 317 if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
320 qlcnic_set_phy_duplex(status);
321 if (qlcnic_fw_cmd_set_phy(adapter,
322 QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
323 *((int *)&status)) != 0)
324 return -EIO;
325 else {
326 adapter->link_speed = ecmd->speed;
327 adapter->link_duplex = ecmd->duplex;
328 }
329 } else
330 return -EOPNOTSUPP; 318 return -EOPNOTSUPP;
319 else if (ret)
320 return -EIO;
321
322 adapter->link_speed = ethtool_cmd_speed(ecmd);
323 adapter->link_duplex = ecmd->duplex;
324 adapter->link_autoneg = ecmd->autoneg;
331 325
332 if (!netif_running(dev)) 326 if (!netif_running(dev))
333 return 0; 327 return 0;
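
qlcnic_get_settings()/qlcnic_set_settings() above stop touching ecmd->speed directly and use the ethtool speed accessors, which spread the value over the speed and speed_hi fields so link rates that do not fit in 16 bits are reported correctly. A minimal sketch of both directions (the link_speed parameter is illustrative):

#include <linux/ethtool.h>

/* Reporting a speed to user space (get_settings side). */
static void example_report_speed(struct ethtool_cmd *ecmd, u32 link_speed)
{
	ethtool_cmd_speed_set(ecmd, link_speed);	/* was: ecmd->speed = ... */
	ecmd->duplex = DUPLEX_FULL;
	ecmd->autoneg = AUTONEG_DISABLE;
}

/* Reading the requested speed back (set_settings side). */
static u32 example_requested_speed(const struct ethtool_cmd *ecmd)
{
	return ethtool_cmd_speed(ecmd);			/* was: ecmd->speed */
}
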
@@ -340,14 +334,14 @@ static void
340qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) 334qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
341{ 335{
342 struct qlcnic_adapter *adapter = netdev_priv(dev); 336 struct qlcnic_adapter *adapter = netdev_priv(dev);
343 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 337 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
344 struct qlcnic_host_sds_ring *sds_ring; 338 struct qlcnic_host_sds_ring *sds_ring;
345 u32 *regs_buff = p; 339 u32 *regs_buff = p;
346 int ring, i = 0, j = 0; 340 int ring, i = 0, j = 0;
347 341
348 memset(p, 0, qlcnic_get_regs_len(dev)); 342 memset(p, 0, qlcnic_get_regs_len(dev));
349 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | 343 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
350 (adapter->ahw.revision_id << 16) | (adapter->pdev)->device; 344 (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
351 345
352 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); 346 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
353 regs_buff[1] = QLCNIC_MGMT_API_VERSION; 347 regs_buff[1] = QLCNIC_MGMT_API_VERSION;
@@ -382,7 +376,7 @@ static u32 qlcnic_test_link(struct net_device *dev)
382 u32 val; 376 u32 val;
383 377
384 val = QLCRD32(adapter, CRB_XG_STATE_P3P); 378 val = QLCRD32(adapter, CRB_XG_STATE_P3P);
385 val = XG_LINK_STATE_P3P(adapter->ahw.pci_func, val); 379 val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
386 return (val == XG_LINK_UP_P3P) ? 0 : 1; 380 return (val == XG_LINK_UP_P3P) ? 0 : 1;
387} 381}
388 382
@@ -474,6 +468,39 @@ qlcnic_set_ringparam(struct net_device *dev,
474 return qlcnic_reset_context(adapter); 468 return qlcnic_reset_context(adapter);
475} 469}
476 470
471static void qlcnic_get_channels(struct net_device *dev,
472 struct ethtool_channels *channel)
473{
474 struct qlcnic_adapter *adapter = netdev_priv(dev);
475
476 channel->max_rx = rounddown_pow_of_two(min_t(int,
477 adapter->max_rx_ques, num_online_cpus()));
478 channel->max_tx = adapter->max_tx_ques;
479
480 channel->rx_count = adapter->max_sds_rings;
481 channel->tx_count = adapter->max_tx_ques;
482}
483
484static int qlcnic_set_channels(struct net_device *dev,
485 struct ethtool_channels *channel)
486{
487 struct qlcnic_adapter *adapter = netdev_priv(dev);
488 int err;
489
490 if (channel->other_count || channel->combined_count ||
491 channel->tx_count != channel->max_tx)
492 return -EINVAL;
493
494 err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
495 if (err)
496 return err;
497
498 err = qlcnic_set_max_rss(adapter, channel->rx_count);
499 netdev_info(dev, "allocated 0x%x sds rings\n",
500 adapter->max_sds_rings);
501 return err;
502}
503
477static void 504static void
478qlcnic_get_pauseparam(struct net_device *netdev, 505qlcnic_get_pauseparam(struct net_device *netdev,
479 struct ethtool_pauseparam *pause) 506 struct ethtool_pauseparam *pause)
@@ -482,7 +509,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
482 int port = adapter->physical_port; 509 int port = adapter->physical_port;
483 __u32 val; 510 __u32 val;
484 511
485 if (adapter->ahw.port_type == QLCNIC_GBE) { 512 if (adapter->ahw->port_type == QLCNIC_GBE) {
486 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) 513 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
487 return; 514 return;
488 /* get flow control settings */ 515 /* get flow control settings */
@@ -504,7 +531,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
504 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); 531 pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
505 break; 532 break;
506 } 533 }
507 } else if (adapter->ahw.port_type == QLCNIC_XGBE) { 534 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
508 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) 535 if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
509 return; 536 return;
510 pause->rx_pause = 1; 537 pause->rx_pause = 1;
@@ -515,7 +542,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
515 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); 542 pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
516 } else { 543 } else {
517 dev_err(&netdev->dev, "Unknown board type: %x\n", 544 dev_err(&netdev->dev, "Unknown board type: %x\n",
518 adapter->ahw.port_type); 545 adapter->ahw->port_type);
519 } 546 }
520} 547}
521 548
@@ -528,7 +555,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
528 __u32 val; 555 __u32 val;
529 556
530 /* read mode */ 557 /* read mode */
531 if (adapter->ahw.port_type == QLCNIC_GBE) { 558 if (adapter->ahw->port_type == QLCNIC_GBE) {
532 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) 559 if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
533 return -EIO; 560 return -EIO;
534 /* set flow control */ 561 /* set flow control */
@@ -571,7 +598,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
571 break; 598 break;
572 } 599 }
573 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); 600 QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
574 } else if (adapter->ahw.port_type == QLCNIC_XGBE) { 601 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
575 if (!pause->rx_pause || pause->autoneg) 602 if (!pause->rx_pause || pause->autoneg)
576 return -EOPNOTSUPP; 603 return -EOPNOTSUPP;
577 604
@@ -593,7 +620,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
593 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); 620 QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
594 } else { 621 } else {
595 dev_err(&netdev->dev, "Unknown board type: %x\n", 622 dev_err(&netdev->dev, "Unknown board type: %x\n",
596 adapter->ahw.port_type); 623 adapter->ahw->port_type);
597 } 624 }
598 return 0; 625 return 0;
599} 626}
@@ -639,8 +666,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
639 goto clear_it; 666 goto clear_it;
640 667
641 adapter->diag_cnt = 0; 668 adapter->diag_cnt = 0;
642 ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func, 669 ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func,
643 adapter->fw_hal_version, adapter->portnum, 670 adapter->fw_hal_version, adapter->ahw->pci_func,
644 0, 0, 0x00000011); 671 0, 0, 0x00000011);
645 if (ret) 672 if (ret)
646 goto done; 673 goto done;
@@ -749,14 +776,14 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
749 return; 776 return;
750 777
751 memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); 778 memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
752 ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func, 779 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
753 QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); 780 QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
754 if (ret) 781 if (ret)
755 return; 782 return;
756 783
757 qlcnic_fill_device_stats(&index, data, &port_stats.rx); 784 qlcnic_fill_device_stats(&index, data, &port_stats.rx);
758 785
759 ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func, 786 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
760 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); 787 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
761 if (ret) 788 if (ret)
762 return; 789 return;
@@ -764,115 +791,49 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
764 qlcnic_fill_device_stats(&index, data, &port_stats.tx); 791 qlcnic_fill_device_stats(&index, data, &port_stats.tx);
765} 792}
766 793
767static int qlcnic_set_tx_csum(struct net_device *dev, u32 data) 794static int qlcnic_set_led(struct net_device *dev,
768{ 795 enum ethtool_phys_id_state state)
769 struct qlcnic_adapter *adapter = netdev_priv(dev);
770
771 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
772 return -EOPNOTSUPP;
773 if (data)
774 dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
775 else
776 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
777
778 return 0;
779
780}
781static u32 qlcnic_get_tx_csum(struct net_device *dev)
782{
783 return dev->features & NETIF_F_IP_CSUM;
784}
785
786static u32 qlcnic_get_rx_csum(struct net_device *dev)
787{
788 struct qlcnic_adapter *adapter = netdev_priv(dev);
789 return adapter->rx_csum;
790}
791
792static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
793{
794 struct qlcnic_adapter *adapter = netdev_priv(dev);
795
796 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
797 return -EOPNOTSUPP;
798 if (!!data) {
799 adapter->rx_csum = !!data;
800 return 0;
801 }
802
803 if (dev->features & NETIF_F_LRO) {
804 if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED))
805 return -EIO;
806
807 dev->features &= ~NETIF_F_LRO;
808 qlcnic_send_lro_cleanup(adapter);
809 dev_info(&adapter->pdev->dev,
810 "disabling LRO as rx_csum is off\n");
811 }
812 adapter->rx_csum = !!data;
813 return 0;
814}
815
816static u32 qlcnic_get_tso(struct net_device *dev)
817{
818 return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
819}
820
821static int qlcnic_set_tso(struct net_device *dev, u32 data)
822{
823 struct qlcnic_adapter *adapter = netdev_priv(dev);
824 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO))
825 return -EOPNOTSUPP;
826 if (data)
827 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
828 else
829 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
830
831 return 0;
832}
833
834static int qlcnic_blink_led(struct net_device *dev, u32 val)
835{ 796{
836 struct qlcnic_adapter *adapter = netdev_priv(dev); 797 struct qlcnic_adapter *adapter = netdev_priv(dev);
837 int max_sds_rings = adapter->max_sds_rings; 798 int max_sds_rings = adapter->max_sds_rings;
838 int dev_down = 0;
839 int ret;
840
841 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
842 dev_down = 1;
843 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
844 return -EIO;
845 799
846 ret = qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST); 800 switch (state) {
847 if (ret) { 801 case ETHTOOL_ID_ACTIVE:
848 clear_bit(__QLCNIC_RESETTING, &adapter->state); 802 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
849 return ret; 803 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
804 return -EIO;
805
806 if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) {
807 clear_bit(__QLCNIC_RESETTING, &adapter->state);
808 return -EIO;
809 }
810 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
850 } 811 }
851 }
852 812
853 ret = adapter->nic_ops->config_led(adapter, 1, 0xf); 813 if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0)
854 if (ret) { 814 return 0;
815
855 dev_err(&adapter->pdev->dev, 816 dev_err(&adapter->pdev->dev,
856 "Failed to set LED blink state.\n"); 817 "Failed to set LED blink state.\n");
857 goto done; 818 break;
858 }
859 819
860 msleep_interruptible(val * 1000); 820 case ETHTOOL_ID_INACTIVE:
821 if (adapter->nic_ops->config_led(adapter, 0, 0xf))
822 dev_err(&adapter->pdev->dev,
823 "Failed to reset LED blink state.\n");
861 824
862 ret = adapter->nic_ops->config_led(adapter, 0, 0xf); 825 break;
863 if (ret) { 826
864 dev_err(&adapter->pdev->dev, 827 default:
865 "Failed to reset LED blink state.\n"); 828 return -EINVAL;
866 goto done;
867 } 829 }
868 830
869done: 831 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) {
870 if (dev_down) {
871 qlcnic_diag_free_res(dev, max_sds_rings); 832 qlcnic_diag_free_res(dev, max_sds_rings);
872 clear_bit(__QLCNIC_RESETTING, &adapter->state); 833 clear_bit(__QLCNIC_RESETTING, &adapter->state);
873 } 834 }
874 return ret;
875 835
836 return -EIO;
876} 837}
877 838
878static void 839static void
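
qlcnic_blink_led() becomes a .set_phys_id handler above. In that interface the core calls the handler with ETHTOOL_ID_ACTIVE to start identification and ETHTOOL_ID_INACTIVE to stop; returning 0 from ACTIVE (as qlcnic does, since its firmware blinks the LED itself) means the driver handles blinking on its own, while returning a positive frequency asks the core to drive ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks at that rate. A sketch of the software-blink variant, with a hypothetical LED helper:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical LED helper for the example; a real driver would poke hardware. */
static int example_led_set(struct net_device *dev, bool on)
{
	return 0;
}

static int example_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* ask the core for one blink cycle per second */
	case ETHTOOL_ID_ON:
		return example_led_set(dev, true);
	case ETHTOOL_ID_OFF:
		return example_led_set(dev, false);
	case ETHTOOL_ID_INACTIVE:
		return example_led_set(dev, false);	/* restore normal state */
	}
	return -EINVAL;
}
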
@@ -936,8 +897,8 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
936 */ 897 */
937 if (ethcoal->rx_coalesce_usecs > 0xffff || 898 if (ethcoal->rx_coalesce_usecs > 0xffff ||
938 ethcoal->rx_max_coalesced_frames > 0xffff || 899 ethcoal->rx_max_coalesced_frames > 0xffff ||
939 ethcoal->tx_coalesce_usecs > 0xffff || 900 ethcoal->tx_coalesce_usecs ||
940 ethcoal->tx_max_coalesced_frames > 0xffff || 901 ethcoal->tx_max_coalesced_frames ||
941 ethcoal->rx_coalesce_usecs_irq || 902 ethcoal->rx_coalesce_usecs_irq ||
942 ethcoal->rx_max_coalesced_frames_irq || 903 ethcoal->rx_max_coalesced_frames_irq ||
943 ethcoal->tx_coalesce_usecs_irq || 904 ethcoal->tx_coalesce_usecs_irq ||
@@ -959,21 +920,17 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
959 920
960 if (!ethcoal->rx_coalesce_usecs || 921 if (!ethcoal->rx_coalesce_usecs ||
961 !ethcoal->rx_max_coalesced_frames) { 922 !ethcoal->rx_max_coalesced_frames) {
962 adapter->coal.flags = QLCNIC_INTR_DEFAULT; 923 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
963 adapter->coal.normal.data.rx_time_us = 924 adapter->ahw->coal.rx_time_us =
964 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US; 925 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
965 adapter->coal.normal.data.rx_packets = 926 adapter->ahw->coal.rx_packets =
966 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS; 927 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
967 } else { 928 } else {
968 adapter->coal.flags = 0; 929 adapter->ahw->coal.flag = 0;
969 adapter->coal.normal.data.rx_time_us = 930 adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
970 ethcoal->rx_coalesce_usecs; 931 adapter->ahw->coal.rx_packets =
971 adapter->coal.normal.data.rx_packets = 932 ethcoal->rx_max_coalesced_frames;
972 ethcoal->rx_max_coalesced_frames;
973 } 933 }
974 adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
975 adapter->coal.normal.data.tx_packets =
976 ethcoal->tx_max_coalesced_frames;
977 934
978 qlcnic_config_intr_coalesce(adapter); 935 qlcnic_config_intr_coalesce(adapter);
979 936
@@ -988,66 +945,102 @@ static int qlcnic_get_intr_coalesce(struct net_device *netdev,
988 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 945 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
989 return -EINVAL; 946 return -EINVAL;
990 947
991 ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; 948 ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
992 ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; 949 ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
993 ethcoal->rx_max_coalesced_frames =
994 adapter->coal.normal.data.rx_packets;
995 ethcoal->tx_max_coalesced_frames =
996 adapter->coal.normal.data.tx_packets;
997 950
998 return 0; 951 return 0;
999} 952}
1000 953
1001static int qlcnic_set_flags(struct net_device *netdev, u32 data) 954static u32 qlcnic_get_msglevel(struct net_device *netdev)
1002{ 955{
1003 struct qlcnic_adapter *adapter = netdev_priv(netdev); 956 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1004 int hw_lro;
1005
1006 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
1007 return -EINVAL;
1008
1009 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
1010 return -EINVAL;
1011
1012 if (!adapter->rx_csum) {
1013 dev_info(&adapter->pdev->dev, "rx csum is off, "
1014 "cannot toggle lro\n");
1015 return -EINVAL;
1016 }
1017 957
1018 if ((data & ETH_FLAG_LRO) && (netdev->features & NETIF_F_LRO)) 958 return adapter->msg_enable;
1019 return 0; 959}
1020
1021 if (data & ETH_FLAG_LRO) {
1022 hw_lro = QLCNIC_LRO_ENABLED;
1023 netdev->features |= NETIF_F_LRO;
1024 } else {
1025 hw_lro = 0;
1026 netdev->features &= ~NETIF_F_LRO;
1027 }
1028 960
1029 if (qlcnic_config_hw_lro(adapter, hw_lro)) 961static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
1030 return -EIO; 962{
963 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1031 964
1032 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter)) 965 adapter->msg_enable = msglvl;
1033 return -EIO; 966}
1034 967
968static int
969qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
970{
971 struct qlcnic_adapter *adapter = netdev_priv(netdev);
972 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1035 973
974 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
975 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
976 dump->version = adapter->fw_version;
1036 return 0; 977 return 0;
1037} 978}
1038 979
1039static u32 qlcnic_get_msglevel(struct net_device *netdev) 980static int
981qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
982 void *buffer)
1040{ 983{
984 int i, copy_sz;
985 u32 *hdr_ptr, *data;
1041 struct qlcnic_adapter *adapter = netdev_priv(netdev); 986 struct qlcnic_adapter *adapter = netdev_priv(netdev);
987 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1042 988
1043 return adapter->msg_enable; 989 if (qlcnic_api_lock(adapter))
990 return -EIO;
991 if (!fw_dump->clr) {
992 netdev_info(netdev, "Dump not available\n");
993 qlcnic_api_unlock(adapter);
994 return -EINVAL;
995 }
996 /* Copy template header first */
997 copy_sz = fw_dump->tmpl_hdr->size;
998 hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
999 data = (u32 *) buffer;
1000 for (i = 0; i < copy_sz/sizeof(u32); i++)
1001 *data++ = cpu_to_le32(*hdr_ptr++);
1002
1003 /* Copy captured dump data */
1004 memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
1005 dump->len = copy_sz + fw_dump->size;
1006 dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
1007
1008 /* Free dump area once data has been captured */
1009 vfree(fw_dump->data);
1010 fw_dump->data = NULL;
1011 fw_dump->clr = 0;
1012 qlcnic_api_unlock(adapter);
1013
1014 return 0;
1044} 1015}
1045 1016
1046static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) 1017static int
1018qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1047{ 1019{
1020 int ret = 0;
1048 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1021 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1022 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1049 1023
1050 adapter->msg_enable = msglvl; 1024 if (val->flag == QLCNIC_FORCE_FW_DUMP_KEY) {
1025 netdev_info(netdev, "Forcing a FW dump\n");
1026 qlcnic_dev_request_reset(adapter);
1027 } else {
1028 if (val->flag > QLCNIC_DUMP_MASK_MAX ||
1029 val->flag < QLCNIC_DUMP_MASK_MIN) {
1030 netdev_info(netdev,
1031 "Invalid dump level: 0x%x\n", val->flag);
1032 ret = -EINVAL;
1033 goto out;
1034 }
1035 if (qlcnic_api_lock(adapter))
1036 return -EIO;
1037 fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff;
1038 qlcnic_api_unlock(adapter);
1039 netdev_info(netdev, "Driver mask changed to: 0x%x\n",
1040 fw_dump->tmpl_hdr->drv_cap_mask);
1041 }
1042out:
1043 return ret;
1051} 1044}
1052 1045
1053const struct ethtool_ops qlcnic_ethtool_ops = { 1046const struct ethtool_ops qlcnic_ethtool_ops = {
@@ -1061,26 +1054,22 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1061 .get_eeprom = qlcnic_get_eeprom, 1054 .get_eeprom = qlcnic_get_eeprom,
1062 .get_ringparam = qlcnic_get_ringparam, 1055 .get_ringparam = qlcnic_get_ringparam,
1063 .set_ringparam = qlcnic_set_ringparam, 1056 .set_ringparam = qlcnic_set_ringparam,
1057 .get_channels = qlcnic_get_channels,
1058 .set_channels = qlcnic_set_channels,
1064 .get_pauseparam = qlcnic_get_pauseparam, 1059 .get_pauseparam = qlcnic_get_pauseparam,
1065 .set_pauseparam = qlcnic_set_pauseparam, 1060 .set_pauseparam = qlcnic_set_pauseparam,
1066 .get_tx_csum = qlcnic_get_tx_csum,
1067 .set_tx_csum = qlcnic_set_tx_csum,
1068 .set_sg = ethtool_op_set_sg,
1069 .get_tso = qlcnic_get_tso,
1070 .set_tso = qlcnic_set_tso,
1071 .get_wol = qlcnic_get_wol, 1061 .get_wol = qlcnic_get_wol,
1072 .set_wol = qlcnic_set_wol, 1062 .set_wol = qlcnic_set_wol,
1073 .self_test = qlcnic_diag_test, 1063 .self_test = qlcnic_diag_test,
1074 .get_strings = qlcnic_get_strings, 1064 .get_strings = qlcnic_get_strings,
1075 .get_ethtool_stats = qlcnic_get_ethtool_stats, 1065 .get_ethtool_stats = qlcnic_get_ethtool_stats,
1076 .get_sset_count = qlcnic_get_sset_count, 1066 .get_sset_count = qlcnic_get_sset_count,
1077 .get_rx_csum = qlcnic_get_rx_csum,
1078 .set_rx_csum = qlcnic_set_rx_csum,
1079 .get_coalesce = qlcnic_get_intr_coalesce, 1067 .get_coalesce = qlcnic_get_intr_coalesce,
1080 .set_coalesce = qlcnic_set_intr_coalesce, 1068 .set_coalesce = qlcnic_set_intr_coalesce,
1081 .get_flags = ethtool_op_get_flags, 1069 .set_phys_id = qlcnic_set_led,
1082 .set_flags = qlcnic_set_flags,
1083 .phys_id = qlcnic_blink_led,
1084 .set_msglevel = qlcnic_set_msglevel, 1070 .set_msglevel = qlcnic_set_msglevel,
1085 .get_msglevel = qlcnic_get_msglevel, 1071 .get_msglevel = qlcnic_get_msglevel,
1072 .get_dump_flag = qlcnic_get_dump_flag,
1073 .get_dump_data = qlcnic_get_dump_data,
1074 .set_dump = qlcnic_set_dump,
1086}; 1075};
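
The new .get_dump_flag/.get_dump_data/.set_dump hooks wire the firmware dump into ethtool's generic dump interface (ethtool -w / -W). A minimal sketch of the read side, assuming a hypothetical private struct that holds the captured dump:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical private data for the example. */
struct example_priv {
	void *dump_data;
	u32 dump_len;
	u32 dump_flag;
};

static int example_get_dump_flag(struct net_device *dev,
				 struct ethtool_dump *dump)
{
	struct example_priv *priv = netdev_priv(dev);

	dump->len = priv->dump_len;	/* user space sizes its buffer from this */
	dump->flag = priv->dump_flag;	/* currently selected capture mask */
	return 0;
}

static int example_get_dump_data(struct net_device *dev,
				 struct ethtool_dump *dump, void *buffer)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->dump_data)
		return -EINVAL;	/* nothing captured yet */

	memcpy(buffer, priv->dump_data, priv->dump_len);
	dump->len = priv->dump_len;
	return 0;
}
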
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 726ef555b6bc..d14506f764e0 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -492,10 +492,10 @@ enum {
492 492
493#define TEST_AGT_CTRL (0x00) 493#define TEST_AGT_CTRL (0x00)
494 494
495#define TA_CTL_START 1 495#define TA_CTL_START BIT_0
496#define TA_CTL_ENABLE 2 496#define TA_CTL_ENABLE BIT_1
497#define TA_CTL_WRITE 4 497#define TA_CTL_WRITE BIT_2
498#define TA_CTL_BUSY 8 498#define TA_CTL_BUSY BIT_3
499 499
500/* 500/*
501 * Register offsets for MN 501 * Register offsets for MN
@@ -765,6 +765,38 @@ struct qlcnic_legacy_intr_set {
765#define QLCNIC_MAX_PCI_FUNC 8 765#define QLCNIC_MAX_PCI_FUNC 8
766#define QLCNIC_MAX_VLAN_FILTERS 64 766#define QLCNIC_MAX_VLAN_FILTERS 64
767 767
768/* FW dump defines */
769#define MIU_TEST_CTR 0x41000090
770#define MIU_TEST_ADDR_LO 0x41000094
771#define MIU_TEST_ADDR_HI 0x41000098
772#define FLASH_ROM_WINDOW 0x42110030
773#define FLASH_ROM_DATA 0x42150000
774
775static const u32 MIU_TEST_READ_DATA[] = {
776 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
777
778#define QLCNIC_FW_DUMP_REG1 0x00130060
779#define QLCNIC_FW_DUMP_REG2 0x001e0000
780#define QLCNIC_FLASH_SEM2_LK 0x0013C010
781#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
782#define QLCNIC_FLASH_LOCK_ID 0x001B2100
783
784#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \
785 writel((addr & 0xFFFF0000), (void *) (bar0 + \
786 QLCNIC_FW_DUMP_REG1)); \
787 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
788 *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \
789 LSW(addr))); \
790} while (0)
791
792#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \
793 writel((addr & 0xFFFF0000), (void *) (bar0 + \
794 QLCNIC_FW_DUMP_REG1)); \
795 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
796 writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\
797 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \
798} while (0)
799
768/* PCI function operational mode */ 800/* PCI function operational mode */
769enum { 801enum {
770 QLCNIC_MGMT_FUNC = 0, 802 QLCNIC_MGMT_FUNC = 0,
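
QLCNIC_RD_DUMP_REG/QLCNIC_WR_DUMP_REG above implement an indirect register access: the upper 16 bits of the target address select a 64 KB window through QLCNIC_FW_DUMP_REG1, the read-back flushes the posted write, and the register is then accessed through the QLCNIC_FW_DUMP_REG2 aperture at the low 16 bits of the address. The same read expressed as a function, which also keeps the __iomem annotation the macros cast away (a sketch, not driver code):

#include <linux/io.h>

static u32 example_windowed_read(void __iomem *bar0, u32 addr)
{
	/* select the window covering addr */
	writel(addr & 0xFFFF0000, bar0 + QLCNIC_FW_DUMP_REG1);
	/* read back so the window switch has landed before the data read */
	readl(bar0 + QLCNIC_FW_DUMP_REG1);
	/* read the register through the aperture */
	return readl(bar0 + QLCNIC_FW_DUMP_REG2 + (addr & 0xFFFF));
}
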
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 616940f0a8d0..e9656616f2a2 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <net/ip.h> 11#include <net/ip.h>
12#include <linux/bitops.h>
12 13
13#define MASK(n) ((1ULL<<(n))-1) 14#define MASK(n) ((1ULL<<(n))-1)
14#define OCM_WIN_P3P(addr) (addr & 0xffc0000) 15#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
@@ -457,7 +458,7 @@ int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
457 458
458 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 459 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
459 460
460 word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | 461 word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
461 ((u64)adapter->portnum << 16); 462 ((u64)adapter->portnum << 16);
462 req.req_hdr = cpu_to_le64(word); 463 req.req_hdr = cpu_to_le64(word);
463 464
@@ -532,33 +533,31 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
532 } 533 }
533} 534}
534 535
535#define QLCNIC_CONFIG_INTR_COALESCE 3
536
537/* 536/*
538 * Send the interrupt coalescing parameter set by ethtool to the card. 537 * Send the interrupt coalescing parameter set by ethtool to the card.
539 */ 538 */
540int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter) 539int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
541{ 540{
542 struct qlcnic_nic_req req; 541 struct qlcnic_nic_req req;
543 u64 word[6]; 542 int rv;
544 int rv, i;
545 543
546 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 544 memset(&req, 0, sizeof(struct qlcnic_nic_req));
547 545
548 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 546 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
549 547
550 word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); 548 req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
551 req.req_hdr = cpu_to_le64(word[0]); 549 ((u64) adapter->portnum << 16));
552
553 memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
554 for (i = 0; i < 6; i++)
555 req.words[i] = cpu_to_le64(word[i]);
556 550
551 req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
552 req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
553 ((u64) adapter->ahw->coal.rx_time_us) << 16);
554 req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
555 ((u64) adapter->ahw->coal.type) << 32 |
556 ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
557 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 557 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
558 if (rv != 0) 558 if (rv != 0)
559 dev_err(&adapter->netdev->dev, 559 dev_err(&adapter->netdev->dev,
560 "Could not send interrupt coalescing parameters\n"); 560 "Could not send interrupt coalescing parameters\n");
561
562 return rv; 561 return rv;
563} 562}
564 563
@@ -568,6 +567,9 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
568 u64 word; 567 u64 word;
569 int rv; 568 int rv;
570 569
570 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
571 return 0;
572
571 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 573 memset(&req, 0, sizeof(struct qlcnic_nic_req));
572 574
573 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 575 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -713,6 +715,9 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
713 u64 word; 715 u64 word;
714 int rv; 716 int rv;
715 717
718 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
719 return 0;
720
716 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 721 memset(&req, 0, sizeof(struct qlcnic_nic_req));
717 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 722 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
718 723
@@ -754,6 +759,43 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
754 return rc; 759 return rc;
755} 760}
756 761
762
763u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
764{
765 struct qlcnic_adapter *adapter = netdev_priv(netdev);
766
767 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
768 u32 changed = features ^ netdev->features;
769 features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
770 }
771
772 if (!(features & NETIF_F_RXCSUM))
773 features &= ~NETIF_F_LRO;
774
775 return features;
776}
777
778
779int qlcnic_set_features(struct net_device *netdev, u32 features)
780{
781 struct qlcnic_adapter *adapter = netdev_priv(netdev);
782 u32 changed = netdev->features ^ features;
783 int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
784
785 if (!(changed & NETIF_F_LRO))
786 return 0;
787
788 netdev->features = features ^ NETIF_F_LRO;
789
790 if (qlcnic_config_hw_lro(adapter, hw_lro))
791 return -EIO;
792
793 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
794 return -EIO;
795
796 return 0;
797}
798
757/* 799/*
758 * Changes the CRB window to the specified window. 800 * Changes the CRB window to the specified window.
759 */ 801 */
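
qlcnic_fix_features()/qlcnic_set_features() above replace the per-feature ethtool callbacks (set_tx_csum/set_rx_csum/set_tso/set_flags, removed earlier in this patch) with the netdev feature hooks: fix_features is called with the feature set the stack wants to apply and returns it with unsupported combinations masked off, and set_features is then invoked only when the surviving bits actually change. That is where the old "LRO needs RX checksumming" interlock now lives. A minimal fix_features sketch in the u32 form this kernel uses:

#include <linux/netdevice.h>

static u32 example_fix_features(struct net_device *netdev, u32 features)
{
	/* LRO cannot work without RX checksum offload */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}
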
@@ -780,7 +822,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
780 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; 822 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
781 823
782 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { 824 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
783 *addr = adapter->ahw.pci_base0 + m->start_2M + 825 *addr = adapter->ahw->pci_base0 + m->start_2M +
784 (off - m->start_128M); 826 (off - m->start_128M);
785 return 0; 827 return 0;
786 } 828 }
@@ -788,7 +830,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
788 /* 830 /*
789 * Not in direct map, use crb window 831 * Not in direct map, use crb window
790 */ 832 */
791 *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); 833 *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
792 return 1; 834 return 1;
793} 835}
794 836
@@ -801,7 +843,7 @@ static int
801qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) 843qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
802{ 844{
803 u32 window; 845 u32 window;
804 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; 846 void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
805 847
806 off -= QLCNIC_PCI_CRBSPACE; 848 off -= QLCNIC_PCI_CRBSPACE;
807 849
@@ -838,13 +880,13 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
838 880
839 if (rv > 0) { 881 if (rv > 0) {
840 /* indirect access */ 882 /* indirect access */
841 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 883 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
842 crb_win_lock(adapter); 884 crb_win_lock(adapter);
843 rv = qlcnic_pci_set_crbwindow_2M(adapter, off); 885 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
844 if (!rv) 886 if (!rv)
845 writel(data, addr); 887 writel(data, addr);
846 crb_win_unlock(adapter); 888 crb_win_unlock(adapter);
847 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 889 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
848 return rv; 890 return rv;
849 } 891 }
850 892
@@ -869,12 +911,12 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
869 911
870 if (rv > 0) { 912 if (rv > 0) {
871 /* indirect access */ 913 /* indirect access */
872 write_lock_irqsave(&adapter->ahw.crb_lock, flags); 914 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
873 crb_win_lock(adapter); 915 crb_win_lock(adapter);
874 if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) 916 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
875 data = readl(addr); 917 data = readl(addr);
876 crb_win_unlock(adapter); 918 crb_win_unlock(adapter);
877 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); 919 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
878 return data; 920 return data;
879 } 921 }
880 922
@@ -904,9 +946,9 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
904 946
905 window = OCM_WIN_P3P(addr); 947 window = OCM_WIN_P3P(addr);
906 948
907 writel(window, adapter->ahw.ocm_win_crb); 949 writel(window, adapter->ahw->ocm_win_crb);
908 /* read back to flush */ 950 /* read back to flush */
909 readl(adapter->ahw.ocm_win_crb); 951 readl(adapter->ahw->ocm_win_crb);
910 952
911 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); 953 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
912 return 0; 954 return 0;
@@ -920,13 +962,13 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
920 int ret; 962 int ret;
921 u32 start; 963 u32 start;
922 964
923 mutex_lock(&adapter->ahw.mem_lock); 965 mutex_lock(&adapter->ahw->mem_lock);
924 966
925 ret = qlcnic_pci_set_window_2M(adapter, off, &start); 967 ret = qlcnic_pci_set_window_2M(adapter, off, &start);
926 if (ret != 0) 968 if (ret != 0)
927 goto unlock; 969 goto unlock;
928 970
929 addr = adapter->ahw.pci_base0 + start; 971 addr = adapter->ahw->pci_base0 + start;
930 972
931 if (op == 0) /* read */ 973 if (op == 0) /* read */
932 *data = readq(addr); 974 *data = readq(addr);
@@ -934,7 +976,7 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
934 writeq(*data, addr); 976 writeq(*data, addr);
935 977
936unlock: 978unlock:
937 mutex_unlock(&adapter->ahw.mem_lock); 979 mutex_unlock(&adapter->ahw->mem_lock);
938 980
939 return ret; 981 return ret;
940} 982}
@@ -942,23 +984,23 @@ unlock:
942void 984void
943qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) 985qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
944{ 986{
945 void __iomem *addr = adapter->ahw.pci_base0 + 987 void __iomem *addr = adapter->ahw->pci_base0 +
946 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); 988 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
947 989
948 mutex_lock(&adapter->ahw.mem_lock); 990 mutex_lock(&adapter->ahw->mem_lock);
949 *data = readq(addr); 991 *data = readq(addr);
950 mutex_unlock(&adapter->ahw.mem_lock); 992 mutex_unlock(&adapter->ahw->mem_lock);
951} 993}
952 994
953void 995void
954qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) 996qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
955{ 997{
956 void __iomem *addr = adapter->ahw.pci_base0 + 998 void __iomem *addr = adapter->ahw->pci_base0 +
957 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); 999 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
958 1000
959 mutex_lock(&adapter->ahw.mem_lock); 1001 mutex_lock(&adapter->ahw->mem_lock);
960 writeq(data, addr); 1002 writeq(data, addr);
961 mutex_unlock(&adapter->ahw.mem_lock); 1003 mutex_unlock(&adapter->ahw->mem_lock);
962} 1004}
963 1005
964#define MAX_CTL_CHECK 1000 1006#define MAX_CTL_CHECK 1000
@@ -997,7 +1039,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
997correct: 1039correct:
998 off8 = off & ~0xf; 1040 off8 = off & ~0xf;
999 1041
1000 mutex_lock(&adapter->ahw.mem_lock); 1042 mutex_lock(&adapter->ahw->mem_lock);
1001 1043
1002 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); 1044 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1003 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 1045 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
@@ -1049,7 +1091,7 @@ correct:
1049 ret = 0; 1091 ret = 0;
1050 1092
1051done: 1093done:
1052 mutex_unlock(&adapter->ahw.mem_lock); 1094 mutex_unlock(&adapter->ahw->mem_lock);
1053 1095
1054 return ret; 1096 return ret;
1055} 1097}
@@ -1091,7 +1133,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1091correct: 1133correct:
1092 off8 = off & ~0xf; 1134 off8 = off & ~0xf;
1093 1135
1094 mutex_lock(&adapter->ahw.mem_lock); 1136 mutex_lock(&adapter->ahw->mem_lock);
1095 1137
1096 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); 1138 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1097 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 1139 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
@@ -1121,7 +1163,7 @@ correct:
1121 ret = 0; 1163 ret = 0;
1122 } 1164 }
1123 1165
1124 mutex_unlock(&adapter->ahw.mem_lock); 1166 mutex_unlock(&adapter->ahw->mem_lock);
1125 1167
1126 return ret; 1168 return ret;
1127} 1169}
@@ -1145,7 +1187,7 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1145 if (qlcnic_rom_fast_read(adapter, offset, &board_type)) 1187 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1146 return -EIO; 1188 return -EIO;
1147 1189
1148 adapter->ahw.board_type = board_type; 1190 adapter->ahw->board_type = board_type;
1149 1191
1150 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { 1192 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1151 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); 1193 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
@@ -1164,20 +1206,20 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1164 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: 1206 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1165 case QLCNIC_BRDTYPE_P3P_10G_XFP: 1207 case QLCNIC_BRDTYPE_P3P_10G_XFP:
1166 case QLCNIC_BRDTYPE_P3P_10000_BASE_T: 1208 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1167 adapter->ahw.port_type = QLCNIC_XGBE; 1209 adapter->ahw->port_type = QLCNIC_XGBE;
1168 break; 1210 break;
1169 case QLCNIC_BRDTYPE_P3P_REF_QG: 1211 case QLCNIC_BRDTYPE_P3P_REF_QG:
1170 case QLCNIC_BRDTYPE_P3P_4_GB: 1212 case QLCNIC_BRDTYPE_P3P_4_GB:
1171 case QLCNIC_BRDTYPE_P3P_4_GB_MM: 1213 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1172 adapter->ahw.port_type = QLCNIC_GBE; 1214 adapter->ahw->port_type = QLCNIC_GBE;
1173 break; 1215 break;
1174 case QLCNIC_BRDTYPE_P3P_10G_TP: 1216 case QLCNIC_BRDTYPE_P3P_10G_TP:
1175 adapter->ahw.port_type = (adapter->portnum < 2) ? 1217 adapter->ahw->port_type = (adapter->portnum < 2) ?
1176 QLCNIC_XGBE : QLCNIC_GBE; 1218 QLCNIC_XGBE : QLCNIC_GBE;
1177 break; 1219 break;
1178 default: 1220 default:
1179 dev_err(&pdev->dev, "unknown board type %x\n", board_type); 1221 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1180 adapter->ahw.port_type = QLCNIC_XGBE; 1222 adapter->ahw->port_type = QLCNIC_XGBE;
1181 break; 1223 break;
1182 } 1224 }
1183 1225
@@ -1220,3 +1262,461 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1220 1262
1221 return rv; 1263 return rv;
1222} 1264}
1265
1266/* FW dump related functions */
1267static u32
1268qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1269 u32 *buffer)
1270{
1271 int i;
1272 u32 addr, data;
1273 struct __crb *crb = &entry->region.crb;
1274 void __iomem *base = adapter->ahw->pci_base0;
1275
1276 addr = crb->addr;
1277
1278 for (i = 0; i < crb->no_ops; i++) {
1279 QLCNIC_RD_DUMP_REG(addr, base, &data);
1280 *buffer++ = cpu_to_le32(addr);
1281 *buffer++ = cpu_to_le32(data);
1282 addr += crb->stride;
1283 }
1284 return crb->no_ops * 2 * sizeof(u32);
1285}
1286
1287static u32
1288qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
1289 struct qlcnic_dump_entry *entry, u32 *buffer)
1290{
1291 int i, k, timeout = 0;
1292 void __iomem *base = adapter->ahw->pci_base0;
1293 u32 addr, data;
1294 u8 opcode, no_ops;
1295 struct __ctrl *ctr = &entry->region.ctrl;
1296 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
1297
1298 addr = ctr->addr;
1299 no_ops = ctr->no_ops;
1300
1301 for (i = 0; i < no_ops; i++) {
1302 k = 0;
1303 opcode = 0;
1304 for (k = 0; k < 8; k++) {
1305 if (!(ctr->opcode & (1 << k)))
1306 continue;
1307 switch (1 << k) {
1308 case QLCNIC_DUMP_WCRB:
1309 QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
1310 break;
1311 case QLCNIC_DUMP_RWCRB:
1312 QLCNIC_RD_DUMP_REG(addr, base, &data);
1313 QLCNIC_WR_DUMP_REG(addr, base, data);
1314 break;
1315 case QLCNIC_DUMP_ANDCRB:
1316 QLCNIC_RD_DUMP_REG(addr, base, &data);
1317 QLCNIC_WR_DUMP_REG(addr, base,
1318 (data & ctr->val2));
1319 break;
1320 case QLCNIC_DUMP_ORCRB:
1321 QLCNIC_RD_DUMP_REG(addr, base, &data);
1322 QLCNIC_WR_DUMP_REG(addr, base,
1323 (data | ctr->val3));
1324 break;
1325 case QLCNIC_DUMP_POLLCRB:
1326 while (timeout <= ctr->timeout) {
1327 QLCNIC_RD_DUMP_REG(addr, base, &data);
1328 if ((data & ctr->val2) == ctr->val1)
1329 break;
1330 msleep(1);
1331 timeout++;
1332 }
1333 if (timeout > ctr->timeout) {
1334 dev_info(&adapter->pdev->dev,
1335 "Timed out, aborting poll CRB\n");
1336 return -EINVAL;
1337 }
1338 break;
1339 case QLCNIC_DUMP_RD_SAVE:
1340 if (ctr->index_a)
1341 addr = t_hdr->saved_state[ctr->index_a];
1342 QLCNIC_RD_DUMP_REG(addr, base, &data);
1343 t_hdr->saved_state[ctr->index_v] = data;
1344 break;
1345 case QLCNIC_DUMP_WRT_SAVED:
1346 if (ctr->index_v)
1347 data = t_hdr->saved_state[ctr->index_v];
1348 else
1349 data = ctr->val1;
1350 if (ctr->index_a)
1351 addr = t_hdr->saved_state[ctr->index_a];
1352 QLCNIC_WR_DUMP_REG(addr, base, data);
1353 break;
1354 case QLCNIC_DUMP_MOD_SAVE_ST:
1355 data = t_hdr->saved_state[ctr->index_v];
1356 data <<= ctr->shl_val;
1357 data >>= ctr->shr_val;
1358 if (ctr->val2)
1359 data &= ctr->val2;
1360 data |= ctr->val3;
1361 data += ctr->val1;
1362 t_hdr->saved_state[ctr->index_v] = data;
1363 break;
1364 default:
1365 dev_info(&adapter->pdev->dev,
1366 "Unknown opcode\n");
1367 break;
1368 }
1369 }
1370 addr += ctr->stride;
1371 }
1372 return 0;
1373}
1374
1375static u32
1376qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1377 u32 *buffer)
1378{
1379 int loop;
1380 u32 val, data = 0;
1381 struct __mux *mux = &entry->region.mux;
1382 void __iomem *base = adapter->ahw->pci_base0;
1383
1384 val = mux->val;
1385 for (loop = 0; loop < mux->no_ops; loop++) {
1386 QLCNIC_WR_DUMP_REG(mux->addr, base, val);
1387 QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
1388 *buffer++ = cpu_to_le32(val);
1389 *buffer++ = cpu_to_le32(data);
1390 val += mux->val_stride;
1391 }
1392 return 2 * mux->no_ops * sizeof(u32);
1393}
1394
1395static u32
1396qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1397 u32 *buffer)
1398{
1399 int i, loop;
1400 u32 cnt, addr, data, que_id = 0;
1401 void __iomem *base = adapter->ahw->pci_base0;
1402 struct __queue *que = &entry->region.que;
1403
1404 addr = que->read_addr;
1405 cnt = que->read_addr_cnt;
1406
1407 for (loop = 0; loop < que->no_ops; loop++) {
1408 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1409 for (i = 0; i < cnt; i++) {
1410 QLCNIC_RD_DUMP_REG(addr, base, &data);
1411 *buffer++ = cpu_to_le32(data);
1412 addr += que->read_addr_stride;
1413 }
1414 que_id += que->stride;
1415 }
1416 return que->no_ops * cnt * sizeof(u32);
1417}
1418
1419static u32
1420qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1421 u32 *buffer)
1422{
1423 int i;
1424 u32 data;
1425 void __iomem *addr;
1426 struct __ocm *ocm = &entry->region.ocm;
1427
1428 addr = adapter->ahw->pci_base0 + ocm->read_addr;
1429 for (i = 0; i < ocm->no_ops; i++) {
1430 data = readl(addr);
1431 *buffer++ = cpu_to_le32(data);
1432 addr += ocm->read_addr_stride;
1433 }
1434 return ocm->no_ops * sizeof(u32);
1435}
1436
1437static u32
1438qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1439 u32 *buffer)
1440{
1441 int i, count = 0;
1442 u32 fl_addr, size, val, lck_val, addr;
1443 struct __mem *rom = &entry->region.mem;
1444 void __iomem *base = adapter->ahw->pci_base0;
1445
1446 fl_addr = rom->addr;
1447 size = rom->size/4;
1448lock_try:
1449 lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
1450 if (!lck_val && count < MAX_CTL_CHECK) {
1451 msleep(10);
1452 count++;
1453 goto lock_try;
1454 }
1455 writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
1456 for (i = 0; i < size; i++) {
1457 addr = fl_addr & 0xFFFF0000;
1458 QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
1459 addr = LSW(fl_addr) + FLASH_ROM_DATA;
1460 QLCNIC_RD_DUMP_REG(addr, base, &val);
1461 fl_addr += 4;
1462 *buffer++ = cpu_to_le32(val);
1463 }
1464 readl(base + QLCNIC_FLASH_SEM2_ULK);
1465 return rom->size;
1466}
1467
1468static u32
1469qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
1470 struct qlcnic_dump_entry *entry, u32 *buffer)
1471{
1472 int i;
1473 u32 cnt, val, data, addr;
1474 void __iomem *base = adapter->ahw->pci_base0;
1475 struct __cache *l1 = &entry->region.cache;
1476
1477 val = l1->init_tag_val;
1478
1479 for (i = 0; i < l1->no_ops; i++) {
1480 QLCNIC_WR_DUMP_REG(l1->addr, base, val);
1481 QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
1482 addr = l1->read_addr;
1483 cnt = l1->read_addr_num;
1484 while (cnt) {
1485 QLCNIC_RD_DUMP_REG(addr, base, &data);
1486 *buffer++ = cpu_to_le32(data);
1487 addr += l1->read_addr_stride;
1488 cnt--;
1489 }
1490 val += l1->stride;
1491 }
1492 return l1->no_ops * l1->read_addr_num * sizeof(u32);
1493}
1494
1495static u32
1496qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
1497 struct qlcnic_dump_entry *entry, u32 *buffer)
1498{
1499 int i;
1500 u32 cnt, val, data, addr;
1501 u8 poll_mask, poll_to, time_out = 0;
1502 void __iomem *base = adapter->ahw->pci_base0;
1503 struct __cache *l2 = &entry->region.cache;
1504
1505 val = l2->init_tag_val;
1506 poll_mask = LSB(MSW(l2->ctrl_val));
1507 poll_to = MSB(MSW(l2->ctrl_val));
1508
1509 for (i = 0; i < l2->no_ops; i++) {
1510 QLCNIC_WR_DUMP_REG(l2->addr, base, val);
1511 do {
1512 QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
1513 LSW(l2->ctrl_val));
1514 QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
1515 if (!(data & poll_mask))
1516 break;
1517 msleep(1);
1518 time_out++;
1519 } while (time_out <= poll_to);
1520 if (time_out > poll_to)
1521 return -EINVAL;
1522
1523 addr = l2->read_addr;
1524 cnt = l2->read_addr_num;
1525 while (cnt) {
1526 QLCNIC_RD_DUMP_REG(addr, base, &data);
1527 *buffer++ = cpu_to_le32(data);
1528 addr += l2->read_addr_stride;
1529 cnt--;
1530 }
1531 val += l2->stride;
1532 }
1533 return l2->no_ops * l2->read_addr_num * sizeof(u32);
1534}
1535
1536static u32
1537qlcnic_read_memory(struct qlcnic_adapter *adapter,
1538 struct qlcnic_dump_entry *entry, u32 *buffer)
1539{
1540 u32 addr, data, test, ret = 0;
1541 int i, reg_read;
1542 struct __mem *mem = &entry->region.mem;
1543 void __iomem *base = adapter->ahw->pci_base0;
1544
1545 reg_read = mem->size;
1546 addr = mem->addr;
1547 /* check for data size of multiple of 16 and 16 byte alignment */
1548 if ((addr & 0xf) || (reg_read%16)) {
1549 dev_info(&adapter->pdev->dev,
1550 "Unaligned memory addr:0x%x size:0x%x\n",
1551 addr, reg_read);
1552 return -EINVAL;
1553 }
1554
1555 mutex_lock(&adapter->ahw->mem_lock);
1556
1557 while (reg_read != 0) {
1558 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
1559 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
1560 QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
1561 TA_CTL_ENABLE | TA_CTL_START);
1562
1563 for (i = 0; i < MAX_CTL_CHECK; i++) {
1564 QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
1565 if (!(test & TA_CTL_BUSY))
1566 break;
1567 }
1568 if (i == MAX_CTL_CHECK) {
1569 if (printk_ratelimit()) {
1570 dev_err(&adapter->pdev->dev,
1571 "failed to read through agent\n");
1572 ret = -EINVAL;
1573 goto out;
1574 }
1575 }
1576 for (i = 0; i < 4; i++) {
1577 QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
1578 *buffer++ = cpu_to_le32(data);
1579 }
1580 addr += 16;
1581 reg_read -= 16;
1582 ret += 16;
1583 }
1584out:
1585 mutex_unlock(&adapter->ahw->mem_lock);
1586 return mem->size;
1587}
1588
1589static u32
1590qlcnic_dump_nop(struct qlcnic_adapter *adapter,
1591 struct qlcnic_dump_entry *entry, u32 *buffer)
1592{
1593 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1594 return 0;
1595}
1596
1597struct qlcnic_dump_operations fw_dump_ops[] = {
1598 { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
1599 { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
1600 { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
1601 { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
1602 { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
1603 { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
1604 { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
1605 { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
1606 { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
1607 { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
1608 { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
1609 { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
1610 { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
1611 { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
1612 { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
1613 { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
1614 { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
1615 { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
1616 { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
1617 { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
1618};
1619
1620/* Walk the template and collect dump for each entry in the dump template */
1621static int
1622qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
1623 u32 size)
1624{
1625 int ret = 1;
1626 if (size != entry->hdr.cap_size) {
1627 dev_info(dev,
1628 "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
1629 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
1630 dev_info(dev, "Aborting further dump capture\n");
1631 ret = 0;
1632 }
1633 return ret;
1634}
1635
1636int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1637{
1638 u32 *buffer;
1639 char mesg[64];
1640 char *msg[] = {mesg, NULL};
1641 int i, k, ops_cnt, ops_index, dump_size = 0;
1642 u32 entry_offset, dump, no_entries, buf_offset = 0;
1643 struct qlcnic_dump_entry *entry;
1644 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1645 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1646
1647 if (fw_dump->clr) {
1648 dev_info(&adapter->pdev->dev,
1649 "Previous dump not cleared, not capturing dump\n");
1650 return -EIO;
1651 }
1652 /* Calculate the size for dump data area only */
1653 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1654 if (i & tmpl_hdr->drv_cap_mask)
1655 dump_size += tmpl_hdr->cap_sizes[k];
1656 if (!dump_size)
1657 return -EIO;
1658
1659 fw_dump->data = vzalloc(dump_size);
1660 if (!fw_dump->data) {
1661 dev_info(&adapter->pdev->dev,
1662 "Unable to allocate (%d KB) for fw dump\n",
1663 dump_size/1024);
1664 return -ENOMEM;
1665 }
1666 buffer = fw_dump->data;
1667 fw_dump->size = dump_size;
1668 no_entries = tmpl_hdr->num_entries;
1669 ops_cnt = ARRAY_SIZE(fw_dump_ops);
1670 entry_offset = tmpl_hdr->offset;
1671 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
1672 tmpl_hdr->sys_info[1] = adapter->fw_version;
1673
1674 for (i = 0; i < no_entries; i++) {
1675 entry = (struct qlcnic_dump_entry *) ((void *) tmpl_hdr +
1676 entry_offset);
1677 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
1678 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1679 entry_offset += entry->hdr.offset;
1680 continue;
1681 }
1682 /* Find the handler for this entry */
1683 ops_index = 0;
1684 while (ops_index < ops_cnt) {
1685 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1686 break;
1687 ops_index++;
1688 }
1689 if (ops_index == ops_cnt) {
1690 dev_info(&adapter->pdev->dev,
1691 "Invalid entry type %d, exiting dump\n",
1692 entry->hdr.type);
1693 goto error;
1694 }
1695 /* Collect dump for this entry */
1696 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1697 if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
1698 dump))
1699 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1700 buf_offset += entry->hdr.cap_size;
1701 entry_offset += entry->hdr.offset;
1702 buffer = fw_dump->data + buf_offset;
1703 }
1704 if (dump_size != buf_offset) {
1705 dev_info(&adapter->pdev->dev,
1706 "Captured(%d) and expected size(%d) do not match\n",
1707 buf_offset, dump_size);
1708 goto error;
1709 } else {
1710 fw_dump->clr = 1;
1711 snprintf(mesg, sizeof(mesg), "FW dump for device: %d\n",
1712 adapter->pdev->devfn);
1713 dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
1714 fw_dump->size);
1715 /* Send a udev event to notify availability of FW dump */
1716 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1717 return 0;
1718 }
1719error:
1720 vfree(fw_dump->data);
1721 return -EINVAL;
1722}
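
qlcnic_dump_fw() above is a table-driven walk: each template entry carries a type, a capture mask and sizes, fw_dump_ops maps the type to a capture routine, entries whose mask is not selected are skipped, and an unknown type aborts the dump. Stripped of the hardware access, the dispatch looks roughly like this (all names here are illustrative, not the driver's):

#include <linux/types.h>

struct example_entry {
	u32 type;		/* selects the handler */
	u32 cap_size;		/* expected capture size in bytes */
};

struct example_op {
	u32 opcode;
	u32 (*handler)(const struct example_entry *entry, u32 *buffer);
};

/* Walk the entries, dispatch by type, return total bytes captured. */
static u32 example_walk(const struct example_entry *entries, int n_entries,
			const struct example_op *ops, int n_ops, u32 *buffer)
{
	u32 captured = 0;
	int i, j;

	for (i = 0; i < n_entries; i++) {
		for (j = 0; j < n_ops; j++)
			if (entries[i].type == ops[j].opcode)
				break;
		if (j == n_ops)
			break;	/* unknown entry type: stop, as the driver does */

		captured += ops[j].handler(&entries[i],
					   buffer + captured / sizeof(u32));
	}
	return captured;
}
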
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index a7f1d5b7e811..5b8bbcf904d5 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -94,7 +94,7 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
94 struct qlcnic_rx_buffer *rx_buf; 94 struct qlcnic_rx_buffer *rx_buf;
95 int i, ring; 95 int i, ring;
96 96
97 recv_ctx = &adapter->recv_ctx; 97 recv_ctx = adapter->recv_ctx;
98 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 98 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
99 rds_ring = &recv_ctx->rds_rings[ring]; 99 rds_ring = &recv_ctx->rds_rings[ring];
100 for (i = 0; i < rds_ring->num_desc; ++i) { 100 for (i = 0; i < rds_ring->num_desc; ++i) {
@@ -119,7 +119,7 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
119 struct qlcnic_rx_buffer *rx_buf; 119 struct qlcnic_rx_buffer *rx_buf;
120 int i, ring; 120 int i, ring;
121 121
122 recv_ctx = &adapter->recv_ctx; 122 recv_ctx = adapter->recv_ctx;
123 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 123 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
124 rds_ring = &recv_ctx->rds_rings[ring]; 124 rds_ring = &recv_ctx->rds_rings[ring];
125 125
@@ -173,7 +173,7 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
173 struct qlcnic_host_tx_ring *tx_ring; 173 struct qlcnic_host_tx_ring *tx_ring;
174 int ring; 174 int ring;
175 175
176 recv_ctx = &adapter->recv_ctx; 176 recv_ctx = adapter->recv_ctx;
177 177
178 if (recv_ctx->rds_rings == NULL) 178 if (recv_ctx->rds_rings == NULL)
179 goto skip_rds; 179 goto skip_rds;
@@ -226,7 +226,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
226 } 226 }
227 tx_ring->cmd_buf_arr = cmd_buf_arr; 227 tx_ring->cmd_buf_arr = cmd_buf_arr;
228 228
229 recv_ctx = &adapter->recv_ctx; 229 recv_ctx = adapter->recv_ctx;
230 230
231 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring); 231 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
232 rds_ring = kzalloc(size, GFP_KERNEL); 232 rds_ring = kzalloc(size, GFP_KERNEL);
@@ -345,7 +345,7 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
345} 345}
346 346
347static int do_rom_fast_read(struct qlcnic_adapter *adapter, 347static int do_rom_fast_read(struct qlcnic_adapter *adapter,
348 int addr, int *valp) 348 u32 addr, u32 *valp)
349{ 349{
350 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); 350 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
351 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 351 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
@@ -398,7 +398,7 @@ qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
398 return ret; 398 return ret;
399} 399}
400 400
401int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp) 401int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
402{ 402{
403 int ret; 403 int ret;
404 404
@@ -864,7 +864,7 @@ nomn:
864 for (i = 0; i < entries; i++) { 864 for (i = 0; i < entries; i++) {
865 865
866 __le32 flags, file_chiprev, offs; 866 __le32 flags, file_chiprev, offs;
867 u8 chiprev = adapter->ahw.revision_id; 867 u8 chiprev = adapter->ahw->revision_id;
868 u32 flagbit; 868 u32 flagbit;
869 869
870 offs = cpu_to_le32(ptab_descr->findex) + 870 offs = cpu_to_le32(ptab_descr->findex) +
@@ -1130,9 +1130,20 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1130 } else { 1130 } else {
1131 u64 data; 1131 u64 data;
1132 u32 hi, lo; 1132 u32 hi, lo;
1133 1133 int ret;
1134 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; 1134 struct qlcnic_flt_entry bootld_entry;
1135 flashaddr = QLCNIC_BOOTLD_START; 1135
1136 ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
1137 &bootld_entry);
1138 if (!ret) {
1139 size = bootld_entry.size / 8;
1140 flashaddr = bootld_entry.start_addr;
1141 } else {
1142 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1143 flashaddr = QLCNIC_BOOTLD_START;
1144 dev_info(&pdev->dev,
1145 "using legacy method to get flash fw region");
1146 }
1136 1147
1137 for (i = 0; i < size; i++) { 1148 for (i = 0; i < size; i++) {
1138 if (qlcnic_rom_fast_read(adapter, 1149 if (qlcnic_rom_fast_read(adapter,
@@ -1379,8 +1390,8 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1379 1390
1380 skb = buffer->skb; 1391 skb = buffer->skb;
1381 1392
1382 if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK || 1393 if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
1383 cksum == STATUS_CKSUM_LOOP))) { 1394 (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
1384 adapter->stats.csummed++; 1395 adapter->stats.csummed++;
1385 skb->ip_summed = CHECKSUM_UNNECESSARY; 1396 skb->ip_summed = CHECKSUM_UNNECESSARY;
1386 } else { 1397 } else {
@@ -1394,7 +1405,7 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1394 return skb; 1405 return skb;
1395} 1406}
1396 1407
1397static int 1408static inline int
1398qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb, 1409qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
1399 u16 *vlan_tag) 1410 u16 *vlan_tag)
1400{ 1411{
@@ -1425,7 +1436,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1425 int ring, u64 sts_data0) 1436 int ring, u64 sts_data0)
1426{ 1437{
1427 struct net_device *netdev = adapter->netdev; 1438 struct net_device *netdev = adapter->netdev;
1428 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1439 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1429 struct qlcnic_rx_buffer *buffer; 1440 struct qlcnic_rx_buffer *buffer;
1430 struct sk_buff *skb; 1441 struct sk_buff *skb;
1431 struct qlcnic_host_rds_ring *rds_ring; 1442 struct qlcnic_host_rds_ring *rds_ring;
@@ -1467,10 +1478,10 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1467 1478
1468 skb->protocol = eth_type_trans(skb, netdev); 1479 skb->protocol = eth_type_trans(skb, netdev);
1469 1480
1470 if ((vid != 0xffff) && adapter->vlgrp) 1481 if (vid != 0xffff)
1471 vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb); 1482 __vlan_hwaccel_put_tag(skb, vid);
1472 else 1483
1473 napi_gro_receive(&sds_ring->napi, skb); 1484 napi_gro_receive(&sds_ring->napi, skb);
1474 1485
1475 adapter->stats.rx_pkts++; 1486 adapter->stats.rx_pkts++;
1476 adapter->stats.rxbytes += length; 1487 adapter->stats.rxbytes += length;
@@ -1488,7 +1499,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1488 int ring, u64 sts_data0, u64 sts_data1) 1499 int ring, u64 sts_data0, u64 sts_data1)
1489{ 1500{
1490 struct net_device *netdev = adapter->netdev; 1501 struct net_device *netdev = adapter->netdev;
1491 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1502 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1492 struct qlcnic_rx_buffer *buffer; 1503 struct qlcnic_rx_buffer *buffer;
1493 struct sk_buff *skb; 1504 struct sk_buff *skb;
1494 struct qlcnic_host_rds_ring *rds_ring; 1505 struct qlcnic_host_rds_ring *rds_ring;
@@ -1552,10 +1563,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1552 1563
1553 length = skb->len; 1564 length = skb->len;
1554 1565
1555 if ((vid != 0xffff) && adapter->vlgrp) 1566 if (vid != 0xffff)
1556 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid); 1567 __vlan_hwaccel_put_tag(skb, vid);
1557 else 1568 netif_receive_skb(skb);
1558 netif_receive_skb(skb);
1559 1569
1560 adapter->stats.lro_pkts++; 1570 adapter->stats.lro_pkts++;
1561 adapter->stats.lrobytes += length; 1571 adapter->stats.lrobytes += length;
@@ -1625,7 +1635,7 @@ skip:
1625 1635
1626 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 1636 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1627 struct qlcnic_host_rds_ring *rds_ring = 1637 struct qlcnic_host_rds_ring *rds_ring =
1628 &adapter->recv_ctx.rds_rings[ring]; 1638 &adapter->recv_ctx->rds_rings[ring];
1629 1639
1630 if (!list_empty(&sds_ring->free_list[ring])) { 1640 if (!list_empty(&sds_ring->free_list[ring])) {
1631 list_for_each(cur, &sds_ring->free_list[ring]) { 1641 list_for_each(cur, &sds_ring->free_list[ring]) {
@@ -1651,12 +1661,13 @@ skip:
1651} 1661}
1652 1662
1653void 1663void
1654qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid, 1664qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1655 struct qlcnic_host_rds_ring *rds_ring) 1665 struct qlcnic_host_rds_ring *rds_ring)
1656{ 1666{
1657 struct rcv_desc *pdesc; 1667 struct rcv_desc *pdesc;
1658 struct qlcnic_rx_buffer *buffer; 1668 struct qlcnic_rx_buffer *buffer;
1659 int producer, count = 0; 1669 int count = 0;
1670 u32 producer;
1660 struct list_head *head; 1671 struct list_head *head;
1661 1672
1662 producer = rds_ring->producer; 1673 producer = rds_ring->producer;
@@ -1696,7 +1707,8 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1696{ 1707{
1697 struct rcv_desc *pdesc; 1708 struct rcv_desc *pdesc;
1698 struct qlcnic_rx_buffer *buffer; 1709 struct qlcnic_rx_buffer *buffer;
1699 int producer, count = 0; 1710 int count = 0;
1711 uint32_t producer;
1700 struct list_head *head; 1712 struct list_head *head;
1701 1713
1702 if (!spin_trylock(&rds_ring->lock)) 1714 if (!spin_trylock(&rds_ring->lock))
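
The receive hunks above drop the vlan_group plumbing (vlan_gro_receive()/vlan_hwaccel_receive_skb()) in favour of tagging the skb directly and handing it to the ordinary receive path. A reduced sketch of the new pattern, using the same two-argument __vlan_hwaccel_put_tag() the patch uses; the 0xffff "no tag" sentinel is taken from the hunks and everything else is trimmed:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Sketch of the RX completion tail after the conversion above. */
static void rx_deliver(struct napi_struct *napi, struct sk_buff *skb, u16 vid)
{
	if (vid != 0xffff)			/* 0xffff: no VLAN in descriptor */
		__vlan_hwaccel_put_tag(skb, vid);	/* tag rides in the skb */

	napi_gro_receive(napi, skb);		/* no per-driver vlan_group needed */
}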
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index cb1a1ef36c0a..3ab7d2c7baf2 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -13,12 +13,12 @@
13 13
14#include <linux/swab.h> 14#include <linux/swab.h>
15#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16#include <linux/if_vlan.h>
17#include <net/ip.h> 16#include <net/ip.h>
18#include <linux/ipv6.h> 17#include <linux/ipv6.h>
19#include <linux/inetdevice.h> 18#include <linux/inetdevice.h>
20#include <linux/sysfs.h> 19#include <linux/sysfs.h>
21#include <linux/aer.h> 20#include <linux/aer.h>
21#include <linux/log2.h>
22 22
23MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); 23MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
24MODULE_LICENSE("GPL"); 24MODULE_LICENSE("GPL");
@@ -98,6 +98,9 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
98static int qlcnicvf_start_firmware(struct qlcnic_adapter *); 98static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
99static void qlcnic_set_netdev_features(struct qlcnic_adapter *, 99static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
100 struct qlcnic_esw_func_cfg *); 100 struct qlcnic_esw_func_cfg *);
101static void qlcnic_vlan_rx_add(struct net_device *, u16);
102static void qlcnic_vlan_rx_del(struct net_device *, u16);
103
101/* PCI Device ID Table */ 104/* PCI Device ID Table */
102#define ENTRY(device) \ 105#define ENTRY(device) \
103 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ 106 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -113,7 +116,7 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
113MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); 116MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
114 117
115 118
116void 119inline void
117qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 120qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
118 struct qlcnic_host_tx_ring *tx_ring) 121 struct qlcnic_host_tx_ring *tx_ring)
119{ 122{
@@ -169,7 +172,7 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
169{ 172{
170 int ring; 173 int ring;
171 struct qlcnic_host_sds_ring *sds_ring; 174 struct qlcnic_host_sds_ring *sds_ring;
172 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 175 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
173 176
174 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) 177 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
175 return -ENOMEM; 178 return -ENOMEM;
@@ -193,14 +196,14 @@ qlcnic_napi_del(struct qlcnic_adapter *adapter)
193{ 196{
194 int ring; 197 int ring;
195 struct qlcnic_host_sds_ring *sds_ring; 198 struct qlcnic_host_sds_ring *sds_ring;
196 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 199 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
197 200
198 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 201 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
199 sds_ring = &recv_ctx->sds_rings[ring]; 202 sds_ring = &recv_ctx->sds_rings[ring];
200 netif_napi_del(&sds_ring->napi); 203 netif_napi_del(&sds_ring->napi);
201 } 204 }
202 205
203 qlcnic_free_sds_rings(&adapter->recv_ctx); 206 qlcnic_free_sds_rings(adapter->recv_ctx);
204} 207}
205 208
206static void 209static void
@@ -208,7 +211,7 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
208{ 211{
209 int ring; 212 int ring;
210 struct qlcnic_host_sds_ring *sds_ring; 213 struct qlcnic_host_sds_ring *sds_ring;
211 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 214 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
212 215
213 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 216 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
214 return; 217 return;
@@ -225,7 +228,7 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
225{ 228{
226 int ring; 229 int ring;
227 struct qlcnic_host_sds_ring *sds_ring; 230 struct qlcnic_host_sds_ring *sds_ring;
228 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 231 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
229 232
230 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 233 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
231 return; 234 return;
@@ -317,13 +320,6 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
317 return 0; 320 return 0;
318} 321}
319 322
320static void qlcnic_vlan_rx_register(struct net_device *netdev,
321 struct vlan_group *grp)
322{
323 struct qlcnic_adapter *adapter = netdev_priv(netdev);
324 adapter->vlgrp = grp;
325}
326
327static const struct net_device_ops qlcnic_netdev_ops = { 323static const struct net_device_ops qlcnic_netdev_ops = {
328 .ndo_open = qlcnic_open, 324 .ndo_open = qlcnic_open,
329 .ndo_stop = qlcnic_close, 325 .ndo_stop = qlcnic_close,
@@ -333,8 +329,11 @@ static const struct net_device_ops qlcnic_netdev_ops = {
333 .ndo_set_multicast_list = qlcnic_set_multi, 329 .ndo_set_multicast_list = qlcnic_set_multi,
334 .ndo_set_mac_address = qlcnic_set_mac, 330 .ndo_set_mac_address = qlcnic_set_mac,
335 .ndo_change_mtu = qlcnic_change_mtu, 331 .ndo_change_mtu = qlcnic_change_mtu,
332 .ndo_fix_features = qlcnic_fix_features,
333 .ndo_set_features = qlcnic_set_features,
336 .ndo_tx_timeout = qlcnic_tx_timeout, 334 .ndo_tx_timeout = qlcnic_tx_timeout,
337 .ndo_vlan_rx_register = qlcnic_vlan_rx_register, 335 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
336 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
338#ifdef CONFIG_NET_POLL_CONTROLLER 337#ifdef CONFIG_NET_POLL_CONTROLLER
339 .ndo_poll_controller = qlcnic_poll_controller, 338 .ndo_poll_controller = qlcnic_poll_controller,
340#endif 339#endif
@@ -352,72 +351,87 @@ static struct qlcnic_nic_template qlcnic_vf_ops = {
352 .start_firmware = qlcnicvf_start_firmware 351 .start_firmware = qlcnicvf_start_firmware
353}; 352};
354 353
355static void 354static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
356qlcnic_setup_intr(struct qlcnic_adapter *adapter)
357{ 355{
358 const struct qlcnic_legacy_intr_set *legacy_intrp;
359 struct pci_dev *pdev = adapter->pdev; 356 struct pci_dev *pdev = adapter->pdev;
360 int err, num_msix; 357 int err = -1;
361
362 if (adapter->rss_supported) {
363 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
364 MSIX_ENTRIES_PER_ADAPTER : 2;
365 } else
366 num_msix = 1;
367 358
368 adapter->max_sds_rings = 1; 359 adapter->max_sds_rings = 1;
369
370 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); 360 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
371
372 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
373
374 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
375 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
376 legacy_intrp->tgt_status_reg);
377 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
378 legacy_intrp->tgt_mask_reg);
379 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
380
381 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
382 ISR_INT_STATE_REG);
383
384 qlcnic_set_msix_bit(pdev, 0); 361 qlcnic_set_msix_bit(pdev, 0);
385 362
386 if (adapter->msix_supported) { 363 if (adapter->msix_supported) {
387 364 enable_msix:
388 qlcnic_init_msix_entries(adapter, num_msix); 365 qlcnic_init_msix_entries(adapter, num_msix);
389 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 366 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
390 if (err == 0) { 367 if (err == 0) {
391 adapter->flags |= QLCNIC_MSIX_ENABLED; 368 adapter->flags |= QLCNIC_MSIX_ENABLED;
392 qlcnic_set_msix_bit(pdev, 1); 369 qlcnic_set_msix_bit(pdev, 1);
393 370
394 if (adapter->rss_supported) 371 adapter->max_sds_rings = num_msix;
395 adapter->max_sds_rings = num_msix;
396 372
397 dev_info(&pdev->dev, "using msi-x interrupts\n"); 373 dev_info(&pdev->dev, "using msi-x interrupts\n");
398 return; 374 return err;
399 } 375 }
376 if (err > 0) {
377 num_msix = rounddown_pow_of_two(err);
378 if (num_msix)
379 goto enable_msix;
380 }
381 }
382 return err;
383}
400 384
401 if (err > 0)
402 pci_disable_msix(pdev);
403 385
404 /* fall through for msi */ 386static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
405 } 387{
388 const struct qlcnic_legacy_intr_set *legacy_intrp;
389 struct pci_dev *pdev = adapter->pdev;
406 390
407 if (use_msi && !pci_enable_msi(pdev)) { 391 if (use_msi && !pci_enable_msi(pdev)) {
408 adapter->flags |= QLCNIC_MSI_ENABLED; 392 adapter->flags |= QLCNIC_MSI_ENABLED;
409 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, 393 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
410 msi_tgt_status[adapter->ahw.pci_func]); 394 msi_tgt_status[adapter->ahw->pci_func]);
411 dev_info(&pdev->dev, "using msi interrupts\n"); 395 dev_info(&pdev->dev, "using msi interrupts\n");
412 adapter->msix_entries[0].vector = pdev->irq; 396 adapter->msix_entries[0].vector = pdev->irq;
413 return; 397 return;
414 } 398 }
415 399
400 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
401
402 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
403 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
404 legacy_intrp->tgt_status_reg);
405 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
406 legacy_intrp->tgt_mask_reg);
407 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
408
409 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
410 ISR_INT_STATE_REG);
416 dev_info(&pdev->dev, "using legacy interrupts\n"); 411 dev_info(&pdev->dev, "using legacy interrupts\n");
417 adapter->msix_entries[0].vector = pdev->irq; 412 adapter->msix_entries[0].vector = pdev->irq;
418} 413}
419 414
420static void 415static void
416qlcnic_setup_intr(struct qlcnic_adapter *adapter)
417{
418 int num_msix;
419
420 if (adapter->msix_supported) {
421 num_msix = (num_online_cpus() >=
422 QLCNIC_DEF_NUM_STS_DESC_RINGS) ?
423 QLCNIC_DEF_NUM_STS_DESC_RINGS :
424 QLCNIC_MIN_NUM_RSS_RINGS;
425 } else
426 num_msix = 1;
427
428 if (!qlcnic_enable_msix(adapter, num_msix))
429 return;
430
431 qlcnic_enable_msi_legacy(adapter);
432}
433
434static void
421qlcnic_teardown_intr(struct qlcnic_adapter *adapter) 435qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
422{ 436{
423 if (adapter->flags & QLCNIC_MSIX_ENABLED) 437 if (adapter->flags & QLCNIC_MSIX_ENABLED)
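
qlcnic_setup_intr() is split above into qlcnic_enable_msix() and qlcnic_enable_msi_legacy(), and the MSI-X path now retries when pci_enable_msix() reports that fewer vectors are available. A compact sketch of that retry rule, assuming a positive return value is the vector count the platform can actually grant; the entries initialisation stands in for qlcnic_init_msix_entries():

#include <linux/pci.h>
#include <linux/log2.h>
#include <linux/errno.h>

/* Request num_msix vectors, falling back to the largest power of two that
 * fits what pci_enable_msix() says is available. Illustrative only. */
static int enable_msix_pow2(struct pci_dev *pdev, struct msix_entry *entries,
			    u32 num_msix)
{
	int err;
	u32 i;

	for (;;) {
		for (i = 0; i < num_msix; i++)
			entries[i].entry = i;

		err = pci_enable_msix(pdev, entries, num_msix);
		if (err == 0)
			return num_msix;	/* granted */
		if (err < 0)
			return err;		/* hard failure */

		num_msix = rounddown_pow_of_two(err);
		if (!num_msix)
			return -ENOSPC;		/* nothing smaller to try */
	}
}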
@@ -429,8 +443,8 @@ qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
429static void 443static void
430qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter) 444qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
431{ 445{
432 if (adapter->ahw.pci_base0 != NULL) 446 if (adapter->ahw->pci_base0 != NULL)
433 iounmap(adapter->ahw.pci_base0); 447 iounmap(adapter->ahw->pci_base0);
434} 448}
435 449
436static int 450static int
@@ -464,8 +478,10 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
464 478
465 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 479 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
466 pfn = pci_info[i].id; 480 pfn = pci_info[i].id;
467 if (pfn > QLCNIC_MAX_PCI_FUNC) 481 if (pfn > QLCNIC_MAX_PCI_FUNC) {
468 return QL_STATUS_INVALID_PARAM; 482 ret = QL_STATUS_INVALID_PARAM;
483 goto err_eswitch;
484 }
469 adapter->npars[pfn].active = (u8)pci_info[i].active; 485 adapter->npars[pfn].active = (u8)pci_info[i].active;
470 adapter->npars[pfn].type = (u8)pci_info[i].type; 486 adapter->npars[pfn].type = (u8)pci_info[i].type;
471 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port; 487 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
@@ -498,7 +514,7 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
498 u32 ref_count; 514 u32 ref_count;
499 int i, ret = 1; 515 int i, ret = 1;
500 u32 data = QLCNIC_MGMT_FUNC; 516 u32 data = QLCNIC_MGMT_FUNC;
501 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 517 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
502 518
503 /* If other drivers are not in use set their privilege level */ 519 /* If other drivers are not in use set their privilege level */
504 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); 520 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
@@ -510,16 +526,16 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
510 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 526 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
511 id = i; 527 id = i;
512 if (adapter->npars[i].type != QLCNIC_TYPE_NIC || 528 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
513 id == adapter->ahw.pci_func) 529 id == adapter->ahw->pci_func)
514 continue; 530 continue;
515 data |= (qlcnic_config_npars & 531 data |= (qlcnic_config_npars &
516 QLC_DEV_SET_DRV(0xf, id)); 532 QLC_DEV_SET_DRV(0xf, id));
517 } 533 }
518 } else { 534 } else {
519 data = readl(priv_op); 535 data = readl(priv_op);
520 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) | 536 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
521 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, 537 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
522 adapter->ahw.pci_func)); 538 adapter->ahw->pci_func));
523 } 539 }
524 writel(data, priv_op); 540 writel(data, priv_op);
525 qlcnic_api_unlock(adapter); 541 qlcnic_api_unlock(adapter);
@@ -537,22 +553,23 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
537 u32 op_mode, priv_level; 553 u32 op_mode, priv_level;
538 554
539 /* Determine FW API version */ 555 /* Determine FW API version */
540 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API); 556 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
557 QLCNIC_FW_API);
541 558
542 /* Find PCI function number */ 559 /* Find PCI function number */
543 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); 560 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
544 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE; 561 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
545 msix_base = readl(msix_base_addr); 562 msix_base = readl(msix_base_addr);
546 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; 563 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
547 adapter->ahw.pci_func = func; 564 adapter->ahw->pci_func = func;
548 565
549 /* Determine function privilege level */ 566 /* Determine function privilege level */
550 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 567 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
551 op_mode = readl(priv_op); 568 op_mode = readl(priv_op);
552 if (op_mode == QLC_DEV_DRV_DEFAULT) 569 if (op_mode == QLC_DEV_DRV_DEFAULT)
553 priv_level = QLCNIC_MGMT_FUNC; 570 priv_level = QLCNIC_MGMT_FUNC;
554 else 571 else
555 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 572 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
556 573
557 if (priv_level == QLCNIC_NON_PRIV_FUNC) { 574 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
558 adapter->op_mode = QLCNIC_NON_PRIV_FUNC; 575 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
@@ -591,13 +608,14 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
591 608
592 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 609 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
593 610
594 adapter->ahw.pci_base0 = mem_ptr0; 611 adapter->ahw->pci_base0 = mem_ptr0;
595 adapter->ahw.pci_len0 = pci_len0; 612 adapter->ahw->pci_len0 = pci_len0;
596 613
597 qlcnic_check_vf(adapter); 614 qlcnic_check_vf(adapter);
598 615
599 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, 616 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
600 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func))); 617 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
618 adapter->ahw->pci_func)));
601 619
602 return 0; 620 return 0;
603} 621}
@@ -639,7 +657,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
639 657
640 dev_info(&pdev->dev, "firmware v%d.%d.%d\n", 658 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
641 fw_major, fw_minor, fw_build); 659 fw_major, fw_minor, fw_build);
642 if (adapter->ahw.port_type == QLCNIC_XGBE) { 660 if (adapter->ahw->port_type == QLCNIC_XGBE) {
643 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 661 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
644 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; 662 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
645 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; 663 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
@@ -651,7 +669,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
651 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; 669 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
652 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; 670 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
653 671
654 } else if (adapter->ahw.port_type == QLCNIC_GBE) { 672 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
655 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; 673 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
656 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 674 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
657 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 675 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
@@ -659,7 +677,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
659 } 677 }
660 678
661 adapter->msix_supported = !!use_msi_x; 679 adapter->msix_supported = !!use_msi_x;
662 adapter->rss_supported = !!use_msi_x;
663 680
664 adapter->num_txd = MAX_CMD_DESCRIPTORS; 681 adapter->num_txd = MAX_CMD_DESCRIPTORS;
665 682
@@ -672,7 +689,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
672 int err; 689 int err;
673 struct qlcnic_info nic_info; 690 struct qlcnic_info nic_info;
674 691
675 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func); 692 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
676 if (err) 693 if (err)
677 return err; 694 return err;
678 695
@@ -708,6 +725,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
708} 725}
709 726
710static void 727static void
728qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
729{
730 struct qlcnic_adapter *adapter = netdev_priv(netdev);
731 set_bit(vid, adapter->vlans);
732}
733
734static void
735qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
736{
737 struct qlcnic_adapter *adapter = netdev_priv(netdev);
738
739 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
740 clear_bit(vid, adapter->vlans);
741}
742
743static void
711qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, 744qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
712 struct qlcnic_esw_func_cfg *esw_cfg) 745 struct qlcnic_esw_func_cfg *esw_cfg)
713{ 746{
@@ -734,7 +767,7 @@ qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
734 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 767 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
735 return 0; 768 return 0;
736 769
737 esw_cfg.pci_func = adapter->ahw.pci_func; 770 esw_cfg.pci_func = adapter->ahw->pci_func;
738 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg)) 771 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
739 return -EIO; 772 return -EIO;
740 qlcnic_set_vlan_config(adapter, &esw_cfg); 773 qlcnic_set_vlan_config(adapter, &esw_cfg);
@@ -750,28 +783,27 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
750 struct net_device *netdev = adapter->netdev; 783 struct net_device *netdev = adapter->netdev;
751 unsigned long features, vlan_features; 784 unsigned long features, vlan_features;
752 785
753 features = (NETIF_F_SG | NETIF_F_IP_CSUM | 786 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
754 NETIF_F_IPV6_CSUM | NETIF_F_GRO); 787 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
755 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | 788 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
756 NETIF_F_IPV6_CSUM); 789 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
757 790
758 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { 791 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
759 features |= (NETIF_F_TSO | NETIF_F_TSO6); 792 features |= (NETIF_F_TSO | NETIF_F_TSO6);
760 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); 793 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
761 } 794 }
762 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 795
796 if (netdev->features & NETIF_F_LRO)
763 features |= NETIF_F_LRO; 797 features |= NETIF_F_LRO;
764 798
765 if (esw_cfg->offload_flags & BIT_0) { 799 if (esw_cfg->offload_flags & BIT_0) {
766 netdev->features |= features; 800 netdev->features |= features;
767 adapter->rx_csum = 1;
768 if (!(esw_cfg->offload_flags & BIT_1)) 801 if (!(esw_cfg->offload_flags & BIT_1))
769 netdev->features &= ~NETIF_F_TSO; 802 netdev->features &= ~NETIF_F_TSO;
770 if (!(esw_cfg->offload_flags & BIT_2)) 803 if (!(esw_cfg->offload_flags & BIT_2))
771 netdev->features &= ~NETIF_F_TSO6; 804 netdev->features &= ~NETIF_F_TSO6;
772 } else { 805 } else {
773 netdev->features &= ~features; 806 netdev->features &= ~features;
774 adapter->rx_csum = 0;
775 } 807 }
776 808
777 netdev->vlan_features = (features & vlan_features); 809 netdev->vlan_features = (features & vlan_features);
@@ -791,14 +823,14 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
791 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) 823 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
792 return 0; 824 return 0;
793 825
794 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 826 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
795 op_mode = readl(priv_op); 827 op_mode = readl(priv_op);
796 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 828 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
797 829
798 if (op_mode == QLC_DEV_DRV_DEFAULT) 830 if (op_mode == QLC_DEV_DRV_DEFAULT)
799 priv_level = QLCNIC_MGMT_FUNC; 831 priv_level = QLCNIC_MGMT_FUNC;
800 else 832 else
801 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 833 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
802 834
803 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 835 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
804 if (priv_level == QLCNIC_MGMT_FUNC) { 836 if (priv_level == QLCNIC_MGMT_FUNC) {
@@ -1038,7 +1070,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1038 1070
1039 unsigned long flags = 0; 1071 unsigned long flags = 0;
1040 struct net_device *netdev = adapter->netdev; 1072 struct net_device *netdev = adapter->netdev;
1041 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1073 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1042 1074
1043 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1075 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1044 handler = qlcnic_tmp_intr; 1076 handler = qlcnic_tmp_intr;
@@ -1075,7 +1107,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
1075 int ring; 1107 int ring;
1076 struct qlcnic_host_sds_ring *sds_ring; 1108 struct qlcnic_host_sds_ring *sds_ring;
1077 1109
1078 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 1110 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1079 1111
1080 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1112 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1081 sds_ring = &recv_ctx->sds_rings[ring]; 1113 sds_ring = &recv_ctx->sds_rings[ring];
@@ -1083,20 +1115,6 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
1083 } 1115 }
1084} 1116}
1085 1117
1086static void
1087qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1088{
1089 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1090 adapter->coal.normal.data.rx_time_us =
1091 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1092 adapter->coal.normal.data.rx_packets =
1093 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1094 adapter->coal.normal.data.tx_time_us =
1095 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1096 adapter->coal.normal.data.tx_packets =
1097 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1098}
1099
1100static int 1118static int
1101__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) 1119__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1102{ 1120{
@@ -1115,14 +1133,14 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1115 return -EIO; 1133 return -EIO;
1116 1134
1117 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 1135 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1118 rds_ring = &adapter->recv_ctx.rds_rings[ring]; 1136 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1119 qlcnic_post_rx_buffers(adapter, ring, rds_ring); 1137 qlcnic_post_rx_buffers(adapter, rds_ring);
1120 } 1138 }
1121 1139
1122 qlcnic_set_multi(netdev); 1140 qlcnic_set_multi(netdev);
1123 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); 1141 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1124 1142
1125 adapter->ahw.linkup = 0; 1143 adapter->ahw->linkup = 0;
1126 1144
1127 if (adapter->max_sds_rings > 1) 1145 if (adapter->max_sds_rings > 1)
1128 qlcnic_config_rss(adapter, 1); 1146 qlcnic_config_rss(adapter, 1);
@@ -1230,8 +1248,6 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
1230 goto err_out_free_hw; 1248 goto err_out_free_hw;
1231 } 1249 }
1232 1250
1233 qlcnic_init_coalesce_defaults(adapter);
1234
1235 qlcnic_create_sysfs_entries(adapter); 1251 qlcnic_create_sysfs_entries(adapter);
1236 1252
1237 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; 1253 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
@@ -1272,7 +1288,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1272 clear_bit(__QLCNIC_DEV_UP, &adapter->state); 1288 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1273 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1289 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1274 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1290 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1275 sds_ring = &adapter->recv_ctx.sds_rings[ring]; 1291 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1276 qlcnic_disable_int(sds_ring); 1292 qlcnic_disable_int(sds_ring);
1277 } 1293 }
1278 } 1294 }
@@ -1293,6 +1309,48 @@ out:
1293 netif_device_attach(netdev); 1309 netif_device_attach(netdev);
1294} 1310}
1295 1311
1312static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1313{
1314 int err = 0;
1315 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1316 GFP_KERNEL);
1317 if (!adapter->ahw) {
1318 dev_err(&adapter->pdev->dev,
1319 "Failed to allocate recv ctx resources for adapter\n");
1320 err = -ENOMEM;
1321 goto err_out;
1322 }
1323 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1324 GFP_KERNEL);
1325 if (!adapter->recv_ctx) {
1326 dev_err(&adapter->pdev->dev,
1327 "Failed to allocate recv ctx resources for adapter\n");
1328 kfree(adapter->ahw);
1329 adapter->ahw = NULL;
1330 err = -ENOMEM;
1331 goto err_out;
1332 }
1333 /* Initialize interrupt coalesce parameters */
1334 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1335 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1336 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1337err_out:
1338 return err;
1339}
1340
1341static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1342{
1343 kfree(adapter->recv_ctx);
1344 adapter->recv_ctx = NULL;
1345
1346 if (adapter->ahw->fw_dump.tmpl_hdr) {
1347 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1348 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1349 }
1350 kfree(adapter->ahw);
1351 adapter->ahw = NULL;
1352}
1353
1296int qlcnic_diag_alloc_res(struct net_device *netdev, int test) 1354int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1297{ 1355{
1298 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1356 struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -1325,13 +1383,13 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1325 } 1383 }
1326 1384
1327 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 1385 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1328 rds_ring = &adapter->recv_ctx.rds_rings[ring]; 1386 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1329 qlcnic_post_rx_buffers(adapter, ring, rds_ring); 1387 qlcnic_post_rx_buffers(adapter, rds_ring);
1330 } 1388 }
1331 1389
1332 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1390 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1333 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1391 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1334 sds_ring = &adapter->recv_ctx.sds_rings[ring]; 1392 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1335 qlcnic_enable_int(sds_ring); 1393 qlcnic_enable_int(sds_ring);
1336 } 1394 }
1337 } 1395 }
@@ -1399,7 +1457,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1399 int err; 1457 int err;
1400 struct pci_dev *pdev = adapter->pdev; 1458 struct pci_dev *pdev = adapter->pdev;
1401 1459
1402 adapter->rx_csum = 1;
1403 adapter->mc_enabled = 0; 1460 adapter->mc_enabled = 0;
1404 adapter->max_mc_count = 38; 1461 adapter->max_mc_count = 38;
1405 1462
@@ -1410,26 +1467,24 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1410 1467
1411 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); 1468 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1412 1469
1413 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1470 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1414 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX); 1471 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1415 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1416 NETIF_F_IPV6_CSUM);
1417 1472
1418 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { 1473 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1419 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6); 1474 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1420 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); 1475 if (pci_using_dac)
1421 } 1476 netdev->hw_features |= NETIF_F_HIGHDMA;
1422 1477
1423 if (pci_using_dac) { 1478 netdev->vlan_features = netdev->hw_features;
1424 netdev->features |= NETIF_F_HIGHDMA;
1425 netdev->vlan_features |= NETIF_F_HIGHDMA;
1426 }
1427 1479
1428 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX) 1480 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1429 netdev->features |= (NETIF_F_HW_VLAN_TX); 1481 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1430
1431 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 1482 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1432 netdev->features |= NETIF_F_LRO; 1483 netdev->hw_features |= NETIF_F_LRO;
1484
1485 netdev->features |= netdev->hw_features |
1486 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1487
1433 netdev->irq = adapter->msix_entries[0].vector; 1488 netdev->irq = adapter->msix_entries[0].vector;
1434 1489
1435 netif_carrier_off(netdev); 1490 netif_carrier_off(netdev);
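
The netdev setup hunk above moves the toggleable offloads into netdev->hw_features (paired with the new ndo_fix_features/ndo_set_features callbacks registered earlier in this file) and then derives vlan_features and features from it. A stripped-down sketch of that pattern, with the firmware capability tests reduced to plain booleans:

#include <linux/netdevice.h>

/* Illustrative only: offload setup in the hw_features style used above. */
static void setup_offloads(struct net_device *netdev, bool has_tso, bool dac)
{
	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	if (has_tso)
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (dac)
		netdev->hw_features |= NETIF_F_HIGHDMA;

	netdev->vlan_features = netdev->hw_features;

	/* VLAN acceleration is always on, so it sits in features only */
	netdev->features |= netdev->hw_features |
			    NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
}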
@@ -1459,6 +1514,19 @@ static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1459 return 0; 1514 return 0;
1460} 1515}
1461 1516
1517static int
1518qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1519{
1520 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1521 GFP_KERNEL);
1522
1523 if (adapter->msix_entries)
1524 return 0;
1525
1526 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
1527 return -ENOMEM;
1528}
1529
1462static int __devinit 1530static int __devinit
1463qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1531qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1464{ 1532{
@@ -1501,23 +1569,30 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1501 adapter = netdev_priv(netdev); 1569 adapter = netdev_priv(netdev);
1502 adapter->netdev = netdev; 1570 adapter->netdev = netdev;
1503 adapter->pdev = pdev; 1571 adapter->pdev = pdev;
1504 adapter->dev_rst_time = jiffies;
1505 1572
1573 if (qlcnic_alloc_adapter_resources(adapter))
1574 goto err_out_free_netdev;
1575
1576 adapter->dev_rst_time = jiffies;
1506 revision_id = pdev->revision; 1577 revision_id = pdev->revision;
1507 adapter->ahw.revision_id = revision_id; 1578 adapter->ahw->revision_id = revision_id;
1508 1579
1509 rwlock_init(&adapter->ahw.crb_lock); 1580 rwlock_init(&adapter->ahw->crb_lock);
1510 mutex_init(&adapter->ahw.mem_lock); 1581 mutex_init(&adapter->ahw->mem_lock);
1511 1582
1512 spin_lock_init(&adapter->tx_clean_lock); 1583 spin_lock_init(&adapter->tx_clean_lock);
1513 INIT_LIST_HEAD(&adapter->mac_list); 1584 INIT_LIST_HEAD(&adapter->mac_list);
1514 1585
1515 err = qlcnic_setup_pci_map(adapter); 1586 err = qlcnic_setup_pci_map(adapter);
1516 if (err) 1587 if (err)
1517 goto err_out_free_netdev; 1588 goto err_out_free_hw;
1518 1589
1519 /* This will be reset for mezz cards */ 1590 /* This will be reset for mezz cards */
1520 adapter->portnum = adapter->ahw.pci_func; 1591 adapter->portnum = adapter->ahw->pci_func;
1592
1593 /* Get FW dump template and store it */
1594 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
1595 qlcnic_fw_cmd_get_minidump_temp(adapter);
1521 1596
1522 err = qlcnic_get_board_info(adapter); 1597 err = qlcnic_get_board_info(adapter);
1523 if (err) { 1598 if (err) {
@@ -1545,11 +1620,15 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1545 1620
1546 pr_info("%s: %s Board Chip rev 0x%x\n", 1621 pr_info("%s: %s Board Chip rev 0x%x\n",
1547 module_name(THIS_MODULE), 1622 module_name(THIS_MODULE),
1548 brd_name, adapter->ahw.revision_id); 1623 brd_name, adapter->ahw->revision_id);
1549 } 1624 }
1550 1625
1551 qlcnic_clear_stats(adapter); 1626 qlcnic_clear_stats(adapter);
1552 1627
1628 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
1629 if (err)
1630 goto err_out_decr_ref;
1631
1553 qlcnic_setup_intr(adapter); 1632 qlcnic_setup_intr(adapter);
1554 1633
1555 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); 1634 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
@@ -1560,7 +1639,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1560 1639
1561 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 1640 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1562 1641
1563 switch (adapter->ahw.port_type) { 1642 switch (adapter->ahw->port_type) {
1564 case QLCNIC_GBE: 1643 case QLCNIC_GBE:
1565 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", 1644 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1566 adapter->netdev->name); 1645 adapter->netdev->name);
@@ -1578,6 +1657,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1578 1657
1579err_out_disable_msi: 1658err_out_disable_msi:
1580 qlcnic_teardown_intr(adapter); 1659 qlcnic_teardown_intr(adapter);
1660 kfree(adapter->msix_entries);
1581 1661
1582err_out_decr_ref: 1662err_out_decr_ref:
1583 qlcnic_clr_all_drv_state(adapter, 0); 1663 qlcnic_clr_all_drv_state(adapter, 0);
@@ -1585,6 +1665,9 @@ err_out_decr_ref:
1585err_out_iounmap: 1665err_out_iounmap:
1586 qlcnic_cleanup_pci_map(adapter); 1666 qlcnic_cleanup_pci_map(adapter);
1587 1667
1668err_out_free_hw:
1669 qlcnic_free_adapter_resources(adapter);
1670
1588err_out_free_netdev: 1671err_out_free_netdev:
1589 free_netdev(netdev); 1672 free_netdev(netdev);
1590 1673
@@ -1626,6 +1709,7 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1626 qlcnic_free_lb_filters_mem(adapter); 1709 qlcnic_free_lb_filters_mem(adapter);
1627 1710
1628 qlcnic_teardown_intr(adapter); 1711 qlcnic_teardown_intr(adapter);
1712 kfree(adapter->msix_entries);
1629 1713
1630 qlcnic_remove_diag_entries(adapter); 1714 qlcnic_remove_diag_entries(adapter);
1631 1715
@@ -1638,6 +1722,7 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1638 pci_disable_device(pdev); 1722 pci_disable_device(pdev);
1639 pci_set_drvdata(pdev, NULL); 1723 pci_set_drvdata(pdev, NULL);
1640 1724
1725 qlcnic_free_adapter_resources(adapter);
1641 free_netdev(netdev); 1726 free_netdev(netdev);
1642} 1727}
1643static int __qlcnic_shutdown(struct pci_dev *pdev) 1728static int __qlcnic_shutdown(struct pci_dev *pdev)
@@ -1819,6 +1904,7 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1819 vlan_req->vlan_id = vlan_id; 1904 vlan_req->vlan_id = vlan_id;
1820 1905
1821 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); 1906 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1907 smp_mb();
1822} 1908}
1823 1909
1824#define QLCNIC_MAC_HASH(MAC)\ 1910#define QLCNIC_MAC_HASH(MAC)\
@@ -1879,58 +1965,122 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
1879 spin_unlock(&adapter->mac_learn_lock); 1965 spin_unlock(&adapter->mac_learn_lock);
1880} 1966}
1881 1967
1882static void 1968static int
1883qlcnic_tso_check(struct net_device *netdev, 1969qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
1884 struct qlcnic_host_tx_ring *tx_ring,
1885 struct cmd_desc_type0 *first_desc, 1970 struct cmd_desc_type0 *first_desc,
1886 struct sk_buff *skb) 1971 struct sk_buff *skb)
1887{ 1972{
1888 u8 opcode = TX_ETHER_PKT; 1973 u8 opcode = 0, hdr_len = 0;
1889 __be16 protocol = skb->protocol; 1974 u16 flags = 0, vlan_tci = 0;
1890 u16 flags = 0; 1975 int copied, offset, copy_len;
1891 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1892 struct cmd_desc_type0 *hwdesc; 1976 struct cmd_desc_type0 *hwdesc;
1893 struct vlan_ethhdr *vh; 1977 struct vlan_ethhdr *vh;
1894 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1978 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1979 u16 protocol = ntohs(skb->protocol);
1895 u32 producer = tx_ring->producer; 1980 u32 producer = tx_ring->producer;
1896 __le16 vlan_oob = first_desc->flags_opcode & 1981
1897 cpu_to_le16(FLAGS_VLAN_OOB); 1982 if (protocol == ETH_P_8021Q) {
1983 vh = (struct vlan_ethhdr *)skb->data;
1984 flags = FLAGS_VLAN_TAGGED;
1985 vlan_tci = vh->h_vlan_TCI;
1986 } else if (vlan_tx_tag_present(skb)) {
1987 flags = FLAGS_VLAN_OOB;
1988 vlan_tci = vlan_tx_tag_get(skb);
1989 }
1990 if (unlikely(adapter->pvid)) {
1991 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1992 return -EIO;
1993 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
1994 goto set_flags;
1995
1996 flags = FLAGS_VLAN_OOB;
1997 vlan_tci = adapter->pvid;
1998 }
1999set_flags:
2000 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2001 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1898 2002
1899 if (*(skb->data) & BIT_0) { 2003 if (*(skb->data) & BIT_0) {
1900 flags |= BIT_0; 2004 flags |= BIT_0;
1901 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); 2005 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1902 } 2006 }
1903 2007 opcode = TX_ETHER_PKT;
1904 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 2008 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1905 skb_shinfo(skb)->gso_size > 0) { 2009 skb_shinfo(skb)->gso_size > 0) {
1906 2010
1907 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2011 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1908 2012
1909 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2013 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1910 first_desc->total_hdr_length = hdr_len; 2014 first_desc->total_hdr_length = hdr_len;
1911 if (vlan_oob) { 2015
2016 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2017
2018 /* For LSO, we need to copy the MAC/IP/TCP headers into
2019 * the descriptor ring */
2020 copied = 0;
2021 offset = 2;
2022
2023 if (flags & FLAGS_VLAN_OOB) {
1912 first_desc->total_hdr_length += VLAN_HLEN; 2024 first_desc->total_hdr_length += VLAN_HLEN;
1913 first_desc->tcp_hdr_offset = VLAN_HLEN; 2025 first_desc->tcp_hdr_offset = VLAN_HLEN;
1914 first_desc->ip_hdr_offset = VLAN_HLEN; 2026 first_desc->ip_hdr_offset = VLAN_HLEN;
1915 /* Only in case of TSO on vlan device */ 2027 /* Only in case of TSO on vlan device */
1916 flags |= FLAGS_VLAN_TAGGED; 2028 flags |= FLAGS_VLAN_TAGGED;
2029
2030 /* Create a TSO vlan header template for firmware */
2031
2032 hwdesc = &tx_ring->desc_head[producer];
2033 tx_ring->cmd_buf_arr[producer].skb = NULL;
2034
2035 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2036 offset, hdr_len + VLAN_HLEN);
2037
2038 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2039 skb_copy_from_linear_data(skb, vh, 12);
2040 vh->h_vlan_proto = htons(ETH_P_8021Q);
2041 vh->h_vlan_TCI = htons(vlan_tci);
2042
2043 skb_copy_from_linear_data_offset(skb, 12,
2044 (char *)vh + 16, copy_len - 16);
2045
2046 copied = copy_len - VLAN_HLEN;
2047 offset = 0;
2048
2049 producer = get_next_index(producer, tx_ring->num_desc);
1917 } 2050 }
1918 2051
1919 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 2052 while (copied < hdr_len) {
1920 TX_TCP_LSO6 : TX_TCP_LSO; 2053
1921 tso = 1; 2054 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2055 offset, (hdr_len - copied));
2056
2057 hwdesc = &tx_ring->desc_head[producer];
2058 tx_ring->cmd_buf_arr[producer].skb = NULL;
2059
2060 skb_copy_from_linear_data_offset(skb, copied,
2061 (char *) hwdesc + offset, copy_len);
2062
2063 copied += copy_len;
2064 offset = 0;
2065
2066 producer = get_next_index(producer, tx_ring->num_desc);
2067 }
2068
2069 tx_ring->producer = producer;
2070 smp_mb();
2071 adapter->stats.lso_frames++;
1922 2072
1923 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2073 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1924 u8 l4proto; 2074 u8 l4proto;
1925 2075
1926 if (protocol == cpu_to_be16(ETH_P_IP)) { 2076 if (protocol == ETH_P_IP) {
1927 l4proto = ip_hdr(skb)->protocol; 2077 l4proto = ip_hdr(skb)->protocol;
1928 2078
1929 if (l4proto == IPPROTO_TCP) 2079 if (l4proto == IPPROTO_TCP)
1930 opcode = TX_TCP_PKT; 2080 opcode = TX_TCP_PKT;
1931 else if (l4proto == IPPROTO_UDP) 2081 else if (l4proto == IPPROTO_UDP)
1932 opcode = TX_UDP_PKT; 2082 opcode = TX_UDP_PKT;
1933 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { 2083 } else if (protocol == ETH_P_IPV6) {
1934 l4proto = ipv6_hdr(skb)->nexthdr; 2084 l4proto = ipv6_hdr(skb)->nexthdr;
1935 2085
1936 if (l4proto == IPPROTO_TCP) 2086 if (l4proto == IPPROTO_TCP)
@@ -1939,63 +2089,11 @@ qlcnic_tso_check(struct net_device *netdev,
1939 opcode = TX_UDPV6_PKT; 2089 opcode = TX_UDPV6_PKT;
1940 } 2090 }
1941 } 2091 }
1942
1943 first_desc->tcp_hdr_offset += skb_transport_offset(skb); 2092 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1944 first_desc->ip_hdr_offset += skb_network_offset(skb); 2093 first_desc->ip_hdr_offset += skb_network_offset(skb);
1945 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); 2094 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1946 2095
1947 if (!tso) 2096 return 0;
1948 return;
1949
1950 /* For LSO, we need to copy the MAC/IP/TCP headers into
1951 * the descriptor ring
1952 */
1953 copied = 0;
1954 offset = 2;
1955
1956 if (vlan_oob) {
1957 /* Create a TSO vlan header template for firmware */
1958
1959 hwdesc = &tx_ring->desc_head[producer];
1960 tx_ring->cmd_buf_arr[producer].skb = NULL;
1961
1962 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1963 hdr_len + VLAN_HLEN);
1964
1965 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1966 skb_copy_from_linear_data(skb, vh, 12);
1967 vh->h_vlan_proto = htons(ETH_P_8021Q);
1968 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1969
1970 skb_copy_from_linear_data_offset(skb, 12,
1971 (char *)vh + 16, copy_len - 16);
1972
1973 copied = copy_len - VLAN_HLEN;
1974 offset = 0;
1975
1976 producer = get_next_index(producer, tx_ring->num_desc);
1977 }
1978
1979 while (copied < hdr_len) {
1980
1981 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1982 (hdr_len - copied));
1983
1984 hwdesc = &tx_ring->desc_head[producer];
1985 tx_ring->cmd_buf_arr[producer].skb = NULL;
1986
1987 skb_copy_from_linear_data_offset(skb, copied,
1988 (char *)hwdesc + offset, copy_len);
1989
1990 copied += copy_len;
1991 offset = 0;
1992
1993 producer = get_next_index(producer, tx_ring->num_desc);
1994 }
1995
1996 tx_ring->producer = producer;
1997 barrier();
1998 adapter->stats.lso_frames++;
1999} 2097}
2000 2098
2001static int 2099static int
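
qlcnic_check_tx_tagging() and qlcnic_tso_check() are folded above into a single qlcnic_tx_pkt() that first decides the VLAN flags and TCI (in-band 802.1Q header, out-of-band hardware tag, or the forced port VLAN) and then performs the LSO header copy. A simplified sketch of just the tag-selection step; FLAGS_VLAN_TAGGED/FLAGS_VLAN_OOB are the driver's own descriptor flags, the pvid and tagging inputs are reduced to plain parameters, and byte-order handling is glossed over:

#include <linux/if_vlan.h>
#include <linux/errno.h>

/* Simplified tag selection as performed at the top of qlcnic_tx_pkt(). */
static int pick_tx_vlan(struct sk_buff *skb, u16 pvid, bool tagging_enabled,
			u16 *flags, u16 *vlan_tci)
{
	*flags = 0;
	*vlan_tci = 0;

	if (ntohs(skb->protocol) == ETH_P_8021Q) {
		struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;

		*flags = FLAGS_VLAN_TAGGED;		/* tag already in the frame */
		*vlan_tci = ntohs(vh->h_vlan_TCI);
	} else if (vlan_tx_tag_present(skb)) {
		*flags = FLAGS_VLAN_OOB;		/* tag carried in the skb */
		*vlan_tci = vlan_tx_tag_get(skb);
	}

	if (pvid) {
		if (*vlan_tci && !tagging_enabled)
			return -EIO;			/* tagged frames not allowed */
		if (!*vlan_tci) {
			*flags = FLAGS_VLAN_OOB;	/* force the port VLAN */
			*vlan_tci = pvid;
		}
	}
	return 0;
}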
@@ -2046,39 +2144,21 @@ out_err:
2046 return -ENOMEM; 2144 return -ENOMEM;
2047} 2145}
2048 2146
2049static int 2147static void
2050qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter, 2148qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2051 struct sk_buff *skb, 2149 struct qlcnic_cmd_buffer *pbuf)
2052 struct cmd_desc_type0 *first_desc)
2053{ 2150{
2054 u8 opcode = 0; 2151 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2055 u16 flags = 0; 2152 int nr_frags = skb_shinfo(skb)->nr_frags;
2056 __be16 protocol = skb->protocol; 2153 int i;
2057 struct vlan_ethhdr *vh;
2058 2154
2059 if (protocol == cpu_to_be16(ETH_P_8021Q)) { 2155 for (i = 0; i < nr_frags; i++) {
2060 vh = (struct vlan_ethhdr *)skb->data; 2156 nf = &pbuf->frag_array[i+1];
2061 protocol = vh->h_vlan_encapsulated_proto; 2157 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2062 flags = FLAGS_VLAN_TAGGED;
2063 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2064 } else if (vlan_tx_tag_present(skb)) {
2065 flags = FLAGS_VLAN_OOB;
2066 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2067 } 2158 }
2068 if (unlikely(adapter->pvid)) {
2069 if (first_desc->vlan_TCI &&
2070 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2071 return -EIO;
2072 if (first_desc->vlan_TCI &&
2073 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2074 goto set_flags;
2075 2159
2076 flags = FLAGS_VLAN_OOB; 2160 nf = &pbuf->frag_array[0];
2077 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid); 2161 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2078 }
2079set_flags:
2080 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2081 return 0;
2082} 2162}
2083 2163
2084static inline void 2164static inline void
@@ -2103,7 +2183,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2103 int i, k; 2183 int i, k;
2104 2184
2105 u32 producer; 2185 u32 producer;
2106 int frag_count, no_of_desc; 2186 int frag_count;
2107 u32 num_txd = tx_ring->num_desc; 2187 u32 num_txd = tx_ring->num_desc;
2108 2188
2109 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 2189 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -2133,12 +2213,8 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2133 frag_count = 1 + skb_shinfo(skb)->nr_frags; 2213 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2134 } 2214 }
2135 2215
2136 /* 4 fragments per cmd des */
2137 no_of_desc = (frag_count + 3) >> 2;
2138
2139 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { 2216 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
2140 netif_stop_queue(netdev); 2217 netif_stop_queue(netdev);
2141 smp_mb();
2142 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) 2218 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2143 netif_start_queue(netdev); 2219 netif_start_queue(netdev);
2144 else { 2220 else {
@@ -2155,9 +2231,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2155 first_desc = hwdesc = &tx_ring->desc_head[producer]; 2231 first_desc = hwdesc = &tx_ring->desc_head[producer];
2156 qlcnic_clear_cmddesc((u64 *)hwdesc); 2232 qlcnic_clear_cmddesc((u64 *)hwdesc);
2157 2233
2158 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2159 goto drop_packet;
2160
2161 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { 2234 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2162 adapter->stats.tx_dma_map_error++; 2235 adapter->stats.tx_dma_map_error++;
2163 goto drop_packet; 2236 goto drop_packet;
@@ -2201,8 +2274,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2201 } 2274 }
2202 2275
2203 tx_ring->producer = get_next_index(producer, num_txd); 2276 tx_ring->producer = get_next_index(producer, num_txd);
2277 smp_mb();
2204 2278
2205 qlcnic_tso_check(netdev, tx_ring, first_desc, skb); 2279 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2280 goto unwind_buff;
2206 2281
2207 if (qlcnic_mac_learn) 2282 if (qlcnic_mac_learn)
2208 qlcnic_send_filter(adapter, tx_ring, first_desc, skb); 2283 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
@@ -2214,6 +2289,8 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2214 2289
2215 return NETDEV_TX_OK; 2290 return NETDEV_TX_OK;
2216 2291
2292unwind_buff:
2293 qlcnic_unmap_buffers(pdev, skb, pbuf);
2217drop_packet: 2294drop_packet:
2218 adapter->stats.txdropped++; 2295 adapter->stats.txdropped++;
2219 dev_kfree_skb_any(skb); 2296 dev_kfree_skb_any(skb);
@@ -2260,16 +2337,16 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2260{ 2337{
2261 struct net_device *netdev = adapter->netdev; 2338 struct net_device *netdev = adapter->netdev;
2262 2339
2263 if (adapter->ahw.linkup && !linkup) { 2340 if (adapter->ahw->linkup && !linkup) {
2264 netdev_info(netdev, "NIC Link is down\n"); 2341 netdev_info(netdev, "NIC Link is down\n");
2265 adapter->ahw.linkup = 0; 2342 adapter->ahw->linkup = 0;
2266 if (netif_running(netdev)) { 2343 if (netif_running(netdev)) {
2267 netif_carrier_off(netdev); 2344 netif_carrier_off(netdev);
2268 netif_stop_queue(netdev); 2345 netif_stop_queue(netdev);
2269 } 2346 }
2270 } else if (!adapter->ahw.linkup && linkup) { 2347 } else if (!adapter->ahw->linkup && linkup) {
2271 netdev_info(netdev, "NIC Link is up\n"); 2348 netdev_info(netdev, "NIC Link is up\n");
2272 adapter->ahw.linkup = 1; 2349 adapter->ahw->linkup = 1;
2273 if (netif_running(netdev)) { 2350 if (netif_running(netdev)) {
2274 netif_carrier_on(netdev); 2351 netif_carrier_on(netdev);
2275 netif_wake_queue(netdev); 2352 netif_wake_queue(netdev);
@@ -2505,7 +2582,7 @@ static void qlcnic_poll_controller(struct net_device *netdev)
2505 int ring; 2582 int ring;
2506 struct qlcnic_host_sds_ring *sds_ring; 2583 struct qlcnic_host_sds_ring *sds_ring;
2507 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2584 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2508 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 2585 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
2509 2586
2510 disable_irq(adapter->irq); 2587 disable_irq(adapter->irq);
2511 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 2588 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -2756,6 +2833,8 @@ skip_ack_check:
2756 set_bit(__QLCNIC_START_FW, &adapter->state); 2833 set_bit(__QLCNIC_START_FW, &adapter->state);
2757 QLCDB(adapter, DRV, "Restarting fw\n"); 2834 QLCDB(adapter, DRV, "Restarting fw\n");
2758 qlcnic_idc_debug_info(adapter, 0); 2835 qlcnic_idc_debug_info(adapter, 0);
2836 QLCDB(adapter, DRV, "Take FW dump\n");
2837 qlcnic_dump_fw(adapter);
2759 } 2838 }
2760 2839
2761 qlcnic_api_unlock(adapter); 2840 qlcnic_api_unlock(adapter);
@@ -2854,7 +2933,7 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2854} 2933}
2855 2934
2856/*Transit to RESET state from READY state only */ 2935/*Transit to RESET state from READY state only */
2857static void 2936void
2858qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 2937qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2859{ 2938{
2860 u32 state; 2939 u32 state;
@@ -3266,6 +3345,56 @@ static struct device_attribute dev_attr_diag_mode = {
3266 .store = qlcnic_store_diag_mode, 3345 .store = qlcnic_store_diag_mode,
3267}; 3346};
3268 3347
3348int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3349{
3350 if (!use_msi_x && !use_msi) {
3351 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3352 return -EINVAL;
3353 }
3354
3355 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3356 netdev_info(netdev, "rss_ring valid range [2 - %x] in "
3357 " powers of 2\n", max_hw);
3358 return -EINVAL;
3359 }
3360 return 0;
3361
3362}
3363
3364int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3365{
3366 struct net_device *netdev = adapter->netdev;
3367 int err = 0;
3368
3369 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3370 return -EBUSY;
3371
3372 netif_device_detach(netdev);
3373 if (netif_running(netdev))
3374 __qlcnic_down(adapter, netdev);
3375 qlcnic_detach(adapter);
3376 qlcnic_teardown_intr(adapter);
3377
3378 if (qlcnic_enable_msix(adapter, data)) {
3379 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3380 qlcnic_enable_msi_legacy(adapter);
3381 }
3382
3383 if (netif_running(netdev)) {
3384 err = qlcnic_attach(adapter);
3385 if (err)
3386 goto done;
3387 err = __qlcnic_up(adapter, netdev);
3388 if (err)
3389 goto done;
3390 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3391 }
3392 done:
3393 netif_device_attach(netdev);
3394 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3395 return err;
3396}
3397
3269static int 3398static int
3270qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, 3399qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3271 loff_t offset, size_t size) 3400 loff_t offset, size_t size)
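qlcnic_validate_max_rss() added above accepts only ring counts that are powers of two within [2, max_hw]. The same check can be written in a few lines of plain C; the helper below is only a sketch with an illustrative name (the kernel itself uses is_power_of_2() from <linux/log2.h>).

#include <stdio.h>

/* Sketch of the range/power-of-two validation performed by
 * qlcnic_validate_max_rss(); names and return codes are illustrative. */
static int validate_rss_rings(unsigned int val, unsigned int max_hw)
{
    int is_pow2 = val && !(val & (val - 1));

    if (val < 2 || val > max_hw || !is_pow2)
        return -1;      /* the driver returns -EINVAL */
    return 0;
}

int main(void)
{
    unsigned int max_hw = 8, v;

    for (v = 1; v <= 9; v++)
        printf("%u -> %s\n", v,
               validate_rss_rings(v, max_hw) ? "reject" : "ok");
    return 0;
}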
@@ -3396,7 +3525,6 @@ qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3396 return size; 3525 return size;
3397} 3526}
3398 3527
3399
3400static struct bin_attribute bin_attr_crb = { 3528static struct bin_attribute bin_attr_crb = {
3401 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, 3529 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3402 .size = 0, 3530 .size = 0,
@@ -3515,7 +3643,7 @@ validate_esw_config(struct qlcnic_adapter *adapter,
3515 u8 pci_func; 3643 u8 pci_func;
3516 int i; 3644 int i;
3517 3645
3518 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE); 3646 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
3519 3647
3520 for (i = 0; i < count; i++) { 3648 for (i = 0; i < count; i++) {
3521 pci_func = esw_cfg[i].pci_func; 3649 pci_func = esw_cfg[i].pci_func;
@@ -3581,13 +3709,13 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3581 if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) 3709 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3582 return QL_STATUS_INVALID_PARAM; 3710 return QL_STATUS_INVALID_PARAM;
3583 3711
3584 if (adapter->ahw.pci_func != esw_cfg[i].pci_func) 3712 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
3585 continue; 3713 continue;
3586 3714
3587 op_mode = esw_cfg[i].op_mode; 3715 op_mode = esw_cfg[i].op_mode;
3588 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); 3716 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3589 esw_cfg[i].op_mode = op_mode; 3717 esw_cfg[i].op_mode = op_mode;
3590 esw_cfg[i].pci_func = adapter->ahw.pci_func; 3718 esw_cfg[i].pci_func = adapter->ahw->pci_func;
3591 3719
3592 switch (esw_cfg[i].op_mode) { 3720 switch (esw_cfg[i].op_mode) {
3593 case QLCNIC_PORT_DEFAULTS: 3721 case QLCNIC_PORT_DEFAULTS:
@@ -3968,14 +4096,14 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3968 dev_info(dev, "failed to create crb sysfs entry\n"); 4096 dev_info(dev, "failed to create crb sysfs entry\n");
3969 if (device_create_bin_file(dev, &bin_attr_mem)) 4097 if (device_create_bin_file(dev, &bin_attr_mem))
3970 dev_info(dev, "failed to create mem sysfs entry\n"); 4098 dev_info(dev, "failed to create mem sysfs entry\n");
4099 if (device_create_bin_file(dev, &bin_attr_pci_config))
4100 dev_info(dev, "failed to create pci config sysfs entry");
3971 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 4101 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3972 return; 4102 return;
3973 if (device_create_bin_file(dev, &bin_attr_esw_config)) 4103 if (device_create_bin_file(dev, &bin_attr_esw_config))
3974 dev_info(dev, "failed to create esw config sysfs entry"); 4104 dev_info(dev, "failed to create esw config sysfs entry");
3975 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 4105 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3976 return; 4106 return;
3977 if (device_create_bin_file(dev, &bin_attr_pci_config))
3978 dev_info(dev, "failed to create pci config sysfs entry");
3979 if (device_create_bin_file(dev, &bin_attr_npar_config)) 4107 if (device_create_bin_file(dev, &bin_attr_npar_config))
3980 dev_info(dev, "failed to create npar config sysfs entry"); 4108 dev_info(dev, "failed to create npar config sysfs entry");
3981 if (device_create_bin_file(dev, &bin_attr_pm_config)) 4109 if (device_create_bin_file(dev, &bin_attr_pm_config))
@@ -3996,12 +4124,12 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3996 device_remove_file(dev, &dev_attr_diag_mode); 4124 device_remove_file(dev, &dev_attr_diag_mode);
3997 device_remove_bin_file(dev, &bin_attr_crb); 4125 device_remove_bin_file(dev, &bin_attr_crb);
3998 device_remove_bin_file(dev, &bin_attr_mem); 4126 device_remove_bin_file(dev, &bin_attr_mem);
4127 device_remove_bin_file(dev, &bin_attr_pci_config);
3999 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 4128 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4000 return; 4129 return;
4001 device_remove_bin_file(dev, &bin_attr_esw_config); 4130 device_remove_bin_file(dev, &bin_attr_esw_config);
4002 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 4131 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4003 return; 4132 return;
4004 device_remove_bin_file(dev, &bin_attr_pci_config);
4005 device_remove_bin_file(dev, &bin_attr_npar_config); 4133 device_remove_bin_file(dev, &bin_attr_npar_config);
4006 device_remove_bin_file(dev, &bin_attr_pm_config); 4134 device_remove_bin_file(dev, &bin_attr_pm_config);
4007 device_remove_bin_file(dev, &bin_attr_esw_stats); 4135 device_remove_bin_file(dev, &bin_attr_esw_stats);
@@ -4048,14 +4176,10 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4048 4176
4049 qlcnic_config_indev_addr(adapter, netdev, event); 4177 qlcnic_config_indev_addr(adapter, netdev, event);
4050 4178
4051 if (!adapter->vlgrp) 4179 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4052 return; 4180 dev = vlan_find_dev(netdev, vid);
4053
4054 for (vid = 0; vid < VLAN_N_VID; vid++) {
4055 dev = vlan_group_get_device(adapter->vlgrp, vid);
4056 if (!dev) 4181 if (!dev)
4057 continue; 4182 continue;
4058
4059 qlcnic_config_indev_addr(adapter, dev, event); 4183 qlcnic_config_indev_addr(adapter, dev, event);
4060 } 4184 }
4061} 4185}
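The last qlcnic hunk replaces the vlan_group walk (vlan_group_get_device() over every possible VLAN ID) with for_each_set_bit() over a per-adapter VLAN bitmap plus vlan_find_dev(), so only configured VLANs are visited. A minimal, self-contained model of such a bitmap walk follows; it does not use the kernel's bitmap API, and the helper names are made up. Where the kernel's for_each_set_bit() can additionally skip whole zero words, the loop below simply tests each bit.

#include <limits.h>
#include <stdio.h>

#define VLAN_N_VID    4096
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)
#define VLAN_WORDS    ((VLAN_N_VID + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long vlans[VLAN_WORDS];   /* one bit per configured VLAN ID */

static void vlan_set(unsigned int vid)
{
    vlans[vid / BITS_PER_WORD] |= 1UL << (vid % BITS_PER_WORD);
}

int main(void)
{
    unsigned int vid;

    vlan_set(1);
    vlan_set(100);
    vlan_set(4094);

    /* Act only on set bits, the idea behind the for_each_set_bit() loop. */
    for (vid = 0; vid < VLAN_N_VID; vid++) {
        if (vlans[vid / BITS_PER_WORD] & (1UL << (vid % BITS_PER_WORD)))
            printf("restore addresses on VLAN %u\n", vid);
    }
    return 0;
}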
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 4757c59a07a2..d32850715f5c 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2134,7 +2134,7 @@ struct ql_adapter {
2134 struct delayed_work mpi_idc_work; 2134 struct delayed_work mpi_idc_work;
2135 struct delayed_work mpi_core_to_log; 2135 struct delayed_work mpi_core_to_log;
2136 struct completion ide_completion; 2136 struct completion ide_completion;
2137 struct nic_operations *nic_ops; 2137 const struct nic_operations *nic_ops;
2138 u16 device_id; 2138 u16 device_id;
2139 struct timer_list timer; 2139 struct timer_list timer;
2140 atomic_t lb_count; 2140 atomic_t lb_count;
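The qlge.h change turns nic_ops into a pointer to const struct nic_operations, matching the const ops tables defined later in qlge_main.c, so the function-pointer tables can live in read-only data. A tiny sketch of the same constification pattern, with made-up names rather than the qlge layout:

#include <stdio.h>

struct nic_operations {                 /* illustrative, not the qlge layout */
    int (*get_flash)(void);
    int (*port_initialize)(void);
};

static int a_get_flash(void)       { return 0; }
static int a_port_initialize(void) { return 0; }

/* 'const' lets the compiler place the table in .rodata and rejects
 * accidental writes through adapter->nic_ops. */
static const struct nic_operations qla_demo_ops = {
    .get_flash       = a_get_flash,
    .port_initialize = a_port_initialize,
};

struct adapter {
    const struct nic_operations *nic_ops;   /* mirrors the qlge.h change */
};

int main(void)
{
    struct adapter ad = { .nic_ops = &qla_demo_ops };

    printf("%d\n", ad.nic_ops->get_flash());
    return 0;
}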
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 8149cc9de4ca..19b00fa0eaf0 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -356,7 +356,7 @@ static int ql_get_settings(struct net_device *ndev,
356 ecmd->port = PORT_FIBRE; 356 ecmd->port = PORT_FIBRE;
357 } 357 }
358 358
359 ecmd->speed = SPEED_10000; 359 ethtool_cmd_speed_set(ecmd, SPEED_10000);
360 ecmd->duplex = DUPLEX_FULL; 360 ecmd->duplex = DUPLEX_FULL;
361 361
362 return 0; 362 return 0;
@@ -412,31 +412,31 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
412 return 0; 412 return 0;
413} 413}
414 414
415static int ql_phys_id(struct net_device *ndev, u32 data) 415static int ql_set_phys_id(struct net_device *ndev,
416 enum ethtool_phys_id_state state)
417
416{ 418{
417 struct ql_adapter *qdev = netdev_priv(ndev); 419 struct ql_adapter *qdev = netdev_priv(ndev);
418 u32 led_reg, i;
419 int status;
420
421 /* Save the current LED settings */
422 status = ql_mb_get_led_cfg(qdev);
423 if (status)
424 return status;
425 led_reg = qdev->led_config;
426 420
427 /* Start blinking the led */ 421 switch (state) {
428 if (!data || data > 300) 422 case ETHTOOL_ID_ACTIVE:
429 data = 300; 423 /* Save the current LED settings */
424 if (ql_mb_get_led_cfg(qdev))
425 return -EIO;
430 426
431 for (i = 0; i < (data * 10); i++) 427 /* Start blinking */
432 ql_mb_set_led_cfg(qdev, QL_LED_BLINK); 428 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
429 return 0;
433 430
434 /* Restore LED settings */ 431 case ETHTOOL_ID_INACTIVE:
435 status = ql_mb_set_led_cfg(qdev, led_reg); 432 /* Restore LED settings */
436 if (status) 433 if (ql_mb_set_led_cfg(qdev, qdev->led_config))
437 return status; 434 return -EIO;
435 return 0;
438 436
439 return 0; 437 default:
438 return -EINVAL;
439 }
440} 440}
441 441
442static int ql_start_loopback(struct ql_adapter *qdev) 442static int ql_start_loopback(struct ql_adapter *qdev)
@@ -655,32 +655,6 @@ static int ql_set_pauseparam(struct net_device *netdev,
655 return status; 655 return status;
656} 656}
657 657
658static u32 ql_get_rx_csum(struct net_device *netdev)
659{
660 struct ql_adapter *qdev = netdev_priv(netdev);
661 return qdev->rx_csum;
662}
663
664static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
665{
666 struct ql_adapter *qdev = netdev_priv(netdev);
667 qdev->rx_csum = data;
668 return 0;
669}
670
671static int ql_set_tso(struct net_device *ndev, uint32_t data)
672{
673
674 if (data) {
675 ndev->features |= NETIF_F_TSO;
676 ndev->features |= NETIF_F_TSO6;
677 } else {
678 ndev->features &= ~NETIF_F_TSO;
679 ndev->features &= ~NETIF_F_TSO6;
680 }
681 return 0;
682}
683
684static u32 ql_get_msglevel(struct net_device *ndev) 658static u32 ql_get_msglevel(struct net_device *ndev)
685{ 659{
686 struct ql_adapter *qdev = netdev_priv(ndev); 660 struct ql_adapter *qdev = netdev_priv(ndev);
@@ -703,18 +677,10 @@ const struct ethtool_ops qlge_ethtool_ops = {
703 .get_msglevel = ql_get_msglevel, 677 .get_msglevel = ql_get_msglevel,
704 .set_msglevel = ql_set_msglevel, 678 .set_msglevel = ql_set_msglevel,
705 .get_link = ethtool_op_get_link, 679 .get_link = ethtool_op_get_link,
706 .phys_id = ql_phys_id, 680 .set_phys_id = ql_set_phys_id,
707 .self_test = ql_self_test, 681 .self_test = ql_self_test,
708 .get_pauseparam = ql_get_pauseparam, 682 .get_pauseparam = ql_get_pauseparam,
709 .set_pauseparam = ql_set_pauseparam, 683 .set_pauseparam = ql_set_pauseparam,
710 .get_rx_csum = ql_get_rx_csum,
711 .set_rx_csum = ql_set_rx_csum,
712 .get_tx_csum = ethtool_op_get_tx_csum,
713 .set_tx_csum = ethtool_op_set_tx_csum,
714 .get_sg = ethtool_op_get_sg,
715 .set_sg = ethtool_op_set_sg,
716 .get_tso = ethtool_op_get_tso,
717 .set_tso = ql_set_tso,
718 .get_coalesce = ql_get_coalesce, 684 .get_coalesce = ql_get_coalesce,
719 .set_coalesce = ql_set_coalesce, 685 .set_coalesce = ql_set_coalesce,
720 .get_sset_count = ql_get_sset_count, 686 .get_sset_count = ql_get_sset_count,
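The ql_phys_id -> ql_set_phys_id conversion above swaps a blocking blink loop for the ethtool set_phys_id callback, where the core calls the driver with ETHTOOL_ID_ACTIVE / ETHTOOL_ID_INACTIVE (and, when needed, ID_ON/ID_OFF) and handles the timing itself. The sketch below models that state machine in plain C with a local enum; the mailbox helpers and LED values are fakes standing in for ql_mb_get_led_cfg() / ql_mb_set_led_cfg().

#include <stdio.h>

/* Local stand-in for enum ethtool_phys_id_state. */
enum phys_id_state { ID_ACTIVE, ID_ON, ID_OFF, ID_INACTIVE };

struct demo_dev {
    unsigned int led_config;    /* filled in by the "mailbox" read */
    unsigned int saved_led;
};

#define LED_BLINK 0x1u          /* made-up blink command */

/* Fake mailbox helpers; the real driver talks to firmware here. */
static int mb_get_led_cfg(struct demo_dev *d)                 { (void)d; return 0; }
static int mb_set_led_cfg(struct demo_dev *d, unsigned int v) { d->led_config = v; return 0; }

/* Same shape as ql_set_phys_id(): save the LED config on ACTIVE and start
 * blinking (returning 0 means the hardware blinks on its own), restore the
 * config on INACTIVE, reject anything else. */
static int demo_set_phys_id(struct demo_dev *d, enum phys_id_state state)
{
    switch (state) {
    case ID_ACTIVE:
        if (mb_get_led_cfg(d))
            return -1;                  /* the driver returns -EIO */
        d->saved_led = d->led_config;
        mb_set_led_cfg(d, LED_BLINK);
        return 0;
    case ID_INACTIVE:
        return mb_set_led_cfg(d, d->saved_led) ? -1 : 0;
    default:
        return -1;                      /* -EINVAL in the driver */
    }
}

int main(void)
{
    struct demo_dev d = { .led_config = 0x42, .saved_led = 0 };

    demo_set_phys_id(&d, ID_ACTIVE);
    printf("blinking, led_config=0x%x\n", d.led_config);
    demo_set_phys_id(&d, ID_INACTIVE);
    printf("restored, led_config=0x%x\n", d.led_config);
    return 0;
}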
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 5bb311945436..6c9d124cfc76 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1571,7 +1571,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1571 skb->protocol = eth_type_trans(skb, ndev); 1571 skb->protocol = eth_type_trans(skb, ndev);
1572 skb_checksum_none_assert(skb); 1572 skb_checksum_none_assert(skb);
1573 1573
1574 if (qdev->rx_csum && 1574 if ((ndev->features & NETIF_F_RXCSUM) &&
1575 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1575 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1576 /* TCP frame. */ 1576 /* TCP frame. */
1577 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 1577 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -1684,7 +1684,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1684 /* If rx checksum is on, and there are no 1684 /* If rx checksum is on, and there are no
1685 * csum or frame errors. 1685 * csum or frame errors.
1686 */ 1686 */
1687 if (qdev->rx_csum && 1687 if ((ndev->features & NETIF_F_RXCSUM) &&
1688 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1688 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1689 /* TCP frame. */ 1689 /* TCP frame. */
1690 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 1690 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -2004,7 +2004,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
2004 /* If rx checksum is on, and there are no 2004 /* If rx checksum is on, and there are no
2005 * csum or frame errors. 2005 * csum or frame errors.
2006 */ 2006 */
2007 if (qdev->rx_csum && 2007 if ((ndev->features & NETIF_F_RXCSUM) &&
2008 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 2008 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2009 /* TCP frame. */ 2009 /* TCP frame. */
2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { 2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -4412,12 +4412,12 @@ error:
4412 rtnl_unlock(); 4412 rtnl_unlock();
4413} 4413}
4414 4414
4415static struct nic_operations qla8012_nic_ops = { 4415static const struct nic_operations qla8012_nic_ops = {
4416 .get_flash = ql_get_8012_flash_params, 4416 .get_flash = ql_get_8012_flash_params,
4417 .port_initialize = ql_8012_port_initialize, 4417 .port_initialize = ql_8012_port_initialize,
4418}; 4418};
4419 4419
4420static struct nic_operations qla8000_nic_ops = { 4420static const struct nic_operations qla8000_nic_ops = {
4421 .get_flash = ql_get_8000_flash_params, 4421 .get_flash = ql_get_8000_flash_params,
4422 .port_initialize = ql_8000_port_initialize, 4422 .port_initialize = ql_8000_port_initialize,
4423}; 4423};
@@ -4621,7 +4621,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
4621 /* 4621 /*
4622 * Set up the operating parameters. 4622 * Set up the operating parameters.
4623 */ 4623 */
4624 qdev->rx_csum = 1;
4625 qdev->workqueue = create_singlethread_workqueue(ndev->name); 4624 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4626 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); 4625 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4627 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); 4626 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
@@ -4695,15 +4694,11 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4695 4694
4696 qdev = netdev_priv(ndev); 4695 qdev = netdev_priv(ndev);
4697 SET_NETDEV_DEV(ndev, &pdev->dev); 4696 SET_NETDEV_DEV(ndev, &pdev->dev);
4698 ndev->features = (0 4697 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4699 | NETIF_F_IP_CSUM 4698 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4700 | NETIF_F_SG 4699 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4701 | NETIF_F_TSO 4700 ndev->features = ndev->hw_features |
4702 | NETIF_F_TSO6 4701 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4703 | NETIF_F_TSO_ECN
4704 | NETIF_F_HW_VLAN_TX
4705 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4706 ndev->features |= NETIF_F_GRO;
4707 4702
4708 if (test_bit(QL_DMA64, &qdev->flags)) 4703 if (test_bit(QL_DMA64, &qdev->flags))
4709 ndev->features |= NETIF_F_HIGHDMA; 4704 ndev->features |= NETIF_F_HIGHDMA;
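The qlge probe now splits features into hw_features (the set ethtool may toggle) and features (what is currently active), and the RX path tests ndev->features & NETIF_F_RXCSUM per packet instead of a private qdev->rx_csum flag. A plain C sketch of that "capability mask vs. active mask" split follows; the flag values are made up and do not match the kernel's NETIF_F_* bits.

#include <stdio.h>

/* Made-up feature bits; the kernel's NETIF_F_* values differ. */
#define F_SG      (1u << 0)
#define F_RXCSUM  (1u << 1)
#define F_TSO     (1u << 2)
#define F_VLAN_RX (1u << 3)     /* fixed on, not user-toggleable here */

struct demo_netdev {
    unsigned int hw_features;   /* what ethtool may flip    */
    unsigned int features;      /* what is currently active */
};

static void rx_one_packet(const struct demo_netdev *nd, int hw_csum_ok)
{
    /* Mirrors the qlge RX change: only trust the hardware checksum
     * when the RXCSUM feature is currently enabled. */
    if ((nd->features & F_RXCSUM) && hw_csum_ok)
        printf("CHECKSUM_UNNECESSARY\n");
    else
        printf("CHECKSUM_NONE\n");
}

int main(void)
{
    struct demo_netdev nd = {
        .hw_features = F_SG | F_RXCSUM | F_TSO,
        .features    = 0,
    };

    nd.features = nd.hw_features | F_VLAN_RX;   /* probe-time default */

    rx_one_packet(&nd, 1);

    nd.features &= ~F_RXCSUM;   /* what 'ethtool -K ethX rx off' would do */
    rx_one_packet(&nd, 1);
    return 0;
}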
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 397c36810a15..04f4e6086cd0 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -37,6 +37,8 @@
37 37
38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" 38#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" 39#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
40#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
41#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
40#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" 42#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
41 43
42#ifdef RTL8169_DEBUG 44#ifdef RTL8169_DEBUG
@@ -96,89 +98,125 @@ static const int multicast_filter_limit = 32;
96#define RTL_R32(reg) readl (ioaddr + (reg)) 98#define RTL_R32(reg) readl (ioaddr + (reg))
97 99
98enum mac_version { 100enum mac_version {
99 RTL_GIGA_MAC_NONE = 0x00, 101 RTL_GIGA_MAC_VER_01 = 0,
100 RTL_GIGA_MAC_VER_01 = 0x01, // 8169 102 RTL_GIGA_MAC_VER_02,
101 RTL_GIGA_MAC_VER_02 = 0x02, // 8169S 103 RTL_GIGA_MAC_VER_03,
102 RTL_GIGA_MAC_VER_03 = 0x03, // 8110S 104 RTL_GIGA_MAC_VER_04,
103 RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB 105 RTL_GIGA_MAC_VER_05,
104 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd 106 RTL_GIGA_MAC_VER_06,
105 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe 107 RTL_GIGA_MAC_VER_07,
106 RTL_GIGA_MAC_VER_07 = 0x07, // 8102e 108 RTL_GIGA_MAC_VER_08,
107 RTL_GIGA_MAC_VER_08 = 0x08, // 8102e 109 RTL_GIGA_MAC_VER_09,
108 RTL_GIGA_MAC_VER_09 = 0x09, // 8102e 110 RTL_GIGA_MAC_VER_10,
109 RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e 111 RTL_GIGA_MAC_VER_11,
110 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb 112 RTL_GIGA_MAC_VER_12,
111 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 113 RTL_GIGA_MAC_VER_13,
112 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 114 RTL_GIGA_MAC_VER_14,
113 RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ? 115 RTL_GIGA_MAC_VER_15,
114 RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ? 116 RTL_GIGA_MAC_VER_16,
115 RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec 117 RTL_GIGA_MAC_VER_17,
116 RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf 118 RTL_GIGA_MAC_VER_18,
117 RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP 119 RTL_GIGA_MAC_VER_19,
118 RTL_GIGA_MAC_VER_19 = 0x13, // 8168C 120 RTL_GIGA_MAC_VER_20,
119 RTL_GIGA_MAC_VER_20 = 0x14, // 8168C 121 RTL_GIGA_MAC_VER_21,
120 RTL_GIGA_MAC_VER_21 = 0x15, // 8168C 122 RTL_GIGA_MAC_VER_22,
121 RTL_GIGA_MAC_VER_22 = 0x16, // 8168C 123 RTL_GIGA_MAC_VER_23,
122 RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP 124 RTL_GIGA_MAC_VER_24,
123 RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP 125 RTL_GIGA_MAC_VER_25,
124 RTL_GIGA_MAC_VER_25 = 0x19, // 8168D 126 RTL_GIGA_MAC_VER_26,
125 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D 127 RTL_GIGA_MAC_VER_27,
126 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP 128 RTL_GIGA_MAC_VER_28,
127 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP 129 RTL_GIGA_MAC_VER_29,
128 RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E 130 RTL_GIGA_MAC_VER_30,
129 RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E 131 RTL_GIGA_MAC_VER_31,
132 RTL_GIGA_MAC_VER_32,
133 RTL_GIGA_MAC_VER_33,
134 RTL_GIGA_MAC_NONE = 0xff,
130}; 135};
131 136
132#define _R(NAME,MAC,MASK) \ 137enum rtl_tx_desc_version {
133 { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK } 138 RTL_TD_0 = 0,
139 RTL_TD_1 = 1,
140};
141
142#define _R(NAME,TD,FW) \
143 { .name = NAME, .txd_version = TD, .fw_name = FW }
134 144
135static const struct { 145static const struct {
136 const char *name; 146 const char *name;
137 u8 mac_version; 147 enum rtl_tx_desc_version txd_version;
138 u32 RxConfigMask; /* Clears the bits supported by this chip */
139} rtl_chip_info[] = {
140 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880), // 8169
141 _R("RTL8169s", RTL_GIGA_MAC_VER_02, 0xff7e1880), // 8169S
142 _R("RTL8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880), // 8110S
143 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
144 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
145 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
146 _R("RTL8102e", RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
147 _R("RTL8102e", RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
148 _R("RTL8102e", RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
149 _R("RTL8101e", RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
150 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
151 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
152 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
153 _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
154 _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139
155 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E
156 _R("RTL8101e", RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E
157 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E
158 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E
159 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_20, 0xff7e1880), // PCI-E
160 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_21, 0xff7e1880), // PCI-E
161 _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
162 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
163 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
164 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
165 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
166 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
167 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
168 _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
169 _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E
170};
171#undef _R
172
173static const struct rtl_firmware_info {
174 int mac_version;
175 const char *fw_name; 148 const char *fw_name;
176} rtl_firmware_infos[] = { 149} rtl_chip_infos[] = {
177 { .mac_version = RTL_GIGA_MAC_VER_25, .fw_name = FIRMWARE_8168D_1 }, 150 /* PCI devices. */
178 { .mac_version = RTL_GIGA_MAC_VER_26, .fw_name = FIRMWARE_8168D_2 }, 151 [RTL_GIGA_MAC_VER_01] =
179 { .mac_version = RTL_GIGA_MAC_VER_29, .fw_name = FIRMWARE_8105E_1 }, 152 _R("RTL8169", RTL_TD_0, NULL),
180 { .mac_version = RTL_GIGA_MAC_VER_30, .fw_name = FIRMWARE_8105E_1 } 153 [RTL_GIGA_MAC_VER_02] =
154 _R("RTL8169s", RTL_TD_0, NULL),
155 [RTL_GIGA_MAC_VER_03] =
156 _R("RTL8110s", RTL_TD_0, NULL),
157 [RTL_GIGA_MAC_VER_04] =
158 _R("RTL8169sb/8110sb", RTL_TD_0, NULL),
159 [RTL_GIGA_MAC_VER_05] =
160 _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
161 [RTL_GIGA_MAC_VER_06] =
162 _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
163 /* PCI-E devices. */
164 [RTL_GIGA_MAC_VER_07] =
165 _R("RTL8102e", RTL_TD_1, NULL),
166 [RTL_GIGA_MAC_VER_08] =
167 _R("RTL8102e", RTL_TD_1, NULL),
168 [RTL_GIGA_MAC_VER_09] =
169 _R("RTL8102e", RTL_TD_1, NULL),
170 [RTL_GIGA_MAC_VER_10] =
171 _R("RTL8101e", RTL_TD_0, NULL),
172 [RTL_GIGA_MAC_VER_11] =
173 _R("RTL8168b/8111b", RTL_TD_0, NULL),
174 [RTL_GIGA_MAC_VER_12] =
175 _R("RTL8168b/8111b", RTL_TD_0, NULL),
176 [RTL_GIGA_MAC_VER_13] =
177 _R("RTL8101e", RTL_TD_0, NULL),
178 [RTL_GIGA_MAC_VER_14] =
179 _R("RTL8100e", RTL_TD_0, NULL),
180 [RTL_GIGA_MAC_VER_15] =
181 _R("RTL8100e", RTL_TD_0, NULL),
182 [RTL_GIGA_MAC_VER_16] =
183 _R("RTL8101e", RTL_TD_0, NULL),
184 [RTL_GIGA_MAC_VER_17] =
185 _R("RTL8168b/8111b", RTL_TD_0, NULL),
186 [RTL_GIGA_MAC_VER_18] =
187 _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
188 [RTL_GIGA_MAC_VER_19] =
189 _R("RTL8168c/8111c", RTL_TD_1, NULL),
190 [RTL_GIGA_MAC_VER_20] =
191 _R("RTL8168c/8111c", RTL_TD_1, NULL),
192 [RTL_GIGA_MAC_VER_21] =
193 _R("RTL8168c/8111c", RTL_TD_1, NULL),
194 [RTL_GIGA_MAC_VER_22] =
195 _R("RTL8168c/8111c", RTL_TD_1, NULL),
196 [RTL_GIGA_MAC_VER_23] =
197 _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
198 [RTL_GIGA_MAC_VER_24] =
199 _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
200 [RTL_GIGA_MAC_VER_25] =
201 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1),
202 [RTL_GIGA_MAC_VER_26] =
203 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2),
204 [RTL_GIGA_MAC_VER_27] =
205 _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
206 [RTL_GIGA_MAC_VER_28] =
207 _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
208 [RTL_GIGA_MAC_VER_29] =
209 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
210 [RTL_GIGA_MAC_VER_30] =
211 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
212 [RTL_GIGA_MAC_VER_31] =
213 _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
214 [RTL_GIGA_MAC_VER_32] =
215 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1),
216 [RTL_GIGA_MAC_VER_33] =
217 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2)
181}; 218};
219#undef _R
182 220
183enum cfg_version { 221enum cfg_version {
184 RTL_CFG_0 = 0x00, 222 RTL_CFG_0 = 0x00,
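The hunk above folds the old rtl_chip_info[]/rtl_firmware_infos[] pair into a single rtl_chip_infos[] array indexed directly by enum mac_version through designated initializers, so tp->mac_version becomes a plain array index and each chip's firmware name sits next to its name. A tiny sketch of that indexing pattern, with made-up entries:

#include <stdio.h>

enum mac_version { VER_01, VER_02, VER_03, VER_NONE = 0xff };

struct chip_info {
    const char *name;
    const char *fw_name;    /* NULL when no firmware blob is needed */
};

/* Designated initializers keep the table in sync with the enum even if
 * entries are added out of order; unlisted slots default to {NULL, NULL}. */
static const struct chip_info chip_infos[] = {
    [VER_01] = { "DEMO8169",  NULL },
    [VER_02] = { "DEMO8169s", NULL },
    [VER_03] = { "DEMO8110s", "demo/rtl-demo.fw" },
};

int main(void)
{
    enum mac_version v = VER_03;

    printf("%s fw=%s\n", chip_infos[v].name,
           chip_infos[v].fw_name ? chip_infos[v].fw_name : "N/A");
    return 0;
}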
@@ -232,6 +270,9 @@ enum rtl_registers {
232 IntrStatus = 0x3e, 270 IntrStatus = 0x3e,
233 TxConfig = 0x40, 271 TxConfig = 0x40,
234 RxConfig = 0x44, 272 RxConfig = 0x44,
273
274#define RTL_RX_CONFIG_MASK 0xff7e1880u
275
235 RxMissed = 0x4c, 276 RxMissed = 0x4c,
236 Cfg9346 = 0x50, 277 Cfg9346 = 0x50,
237 Config0 = 0x51, 278 Config0 = 0x51,
@@ -325,7 +366,9 @@ enum rtl8168_registers {
325#define OCPAR_FLAG 0x80000000 366#define OCPAR_FLAG 0x80000000
326#define OCPAR_GPHY_WRITE_CMD 0x8000f060 367#define OCPAR_GPHY_WRITE_CMD 0x8000f060
327#define OCPAR_GPHY_READ_CMD 0x0000f060 368#define OCPAR_GPHY_READ_CMD 0x0000f060
328 RDSAR1 = 0xd0 /* 8168c only. Undocumented on 8168dp */ 369 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
370 MISC = 0xf0, /* 8168e only. */
371#define TXPLA_RST (1 << 29)
329}; 372};
330 373
331enum rtl_register_content { 374enum rtl_register_content {
@@ -403,6 +446,7 @@ enum rtl_register_content {
403 BWF = (1 << 6), /* Accept Broadcast wakeup frame */ 446 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
404 MWF = (1 << 5), /* Accept Multicast wakeup frame */ 447 MWF = (1 << 5), /* Accept Multicast wakeup frame */
405 UWF = (1 << 4), /* Accept Unicast wakeup frame */ 448 UWF = (1 << 4), /* Accept Unicast wakeup frame */
449 Spi_en = (1 << 3),
406 LanWake = (1 << 1), /* LanWake enable/disable */ 450 LanWake = (1 << 1), /* LanWake enable/disable */
407 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ 451 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
408 452
@@ -451,21 +495,69 @@ enum rtl_register_content {
451 CounterDump = 0x8, 495 CounterDump = 0x8,
452}; 496};
453 497
454enum desc_status_bit { 498enum rtl_desc_bit {
499 /* First doubleword. */
455 DescOwn = (1 << 31), /* Descriptor is owned by NIC */ 500 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
456 RingEnd = (1 << 30), /* End of descriptor ring */ 501 RingEnd = (1 << 30), /* End of descriptor ring */
457 FirstFrag = (1 << 29), /* First segment of a packet */ 502 FirstFrag = (1 << 29), /* First segment of a packet */
458 LastFrag = (1 << 28), /* Final segment of a packet */ 503 LastFrag = (1 << 28), /* Final segment of a packet */
504};
505
506/* Generic case. */
507enum rtl_tx_desc_bit {
508 /* First doubleword. */
509 TD_LSO = (1 << 27), /* Large Send Offload */
510#define TD_MSS_MAX 0x07ffu /* MSS value */
459 511
460 /* Tx private */ 512 /* Second doubleword. */
461 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */ 513 TxVlanTag = (1 << 17), /* Add VLAN tag */
462 MSSShift = 16, /* MSS value position */ 514};
463 MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */ 515
464 IPCS = (1 << 18), /* Calculate IP checksum */ 516/* 8169, 8168b and 810x except 8102e. */
465 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */ 517enum rtl_tx_desc_bit_0 {
466 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */ 518 /* First doubleword. */
467 TxVlanTag = (1 << 17), /* Add VLAN tag */ 519#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
520 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
521 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
522 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
523};
524
525/* 8102e, 8168c and beyond. */
526enum rtl_tx_desc_bit_1 {
527 /* Second doubleword. */
528#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
529 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
530 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
531 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
532};
468 533
534static const struct rtl_tx_desc_info {
535 struct {
536 u32 udp;
537 u32 tcp;
538 } checksum;
539 u16 mss_shift;
540 u16 opts_offset;
541} tx_desc_info [] = {
542 [RTL_TD_0] = {
543 .checksum = {
544 .udp = TD0_IP_CS | TD0_UDP_CS,
545 .tcp = TD0_IP_CS | TD0_TCP_CS
546 },
547 .mss_shift = TD0_MSS_SHIFT,
548 .opts_offset = 0
549 },
550 [RTL_TD_1] = {
551 .checksum = {
552 .udp = TD1_IP_CS | TD1_UDP_CS,
553 .tcp = TD1_IP_CS | TD1_TCP_CS
554 },
555 .mss_shift = TD1_MSS_SHIFT,
556 .opts_offset = 1
557 }
558};
559
560enum rtl_rx_desc_bit {
469 /* Rx private */ 561 /* Rx private */
470 PID1 = (1 << 18), /* Protocol ID bit 1/2 */ 562 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
471 PID0 = (1 << 17), /* Protocol ID bit 2/2 */ 563 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
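RTL_TD_0 and RTL_TD_1 above describe two incompatible TX descriptor layouts (different checksum bits, MSS shift and opts word), and tx_desc_info[] keys those parameters by txd_version so the hot path can index a table instead of branching on mac_version. The sketch below shows that per-version lookup in isolation; the bit positions and the way the fields are combined are purely illustrative, not the hardware encoding.

#include <stdint.h>
#include <stdio.h>

enum txd_version { TD_0, TD_1 };

/* Illustrative per-version layout parameters; real bit values differ. */
static const struct {
    uint32_t     tcp_csum_bits;
    unsigned int mss_shift;
    unsigned int opts_offset;   /* which opts word carries csum/MSS */
} txd_info[] = {
    [TD_0] = { .tcp_csum_bits = 1u << 16, .mss_shift = 16, .opts_offset = 0 },
    [TD_1] = { .tcp_csum_bits = 1u << 30, .mss_shift = 18, .opts_offset = 1 },
};

/* Place a (made-up) checksum-request bit and MSS field according to the
 * layout selected for this descriptor version. */
static void fill_tx_opts(enum txd_version v, uint32_t mss, uint32_t opts[2])
{
    opts[0] = opts[1] = 0;
    opts[txd_info[v].opts_offset] |=
        txd_info[v].tcp_csum_bits | (mss << txd_info[v].mss_shift);
}

int main(void)
{
    uint32_t opts[2];

    fill_tx_opts(TD_1, 1460, opts);
    printf("opts0=0x%08x opts1=0x%08x\n", opts[0], opts[1]);
    return 0;
}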
@@ -525,13 +617,13 @@ struct rtl8169_counters {
525 617
526struct rtl8169_private { 618struct rtl8169_private {
527 void __iomem *mmio_addr; /* memory map physical address */ 619 void __iomem *mmio_addr; /* memory map physical address */
528 struct pci_dev *pci_dev; /* Index of PCI device */ 620 struct pci_dev *pci_dev;
529 struct net_device *dev; 621 struct net_device *dev;
530 struct napi_struct napi; 622 struct napi_struct napi;
531 spinlock_t lock; /* spin lock flag */ 623 spinlock_t lock;
532 u32 msg_enable; 624 u32 msg_enable;
533 int chipset; 625 u16 txd_version;
534 int mac_version; 626 u16 mac_version;
535 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ 627 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
536 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ 628 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
537 u32 dirty_rx; 629 u32 dirty_rx;
@@ -547,7 +639,6 @@ struct rtl8169_private {
547 u16 intr_event; 639 u16 intr_event;
548 u16 napi_event; 640 u16 napi_event;
549 u16 intr_mask; 641 u16 intr_mask;
550 int phy_1000_ctrl_reg;
551 642
552 struct mdio_ops { 643 struct mdio_ops {
553 void (*write)(void __iomem *, int, int); 644 void (*write)(void __iomem *, int, int);
@@ -588,6 +679,8 @@ MODULE_LICENSE("GPL");
588MODULE_VERSION(RTL8169_VERSION); 679MODULE_VERSION(RTL8169_VERSION);
589MODULE_FIRMWARE(FIRMWARE_8168D_1); 680MODULE_FIRMWARE(FIRMWARE_8168D_1);
590MODULE_FIRMWARE(FIRMWARE_8168D_2); 681MODULE_FIRMWARE(FIRMWARE_8168D_2);
682MODULE_FIRMWARE(FIRMWARE_8168E_1);
683MODULE_FIRMWARE(FIRMWARE_8168E_2);
591MODULE_FIRMWARE(FIRMWARE_8105E_1); 684MODULE_FIRMWARE(FIRMWARE_8105E_1);
592 685
593static int rtl8169_open(struct net_device *dev); 686static int rtl8169_open(struct net_device *dev);
@@ -659,32 +752,49 @@ static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
659#define OOB_CMD_DRIVER_START 0x05 752#define OOB_CMD_DRIVER_START 0x05
660#define OOB_CMD_DRIVER_STOP 0x06 753#define OOB_CMD_DRIVER_STOP 0x06
661 754
755static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
756{
757 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
758}
759
662static void rtl8168_driver_start(struct rtl8169_private *tp) 760static void rtl8168_driver_start(struct rtl8169_private *tp)
663{ 761{
762 u16 reg;
664 int i; 763 int i;
665 764
666 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); 765 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
667 766
767 reg = rtl8168_get_ocp_reg(tp);
768
668 for (i = 0; i < 10; i++) { 769 for (i = 0; i < 10; i++) {
669 msleep(10); 770 msleep(10);
670 if (ocp_read(tp, 0x0f, 0x0010) & 0x00000800) 771 if (ocp_read(tp, 0x0f, reg) & 0x00000800)
671 break; 772 break;
672 } 773 }
673} 774}
674 775
675static void rtl8168_driver_stop(struct rtl8169_private *tp) 776static void rtl8168_driver_stop(struct rtl8169_private *tp)
676{ 777{
778 u16 reg;
677 int i; 779 int i;
678 780
679 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); 781 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
680 782
783 reg = rtl8168_get_ocp_reg(tp);
784
681 for (i = 0; i < 10; i++) { 785 for (i = 0; i < 10; i++) {
682 msleep(10); 786 msleep(10);
683 if ((ocp_read(tp, 0x0f, 0x0010) & 0x00000800) == 0) 787 if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
684 break; 788 break;
685 } 789 }
686} 790}
687 791
792static int r8168dp_check_dash(struct rtl8169_private *tp)
793{
794 u16 reg = rtl8168_get_ocp_reg(tp);
795
796 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
797}
688 798
689static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) 799static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
690{ 800{
@@ -983,9 +1093,8 @@ static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
983} 1093}
984 1094
985static void __rtl8169_check_link_status(struct net_device *dev, 1095static void __rtl8169_check_link_status(struct net_device *dev,
986 struct rtl8169_private *tp, 1096 struct rtl8169_private *tp,
987 void __iomem *ioaddr, 1097 void __iomem *ioaddr, bool pm)
988 bool pm)
989{ 1098{
990 unsigned long flags; 1099 unsigned long flags;
991 1100
@@ -1102,6 +1211,11 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1102 return 0; 1211 return 0;
1103} 1212}
1104 1213
1214static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1215{
1216 return rtl_chip_infos[tp->mac_version].fw_name;
1217}
1218
1105static void rtl8169_get_drvinfo(struct net_device *dev, 1219static void rtl8169_get_drvinfo(struct net_device *dev,
1106 struct ethtool_drvinfo *info) 1220 struct ethtool_drvinfo *info)
1107{ 1221{
@@ -1110,6 +1224,8 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
1110 strcpy(info->driver, MODULENAME); 1224 strcpy(info->driver, MODULENAME);
1111 strcpy(info->version, RTL8169_VERSION); 1225 strcpy(info->version, RTL8169_VERSION);
1112 strcpy(info->bus_info, pci_name(tp->pci_dev)); 1226 strcpy(info->bus_info, pci_name(tp->pci_dev));
1227 strncpy(info->fw_version, IS_ERR_OR_NULL(tp->fw) ? "N/A" :
1228 rtl_lookup_firmware_name(tp), sizeof(info->fw_version) - 1);
1113} 1229}
1114 1230
1115static int rtl8169_get_regs_len(struct net_device *dev) 1231static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1171,16 +1287,7 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1171 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); 1287 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1172 1288
1173 /* The 8100e/8101e/8102e do Fast Ethernet only. */ 1289 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1174 if ((tp->mac_version != RTL_GIGA_MAC_VER_07) && 1290 if (tp->mii.supports_gmii) {
1175 (tp->mac_version != RTL_GIGA_MAC_VER_08) &&
1176 (tp->mac_version != RTL_GIGA_MAC_VER_09) &&
1177 (tp->mac_version != RTL_GIGA_MAC_VER_10) &&
1178 (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
1179 (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
1180 (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
1181 (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
1182 (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
1183 (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
1184 if (adv & ADVERTISED_1000baseT_Half) 1291 if (adv & ADVERTISED_1000baseT_Half)
1185 giga_ctrl |= ADVERTISE_1000HALF; 1292 giga_ctrl |= ADVERTISE_1000HALF;
1186 if (adv & ADVERTISED_1000baseT_Full) 1293 if (adv & ADVERTISED_1000baseT_Full)
@@ -1210,12 +1317,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
1210 bmcr |= BMCR_FULLDPLX; 1317 bmcr |= BMCR_FULLDPLX;
1211 } 1318 }
1212 1319
1213 tp->phy_1000_ctrl_reg = giga_ctrl;
1214
1215 rtl_writephy(tp, MII_BMCR, bmcr); 1320 rtl_writephy(tp, MII_BMCR, bmcr);
1216 1321
1217 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || 1322 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1218 (tp->mac_version == RTL_GIGA_MAC_VER_03)) { 1323 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1219 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { 1324 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1220 rtl_writephy(tp, 0x17, 0x2138); 1325 rtl_writephy(tp, 0x17, 0x2138);
1221 rtl_writephy(tp, 0x0e, 0x0260); 1326 rtl_writephy(tp, 0x0e, 0x0260);
@@ -1237,10 +1342,14 @@ static int rtl8169_set_speed(struct net_device *dev,
1237 int ret; 1342 int ret;
1238 1343
1239 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); 1344 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1345 if (ret < 0)
1346 goto out;
1240 1347
1241 if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) 1348 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1349 (advertising & ADVERTISED_1000baseT_Full)) {
1242 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); 1350 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
1243 1351 }
1352out:
1244 return ret; 1353 return ret;
1245} 1354}
1246 1355
@@ -1250,22 +1359,25 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1250 unsigned long flags; 1359 unsigned long flags;
1251 int ret; 1360 int ret;
1252 1361
1362 del_timer_sync(&tp->timer);
1363
1253 spin_lock_irqsave(&tp->lock, flags); 1364 spin_lock_irqsave(&tp->lock, flags);
1254 ret = rtl8169_set_speed(dev, 1365 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1255 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); 1366 cmd->duplex, cmd->advertising);
1256 spin_unlock_irqrestore(&tp->lock, flags); 1367 spin_unlock_irqrestore(&tp->lock, flags);
1257 1368
1258 return ret; 1369 return ret;
1259} 1370}
1260 1371
1261static u32 rtl8169_get_rx_csum(struct net_device *dev) 1372static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
1262{ 1373{
1263 struct rtl8169_private *tp = netdev_priv(dev); 1374 if (dev->mtu > TD_MSS_MAX)
1375 features &= ~NETIF_F_ALL_TSO;
1264 1376
1265 return tp->cp_cmd & RxChkSum; 1377 return features;
1266} 1378}
1267 1379
1268static int rtl8169_set_rx_csum(struct net_device *dev, u32 data) 1380static int rtl8169_set_features(struct net_device *dev, u32 features)
1269{ 1381{
1270 struct rtl8169_private *tp = netdev_priv(dev); 1382 struct rtl8169_private *tp = netdev_priv(dev);
1271 void __iomem *ioaddr = tp->mmio_addr; 1383 void __iomem *ioaddr = tp->mmio_addr;
@@ -1273,11 +1385,16 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1273 1385
1274 spin_lock_irqsave(&tp->lock, flags); 1386 spin_lock_irqsave(&tp->lock, flags);
1275 1387
1276 if (data) 1388 if (features & NETIF_F_RXCSUM)
1277 tp->cp_cmd |= RxChkSum; 1389 tp->cp_cmd |= RxChkSum;
1278 else 1390 else
1279 tp->cp_cmd &= ~RxChkSum; 1391 tp->cp_cmd &= ~RxChkSum;
1280 1392
1393 if (dev->features & NETIF_F_HW_VLAN_RX)
1394 tp->cp_cmd |= RxVlan;
1395 else
1396 tp->cp_cmd &= ~RxVlan;
1397
1281 RTL_W16(CPlusCmd, tp->cp_cmd); 1398 RTL_W16(CPlusCmd, tp->cp_cmd);
1282 RTL_R16(CPlusCmd); 1399 RTL_R16(CPlusCmd);
1283 1400
@@ -1293,27 +1410,6 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1293 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 1410 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1294} 1411}
1295 1412
1296#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
1297
1298static void rtl8169_vlan_mode(struct net_device *dev)
1299{
1300 struct rtl8169_private *tp = netdev_priv(dev);
1301 void __iomem *ioaddr = tp->mmio_addr;
1302 unsigned long flags;
1303
1304 spin_lock_irqsave(&tp->lock, flags);
1305 if (dev->features & NETIF_F_HW_VLAN_RX)
1306 tp->cp_cmd |= RxVlan;
1307 else
1308 tp->cp_cmd &= ~RxVlan;
1309 RTL_W16(CPlusCmd, tp->cp_cmd);
1310 /* PCI commit */
1311 RTL_R16(CPlusCmd);
1312 spin_unlock_irqrestore(&tp->lock, flags);
1313
1314 dev->vlan_features = dev->features &~ NETIF_F_HW_VLAN_TX_RX;
1315}
1316
1317static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) 1413static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1318{ 1414{
1319 u32 opts2 = le32_to_cpu(desc->opts2); 1415 u32 opts2 = le32_to_cpu(desc->opts2);
@@ -1339,7 +1435,7 @@ static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1339 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; 1435 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1340 cmd->autoneg = !!(status & TBINwEnable); 1436 cmd->autoneg = !!(status & TBINwEnable);
1341 1437
1342 cmd->speed = SPEED_1000; 1438 ethtool_cmd_speed_set(cmd, SPEED_1000);
1343 cmd->duplex = DUPLEX_FULL; /* Always set */ 1439 cmd->duplex = DUPLEX_FULL; /* Always set */
1344 1440
1345 return 0; 1441 return 0;
@@ -1424,11 +1520,11 @@ static void rtl8169_update_counters(struct net_device *dev)
1424{ 1520{
1425 struct rtl8169_private *tp = netdev_priv(dev); 1521 struct rtl8169_private *tp = netdev_priv(dev);
1426 void __iomem *ioaddr = tp->mmio_addr; 1522 void __iomem *ioaddr = tp->mmio_addr;
1523 struct device *d = &tp->pci_dev->dev;
1427 struct rtl8169_counters *counters; 1524 struct rtl8169_counters *counters;
1428 dma_addr_t paddr; 1525 dma_addr_t paddr;
1429 u32 cmd; 1526 u32 cmd;
1430 int wait = 1000; 1527 int wait = 1000;
1431 struct device *d = &tp->pci_dev->dev;
1432 1528
1433 /* 1529 /*
1434 * Some chips are unable to dump tally counters when the receiver 1530 * Some chips are unable to dump tally counters when the receiver
@@ -1448,7 +1544,6 @@ static void rtl8169_update_counters(struct net_device *dev)
1448 1544
1449 while (wait--) { 1545 while (wait--) {
1450 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) { 1546 if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
1451 /* copy updated counters */
1452 memcpy(&tp->counters, counters, sizeof(*counters)); 1547 memcpy(&tp->counters, counters, sizeof(*counters));
1453 break; 1548 break;
1454 } 1549 }
@@ -1494,28 +1589,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1494 } 1589 }
1495} 1590}
1496 1591
1497static int rtl8169_set_flags(struct net_device *dev, u32 data)
1498{
1499 struct rtl8169_private *tp = netdev_priv(dev);
1500 unsigned long old_feat = dev->features;
1501 int rc;
1502
1503 if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
1504 !(data & ETH_FLAG_RXVLAN)) {
1505 netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
1506 return -EINVAL;
1507 }
1508
1509 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
1510 if (rc)
1511 return rc;
1512
1513 if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
1514 rtl8169_vlan_mode(dev);
1515
1516 return 0;
1517}
1518
1519static const struct ethtool_ops rtl8169_ethtool_ops = { 1592static const struct ethtool_ops rtl8169_ethtool_ops = {
1520 .get_drvinfo = rtl8169_get_drvinfo, 1593 .get_drvinfo = rtl8169_get_drvinfo,
1521 .get_regs_len = rtl8169_get_regs_len, 1594 .get_regs_len = rtl8169_get_regs_len,
@@ -1524,24 +1597,18 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1524 .set_settings = rtl8169_set_settings, 1597 .set_settings = rtl8169_set_settings,
1525 .get_msglevel = rtl8169_get_msglevel, 1598 .get_msglevel = rtl8169_get_msglevel,
1526 .set_msglevel = rtl8169_set_msglevel, 1599 .set_msglevel = rtl8169_set_msglevel,
1527 .get_rx_csum = rtl8169_get_rx_csum,
1528 .set_rx_csum = rtl8169_set_rx_csum,
1529 .set_tx_csum = ethtool_op_set_tx_csum,
1530 .set_sg = ethtool_op_set_sg,
1531 .set_tso = ethtool_op_set_tso,
1532 .get_regs = rtl8169_get_regs, 1600 .get_regs = rtl8169_get_regs,
1533 .get_wol = rtl8169_get_wol, 1601 .get_wol = rtl8169_get_wol,
1534 .set_wol = rtl8169_set_wol, 1602 .set_wol = rtl8169_set_wol,
1535 .get_strings = rtl8169_get_strings, 1603 .get_strings = rtl8169_get_strings,
1536 .get_sset_count = rtl8169_get_sset_count, 1604 .get_sset_count = rtl8169_get_sset_count,
1537 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1605 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1538 .set_flags = rtl8169_set_flags,
1539 .get_flags = ethtool_op_get_flags,
1540}; 1606};
1541 1607
1542static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1608static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1543 void __iomem *ioaddr) 1609 struct net_device *dev, u8 default_version)
1544{ 1610{
1611 void __iomem *ioaddr = tp->mmio_addr;
1545 /* 1612 /*
1546 * The driver currently handles the 8168Bf and the 8168Be identically 1613 * The driver currently handles the 8168Bf and the 8168Be identically
1547 * but they can be identified more specifically through the test below 1614 * but they can be identified more specifically through the test below
@@ -1558,6 +1625,11 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1558 u32 val; 1625 u32 val;
1559 int mac_version; 1626 int mac_version;
1560 } mac_info[] = { 1627 } mac_info[] = {
1628 /* 8168E family. */
1629 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
1630 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
1631 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
1632
1561 /* 8168D family. */ 1633 /* 8168D family. */
1562 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, 1634 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
1563 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, 1635 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
@@ -1566,6 +1638,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1566 /* 8168DP family. */ 1638 /* 8168DP family. */
1567 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, 1639 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
1568 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, 1640 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
1641 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
1569 1642
1570 /* 8168C family. */ 1643 /* 8168C family. */
1571 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, 1644 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
@@ -1585,6 +1658,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1585 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1658 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1586 1659
1587 /* 8101 family. */ 1660 /* 8101 family. */
1661 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
1588 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, 1662 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1589 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, 1663 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
1590 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, 1664 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
@@ -1621,6 +1695,12 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1621 while ((reg & p->mask) != p->val) 1695 while ((reg & p->mask) != p->val)
1622 p++; 1696 p++;
1623 tp->mac_version = p->mac_version; 1697 tp->mac_version = p->mac_version;
1698
1699 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
1700 netif_notice(tp, probe, dev,
1701 "unknown MAC, using family default\n");
1702 tp->mac_version = default_version;
1703 }
1624} 1704}
1625 1705
1626static void rtl8169_print_mac_version(struct rtl8169_private *tp) 1706static void rtl8169_print_mac_version(struct rtl8169_private *tp)
@@ -1690,14 +1770,14 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1690 case PHY_BJMPN: 1770 case PHY_BJMPN:
1691 if (regno > index) { 1771 if (regno > index) {
1692 netif_err(tp, probe, tp->dev, 1772 netif_err(tp, probe, tp->dev,
1693 "Out of range of firmware\n"); 1773 "Out of range of firmware\n");
1694 return; 1774 return;
1695 } 1775 }
1696 break; 1776 break;
1697 case PHY_READCOUNT_EQ_SKIP: 1777 case PHY_READCOUNT_EQ_SKIP:
1698 if (index + 2 >= fw_size) { 1778 if (index + 2 >= fw_size) {
1699 netif_err(tp, probe, tp->dev, 1779 netif_err(tp, probe, tp->dev,
1700 "Out of range of firmware\n"); 1780 "Out of range of firmware\n");
1701 return; 1781 return;
1702 } 1782 }
1703 break; 1783 break;
@@ -1706,7 +1786,7 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1706 case PHY_SKIPN: 1786 case PHY_SKIPN:
1707 if (index + 1 + regno >= fw_size) { 1787 if (index + 1 + regno >= fw_size) {
1708 netif_err(tp, probe, tp->dev, 1788 netif_err(tp, probe, tp->dev,
1709 "Out of range of firmware\n"); 1789 "Out of range of firmware\n");
1710 return; 1790 return;
1711 } 1791 }
1712 break; 1792 break;
@@ -1762,10 +1842,7 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1762 index++; 1842 index++;
1763 break; 1843 break;
1764 case PHY_READCOUNT_EQ_SKIP: 1844 case PHY_READCOUNT_EQ_SKIP:
1765 if (count == data) 1845 index += (count == data) ? 2 : 1;
1766 index += 2;
1767 else
1768 index += 1;
1769 break; 1846 break;
1770 case PHY_COMP_EQ_SKIPN: 1847 case PHY_COMP_EQ_SKIPN:
1771 if (predata == data) 1848 if (predata == data)
@@ -2176,7 +2253,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2176 2253
2177 /* 2254 /*
2178 * Tx Error Issue 2255 * Tx Error Issue
2179 * enhance line driver power 2256 * Enhance line driver power
2180 */ 2257 */
2181 { 0x1f, 0x0002 }, 2258 { 0x1f, 0x0002 },
2182 { 0x06, 0x5561 }, 2259 { 0x06, 0x5561 },
@@ -2288,7 +2365,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2288 2365
2289 /* 2366 /*
2290 * Tx Error Issue 2367 * Tx Error Issue
2291 * enhance line driver power 2368 * Enhance line driver power
2292 */ 2369 */
2293 { 0x1f, 0x0002 }, 2370 { 0x1f, 0x0002 },
2294 { 0x06, 0x5561 }, 2371 { 0x06, 0x5561 },
@@ -2444,6 +2521,79 @@ static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
2444 rtl_patchphy(tp, 0x0d, 1 << 5); 2521 rtl_patchphy(tp, 0x0d, 1 << 5);
2445} 2522}
2446 2523
2524static void rtl8168e_hw_phy_config(struct rtl8169_private *tp)
2525{
2526 static const struct phy_reg phy_reg_init[] = {
2527 /* Enable Delay cap */
2528 { 0x1f, 0x0005 },
2529 { 0x05, 0x8b80 },
2530 { 0x06, 0xc896 },
2531 { 0x1f, 0x0000 },
2532
2533 /* Channel estimation fine tune */
2534 { 0x1f, 0x0001 },
2535 { 0x0b, 0x6c20 },
2536 { 0x07, 0x2872 },
2537 { 0x1c, 0xefff },
2538 { 0x1f, 0x0003 },
2539 { 0x14, 0x6420 },
2540 { 0x1f, 0x0000 },
2541
2542 /* Update PFM & 10M TX idle timer */
2543 { 0x1f, 0x0007 },
2544 { 0x1e, 0x002f },
2545 { 0x15, 0x1919 },
2546 { 0x1f, 0x0000 },
2547
2548 { 0x1f, 0x0007 },
2549 { 0x1e, 0x00ac },
2550 { 0x18, 0x0006 },
2551 { 0x1f, 0x0000 }
2552 };
2553
2554 rtl_apply_firmware(tp);
2555
2556 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2557
2558 /* DCO enable for 10M IDLE Power */
2559 rtl_writephy(tp, 0x1f, 0x0007);
2560 rtl_writephy(tp, 0x1e, 0x0023);
2561 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
2562 rtl_writephy(tp, 0x1f, 0x0000);
2563
2564 /* For impedance matching */
2565 rtl_writephy(tp, 0x1f, 0x0002);
2566 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
2567 rtl_writephy(tp, 0x1f, 0x0000);
2568
2569 /* PHY auto speed down */
2570 rtl_writephy(tp, 0x1f, 0x0007);
2571 rtl_writephy(tp, 0x1e, 0x002d);
2572 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
2573 rtl_writephy(tp, 0x1f, 0x0000);
2574 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
2575
2576 rtl_writephy(tp, 0x1f, 0x0005);
2577 rtl_writephy(tp, 0x05, 0x8b86);
2578 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
2579 rtl_writephy(tp, 0x1f, 0x0000);
2580
2581 rtl_writephy(tp, 0x1f, 0x0005);
2582 rtl_writephy(tp, 0x05, 0x8b85);
2583 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
2584 rtl_writephy(tp, 0x1f, 0x0007);
2585 rtl_writephy(tp, 0x1e, 0x0020);
2586 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
2587 rtl_writephy(tp, 0x1f, 0x0006);
2588 rtl_writephy(tp, 0x00, 0x5a00);
2589 rtl_writephy(tp, 0x1f, 0x0000);
2590 rtl_writephy(tp, 0x0d, 0x0007);
2591 rtl_writephy(tp, 0x0e, 0x003c);
2592 rtl_writephy(tp, 0x0d, 0x4007);
2593 rtl_writephy(tp, 0x0e, 0x0000);
2594 rtl_writephy(tp, 0x0d, 0x0000);
2595}
2596
2447static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) 2597static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2448{ 2598{
2449 static const struct phy_reg phy_reg_init[] = { 2599 static const struct phy_reg phy_reg_init[] = {
@@ -2558,6 +2708,13 @@ static void rtl_hw_phy_config(struct net_device *dev)
2558 case RTL_GIGA_MAC_VER_30: 2708 case RTL_GIGA_MAC_VER_30:
2559 rtl8105e_hw_phy_config(tp); 2709 rtl8105e_hw_phy_config(tp);
2560 break; 2710 break;
2711 case RTL_GIGA_MAC_VER_31:
2712 /* None. */
2713 break;
2714 case RTL_GIGA_MAC_VER_32:
2715 case RTL_GIGA_MAC_VER_33:
2716 rtl8168e_hw_phy_config(tp);
2717 break;
2561 2718
2562 default: 2719 default:
2563 break; 2720 break;
@@ -2574,9 +2731,6 @@ static void rtl8169_phy_timer(unsigned long __opaque)
2574 2731
2575 assert(tp->mac_version > RTL_GIGA_MAC_VER_01); 2732 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
2576 2733
2577 if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
2578 return;
2579
2580 spin_lock_irq(&tp->lock); 2734 spin_lock_irq(&tp->lock);
2581 2735
2582 if (tp->phy_reset_pending(tp)) { 2736 if (tp->phy_reset_pending(tp)) {
@@ -2601,28 +2755,6 @@ out_unlock:
2601 spin_unlock_irq(&tp->lock); 2755 spin_unlock_irq(&tp->lock);
2602} 2756}
2603 2757
2604static inline void rtl8169_delete_timer(struct net_device *dev)
2605{
2606 struct rtl8169_private *tp = netdev_priv(dev);
2607 struct timer_list *timer = &tp->timer;
2608
2609 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
2610 return;
2611
2612 del_timer_sync(timer);
2613}
2614
2615static inline void rtl8169_request_timer(struct net_device *dev)
2616{
2617 struct rtl8169_private *tp = netdev_priv(dev);
2618 struct timer_list *timer = &tp->timer;
2619
2620 if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
2621 return;
2622
2623 mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
2624}
2625
2626#ifdef CONFIG_NET_POLL_CONTROLLER 2758#ifdef CONFIG_NET_POLL_CONTROLLER
2627/* 2759/*
2628 * Polling 'interrupt' - used by things like netconsole to send skbs 2760 * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -2690,11 +2822,11 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2690 rtl8169_phy_reset(dev, tp); 2822 rtl8169_phy_reset(dev, tp);
2691 2823
2692 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, 2824 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
2693 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 2825 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2694 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 2826 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
2695 (tp->mii.supports_gmii ? 2827 (tp->mii.supports_gmii ?
2696 ADVERTISED_1000baseT_Half | 2828 ADVERTISED_1000baseT_Half |
2697 ADVERTISED_1000baseT_Full : 0)); 2829 ADVERTISED_1000baseT_Full : 0));
2698 2830
2699 if (RTL_R8(PHYstatus) & TBI_Enable) 2831 if (RTL_R8(PHYstatus) & TBI_Enable)
2700 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 2832 netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2747,7 +2879,8 @@ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2747 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; 2879 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
2748} 2880}
2749 2881
2750static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) 2882static int rtl_xmii_ioctl(struct rtl8169_private *tp,
2883 struct mii_ioctl_data *data, int cmd)
2751{ 2884{
2752 switch (cmd) { 2885 switch (cmd) {
2753 case SIOCGMIIPHY: 2886 case SIOCGMIIPHY:
@@ -2847,6 +2980,8 @@ static const struct net_device_ops rtl8169_netdev_ops = {
2847 .ndo_tx_timeout = rtl8169_tx_timeout, 2980 .ndo_tx_timeout = rtl8169_tx_timeout,
2848 .ndo_validate_addr = eth_validate_addr, 2981 .ndo_validate_addr = eth_validate_addr,
2849 .ndo_change_mtu = rtl8169_change_mtu, 2982 .ndo_change_mtu = rtl8169_change_mtu,
2983 .ndo_fix_features = rtl8169_fix_features,
2984 .ndo_set_features = rtl8169_set_features,
2850 .ndo_set_mac_address = rtl_set_mac_address, 2985 .ndo_set_mac_address = rtl_set_mac_address,
2851 .ndo_do_ioctl = rtl8169_ioctl, 2986 .ndo_do_ioctl = rtl8169_ioctl,
2852 .ndo_set_multicast_list = rtl_set_rx_mode, 2987 .ndo_set_multicast_list = rtl_set_rx_mode,
@@ -2866,6 +3001,7 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
2866 ops->read = r8168dp_1_mdio_read; 3001 ops->read = r8168dp_1_mdio_read;
2867 break; 3002 break;
2868 case RTL_GIGA_MAC_VER_28: 3003 case RTL_GIGA_MAC_VER_28:
3004 case RTL_GIGA_MAC_VER_31:
2869 ops->write = r8168dp_2_mdio_write; 3005 ops->write = r8168dp_2_mdio_write;
2870 ops->read = r8168dp_2_mdio_read; 3006 ops->read = r8168dp_2_mdio_read;
2871 break; 3007 break;
@@ -2907,33 +3043,82 @@ static void r810x_pll_power_up(struct rtl8169_private *tp)
2907static void r8168_phy_power_up(struct rtl8169_private *tp) 3043static void r8168_phy_power_up(struct rtl8169_private *tp)
2908{ 3044{
2909 rtl_writephy(tp, 0x1f, 0x0000); 3045 rtl_writephy(tp, 0x1f, 0x0000);
2910 rtl_writephy(tp, 0x0e, 0x0000); 3046 switch (tp->mac_version) {
3047 case RTL_GIGA_MAC_VER_11:
3048 case RTL_GIGA_MAC_VER_12:
3049 case RTL_GIGA_MAC_VER_17:
3050 case RTL_GIGA_MAC_VER_18:
3051 case RTL_GIGA_MAC_VER_19:
3052 case RTL_GIGA_MAC_VER_20:
3053 case RTL_GIGA_MAC_VER_21:
3054 case RTL_GIGA_MAC_VER_22:
3055 case RTL_GIGA_MAC_VER_23:
3056 case RTL_GIGA_MAC_VER_24:
3057 case RTL_GIGA_MAC_VER_25:
3058 case RTL_GIGA_MAC_VER_26:
3059 case RTL_GIGA_MAC_VER_27:
3060 case RTL_GIGA_MAC_VER_28:
3061 case RTL_GIGA_MAC_VER_31:
3062 rtl_writephy(tp, 0x0e, 0x0000);
3063 break;
3064 default:
3065 break;
3066 }
2911 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); 3067 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
2912} 3068}
2913 3069
2914static void r8168_phy_power_down(struct rtl8169_private *tp) 3070static void r8168_phy_power_down(struct rtl8169_private *tp)
2915{ 3071{
2916 rtl_writephy(tp, 0x1f, 0x0000); 3072 rtl_writephy(tp, 0x1f, 0x0000);
2917 rtl_writephy(tp, 0x0e, 0x0200); 3073 switch (tp->mac_version) {
2918 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); 3074 case RTL_GIGA_MAC_VER_32:
3075 case RTL_GIGA_MAC_VER_33:
3076 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3077 break;
3078
3079 case RTL_GIGA_MAC_VER_11:
3080 case RTL_GIGA_MAC_VER_12:
3081 case RTL_GIGA_MAC_VER_17:
3082 case RTL_GIGA_MAC_VER_18:
3083 case RTL_GIGA_MAC_VER_19:
3084 case RTL_GIGA_MAC_VER_20:
3085 case RTL_GIGA_MAC_VER_21:
3086 case RTL_GIGA_MAC_VER_22:
3087 case RTL_GIGA_MAC_VER_23:
3088 case RTL_GIGA_MAC_VER_24:
3089 case RTL_GIGA_MAC_VER_25:
3090 case RTL_GIGA_MAC_VER_26:
3091 case RTL_GIGA_MAC_VER_27:
3092 case RTL_GIGA_MAC_VER_28:
3093 case RTL_GIGA_MAC_VER_31:
3094 rtl_writephy(tp, 0x0e, 0x0200);
3095 default:
3096 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3097 break;
3098 }
2919} 3099}
2920 3100
2921static void r8168_pll_power_down(struct rtl8169_private *tp) 3101static void r8168_pll_power_down(struct rtl8169_private *tp)
2922{ 3102{
2923 void __iomem *ioaddr = tp->mmio_addr; 3103 void __iomem *ioaddr = tp->mmio_addr;
2924 3104
2925 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3105 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
2926 (tp->mac_version == RTL_GIGA_MAC_VER_28)) && 3106 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
2927 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { 3107 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3108 r8168dp_check_dash(tp)) {
2928 return; 3109 return;
2929 } 3110 }
2930 3111
2931 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || 3112 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
2932 (tp->mac_version == RTL_GIGA_MAC_VER_24)) && 3113 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
2933 (RTL_R16(CPlusCmd) & ASF)) { 3114 (RTL_R16(CPlusCmd) & ASF)) {
2934 return; 3115 return;
2935 } 3116 }
2936 3117
3118 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3119 tp->mac_version == RTL_GIGA_MAC_VER_33)
3120 rtl_ephy_write(ioaddr, 0x19, 0xff64);
3121
2937 if (__rtl8169_get_wol(tp) & WAKE_ANY) { 3122 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
2938 rtl_writephy(tp, 0x1f, 0x0000); 3123 rtl_writephy(tp, 0x1f, 0x0000);
2939 rtl_writephy(tp, MII_BMCR, 0x0000); 3124 rtl_writephy(tp, MII_BMCR, 0x0000);
@@ -2950,6 +3135,9 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2950 case RTL_GIGA_MAC_VER_26: 3135 case RTL_GIGA_MAC_VER_26:
2951 case RTL_GIGA_MAC_VER_27: 3136 case RTL_GIGA_MAC_VER_27:
2952 case RTL_GIGA_MAC_VER_28: 3137 case RTL_GIGA_MAC_VER_28:
3138 case RTL_GIGA_MAC_VER_31:
3139 case RTL_GIGA_MAC_VER_32:
3140 case RTL_GIGA_MAC_VER_33:
2953 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 3141 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
2954 break; 3142 break;
2955 } 3143 }
@@ -2959,9 +3147,10 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
2959{ 3147{
2960 void __iomem *ioaddr = tp->mmio_addr; 3148 void __iomem *ioaddr = tp->mmio_addr;
2961 3149
2962 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3150 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
2963 (tp->mac_version == RTL_GIGA_MAC_VER_28)) && 3151 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
2964 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { 3152 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3153 r8168dp_check_dash(tp)) {
2965 return; 3154 return;
2966 } 3155 }
2967 3156
@@ -2970,6 +3159,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
2970 case RTL_GIGA_MAC_VER_26: 3159 case RTL_GIGA_MAC_VER_26:
2971 case RTL_GIGA_MAC_VER_27: 3160 case RTL_GIGA_MAC_VER_27:
2972 case RTL_GIGA_MAC_VER_28: 3161 case RTL_GIGA_MAC_VER_28:
3162 case RTL_GIGA_MAC_VER_31:
3163 case RTL_GIGA_MAC_VER_32:
3164 case RTL_GIGA_MAC_VER_33:
2973 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 3165 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
2974 break; 3166 break;
2975 } 3167 }
@@ -3024,6 +3216,9 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3024 case RTL_GIGA_MAC_VER_26: 3216 case RTL_GIGA_MAC_VER_26:
3025 case RTL_GIGA_MAC_VER_27: 3217 case RTL_GIGA_MAC_VER_27:
3026 case RTL_GIGA_MAC_VER_28: 3218 case RTL_GIGA_MAC_VER_28:
3219 case RTL_GIGA_MAC_VER_31:
3220 case RTL_GIGA_MAC_VER_32:
3221 case RTL_GIGA_MAC_VER_33:
3027 ops->down = r8168_pll_power_down; 3222 ops->down = r8168_pll_power_down;
3028 ops->up = r8168_pll_power_up; 3223 ops->up = r8168_pll_power_up;
3029 break; 3224 break;
@@ -3035,6 +3230,22 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3035 } 3230 }
3036} 3231}
3037 3232
3233static void rtl_hw_reset(struct rtl8169_private *tp)
3234{
3235 void __iomem *ioaddr = tp->mmio_addr;
3236 int i;
3237
3238 /* Soft reset the chip. */
3239 RTL_W8(ChipCmd, CmdReset);
3240
3241 /* Check that the chip has finished the reset. */
3242 for (i = 0; i < 100; i++) {
3243 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3244 break;
3245 msleep_interruptible(1);
3246 }
3247}
3248
3038static int __devinit 3249static int __devinit
3039rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 3250rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3040{ 3251{
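Editorial note on the hunk above: the new rtl_hw_reset() helper factors the soft-reset sequence (issue CmdReset, then poll ChipCmd until the bit self-clears, giving up after roughly 100 ms) out of rtl_hw_start() and the probe path. A minimal sketch of the same poll-until-clear idiom follows; the names and register layout are hypothetical, not the driver's.

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Hypothetical helper: wait for a self-clearing command bit
	 * after issuing a soft reset; give up after ~100 ms. */
	static int wait_bit_clear(void __iomem *reg, u8 bit)
	{
		int i;

		for (i = 0; i < 100; i++) {
			if (!(readb(reg) & bit))
				return 0;
			msleep(1);
		}
		return -ETIMEDOUT;
	}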
@@ -3044,7 +3255,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3044 struct mii_if_info *mii; 3255 struct mii_if_info *mii;
3045 struct net_device *dev; 3256 struct net_device *dev;
3046 void __iomem *ioaddr; 3257 void __iomem *ioaddr;
3047 unsigned int i; 3258 int chipset, i;
3048 int rc; 3259 int rc;
3049 3260
3050 if (netif_msg_drv(&debug)) { 3261 if (netif_msg_drv(&debug)) {
@@ -3134,6 +3345,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3134 rc = -EIO; 3345 rc = -EIO;
3135 goto err_out_free_res_3; 3346 goto err_out_free_res_3;
3136 } 3347 }
3348 tp->mmio_addr = ioaddr;
3137 3349
3138 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3350 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3139 if (!tp->pcie_cap) 3351 if (!tp->pcie_cap)
@@ -3141,22 +3353,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3141 3353
3142 RTL_W16(IntrMask, 0x0000); 3354 RTL_W16(IntrMask, 0x0000);
3143 3355
3144 /* Soft reset the chip. */ 3356 rtl_hw_reset(tp);
3145 RTL_W8(ChipCmd, CmdReset);
3146
3147 /* Check that the chip has finished the reset. */
3148 for (i = 0; i < 100; i++) {
3149 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3150 break;
3151 msleep_interruptible(1);
3152 }
3153 3357
3154 RTL_W16(IntrStatus, 0xffff); 3358 RTL_W16(IntrStatus, 0xffff);
3155 3359
3156 pci_set_master(pdev); 3360 pci_set_master(pdev);
3157 3361
3158 /* Identify chip attached to board */ 3362 /* Identify chip attached to board */
3159 rtl8169_get_mac_version(tp, ioaddr); 3363 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
3160 3364
3161 /* 3365 /*
3162 * Pretend we are using VLANs; This bypasses a nasty bug where 3366 * Pretend we are using VLANs; This bypasses a nasty bug where
@@ -3168,25 +3372,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3168 rtl_init_mdio_ops(tp); 3372 rtl_init_mdio_ops(tp);
3169 rtl_init_pll_power_ops(tp); 3373 rtl_init_pll_power_ops(tp);
3170 3374
3171 /* Use appropriate default if unknown */
3172 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
3173 netif_notice(tp, probe, dev,
3174 "unknown MAC, using family default\n");
3175 tp->mac_version = cfg->default_ver;
3176 }
3177
3178 rtl8169_print_mac_version(tp); 3375 rtl8169_print_mac_version(tp);
3179 3376
3180 for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) { 3377 chipset = tp->mac_version;
3181 if (tp->mac_version == rtl_chip_info[i].mac_version) 3378 tp->txd_version = rtl_chip_infos[chipset].txd_version;
3182 break;
3183 }
3184 if (i == ARRAY_SIZE(rtl_chip_info)) {
3185 dev_err(&pdev->dev,
3186 "driver bug, MAC version not found in rtl_chip_info\n");
3187 goto err_out_msi_4;
3188 }
3189 tp->chipset = i;
3190 3379
3191 RTL_W8(Cfg9346, Cfg9346_Unlock); 3380 RTL_W8(Cfg9346, Cfg9346_Unlock);
3192 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 3381 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
@@ -3206,8 +3395,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3206 tp->phy_reset_pending = rtl8169_tbi_reset_pending; 3395 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
3207 tp->link_ok = rtl8169_tbi_link_ok; 3396 tp->link_ok = rtl8169_tbi_link_ok;
3208 tp->do_ioctl = rtl_tbi_ioctl; 3397 tp->do_ioctl = rtl_tbi_ioctl;
3209
3210 tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
3211 } else { 3398 } else {
3212 tp->set_speed = rtl8169_set_speed_xmii; 3399 tp->set_speed = rtl8169_set_speed_xmii;
3213 tp->get_settings = rtl8169_gset_xmii; 3400 tp->get_settings = rtl8169_gset_xmii;
@@ -3219,8 +3406,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3219 3406
3220 spin_lock_init(&tp->lock); 3407 spin_lock_init(&tp->lock);
3221 3408
3222 tp->mmio_addr = ioaddr;
3223
3224 /* Get MAC address */ 3409 /* Get MAC address */
3225 for (i = 0; i < MAC_ADDR_LEN; i++) 3410 for (i = 0; i < MAC_ADDR_LEN; i++)
3226 dev->dev_addr[i] = RTL_R8(MAC0 + i); 3411 dev->dev_addr[i] = RTL_R8(MAC0 + i);
@@ -3233,7 +3418,19 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3233 3418
3234 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 3419 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
3235 3420
3236 dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO; 3421 /* don't enable SG, IP_CSUM and TSO by default - it might not work
3422 * properly for all devices */
3423 dev->features |= NETIF_F_RXCSUM |
3424 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3425
3426 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
3427 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3428 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
3429 NETIF_F_HIGHDMA;
3430
3431 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3432 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
3433 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
3237 3434
3238 tp->intr_mask = 0xffff; 3435 tp->intr_mask = 0xffff;
3239 tp->hw_start = cfg->hw_start; 3436 tp->hw_start = cfg->hw_start;
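The probe changes above move r8169 to the features API: dev->hw_features advertises the offloads userspace may toggle (SG, IP_CSUM, TSO, RXCSUM, VLAN), dev->features holds the conservative defaults, and the ndo_fix_features/ndo_set_features callbacks wired into rtl8169_netdev_ops arbitrate changes. A rough sketch of how a driver typically hooks this up, with hypothetical foo_* names rather than the r8169 callbacks:

	#include <linux/netdevice.h>

	static u32 foo_fix_features(struct net_device *dev, u32 features)
	{
		/* Example dependency: LRO is only useful with RX checksumming. */
		if (!(features & NETIF_F_RXCSUM))
			features &= ~NETIF_F_LRO;
		return features;
	}

	static int foo_set_features(struct net_device *dev, u32 features)
	{
		u32 changed = dev->features ^ features;

		if (changed & NETIF_F_RXCSUM)
			foo_program_rx_csum(dev, features & NETIF_F_RXCSUM); /* hypothetical */
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_fix_features = foo_fix_features,
		.ndo_set_features = foo_set_features,
		/* ... */
	};

When other constraints change, the driver asks the core to re-run the fixup, which is why the MTU hunk further down adds a netdev_update_features() call.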
@@ -3253,12 +3450,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3253 pci_set_drvdata(pdev, dev); 3450 pci_set_drvdata(pdev, dev);
3254 3451
3255 netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n", 3452 netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
3256 rtl_chip_info[tp->chipset].name, 3453 rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
3257 dev->base_addr, dev->dev_addr,
3258 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); 3454 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
3259 3455
3260 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3456 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3261 (tp->mac_version == RTL_GIGA_MAC_VER_28)) { 3457 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3458 tp->mac_version == RTL_GIGA_MAC_VER_31) {
3262 rtl8168_driver_start(tp); 3459 rtl8168_driver_start(tp);
3263 } 3460 }
3264 3461
@@ -3290,8 +3487,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3290 struct net_device *dev = pci_get_drvdata(pdev); 3487 struct net_device *dev = pci_get_drvdata(pdev);
3291 struct rtl8169_private *tp = netdev_priv(dev); 3488 struct rtl8169_private *tp = netdev_priv(dev);
3292 3489
3293 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) || 3490 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3294 (tp->mac_version == RTL_GIGA_MAC_VER_28)) { 3491 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3492 tp->mac_version == RTL_GIGA_MAC_VER_31) {
3295 rtl8168_driver_stop(tp); 3493 rtl8168_driver_stop(tp);
3296 } 3494 }
3297 3495
@@ -3314,33 +3512,23 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3314 3512
3315static void rtl_request_firmware(struct rtl8169_private *tp) 3513static void rtl_request_firmware(struct rtl8169_private *tp)
3316{ 3514{
3317 int i;
3318
3319 /* Return early if the firmware is already loaded / cached. */ 3515 /* Return early if the firmware is already loaded / cached. */
3320 if (!IS_ERR(tp->fw)) 3516 if (IS_ERR(tp->fw)) {
3321 goto out; 3517 const char *name;
3322
3323 for (i = 0; i < ARRAY_SIZE(rtl_firmware_infos); i++) {
3324 const struct rtl_firmware_info *info = rtl_firmware_infos + i;
3325 3518
3326 if (info->mac_version == tp->mac_version) { 3519 name = rtl_lookup_firmware_name(tp);
3327 const char *name = info->fw_name; 3520 if (name) {
3328 int rc; 3521 int rc;
3329 3522
3330 rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev); 3523 rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev);
3331 if (rc < 0) { 3524 if (rc >= 0)
3332 netif_warn(tp, ifup, tp->dev, "unable to load " 3525 return;
3333 "firmware patch %s (%d)\n", name, rc); 3526
3334 goto out_disable_request_firmware; 3527 netif_warn(tp, ifup, tp->dev, "unable to load "
3335 } 3528 "firmware patch %s (%d)\n", name, rc);
3336 goto out;
3337 } 3529 }
3530 tp->fw = NULL;
3338 } 3531 }
3339
3340out_disable_request_firmware:
3341 tp->fw = NULL;
3342out:
3343 return;
3344} 3532}
3345 3533
3346static int rtl8169_open(struct net_device *dev) 3534static int rtl8169_open(struct net_device *dev)
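rtl_request_firmware() is reworked above to cache its result: once tp->fw is valid it returns immediately, and a failed request_firmware() is reported once and then disables further attempts. The same load-once idiom in isolation, with illustrative names only:

	#include <linux/firmware.h>
	#include <linux/device.h>

	/* Sketch: request a firmware image once and cache the handle. */
	static const struct firmware *foo_get_fw(struct device *dev,
						 const struct firmware **cache,
						 const char *name)
	{
		if (*cache)
			return *cache;

		if (request_firmware(cache, name, dev) < 0) {
			dev_warn(dev, "unable to load firmware %s\n", name);
			*cache = NULL;
		}
		return *cache;
	}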
@@ -3386,14 +3574,12 @@ static int rtl8169_open(struct net_device *dev)
3386 3574
3387 rtl8169_init_phy(dev, tp); 3575 rtl8169_init_phy(dev, tp);
3388 3576
3389 rtl8169_vlan_mode(dev); 3577 rtl8169_set_features(dev, dev->features);
3390 3578
3391 rtl_pll_power_up(tp); 3579 rtl_pll_power_up(tp);
3392 3580
3393 rtl_hw_start(dev); 3581 rtl_hw_start(dev);
3394 3582
3395 rtl8169_request_timer(dev);
3396
3397 tp->saved_wolopts = 0; 3583 tp->saved_wolopts = 0;
3398 pm_runtime_put_noidle(&pdev->dev); 3584 pm_runtime_put_noidle(&pdev->dev);
3399 3585
@@ -3425,7 +3611,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3425 rtl8169_irq_mask_and_ack(ioaddr); 3611 rtl8169_irq_mask_and_ack(ioaddr);
3426 3612
3427 if (tp->mac_version == RTL_GIGA_MAC_VER_27 || 3613 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3428 tp->mac_version == RTL_GIGA_MAC_VER_28) { 3614 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3615 tp->mac_version == RTL_GIGA_MAC_VER_31) {
3429 while (RTL_R8(TxPoll) & NPQ) 3616 while (RTL_R8(TxPoll) & NPQ)
3430 udelay(20); 3617 udelay(20);
3431 3618
@@ -3443,7 +3630,7 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
3443 void __iomem *ioaddr = tp->mmio_addr; 3630 void __iomem *ioaddr = tp->mmio_addr;
3444 u32 cfg = rtl8169_rx_config; 3631 u32 cfg = rtl8169_rx_config;
3445 3632
3446 cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); 3633 cfg |= (RTL_R32(RxConfig) & RTL_RX_CONFIG_MASK);
3447 RTL_W32(RxConfig, cfg); 3634 RTL_W32(RxConfig, cfg);
3448 3635
3449 /* Set DMA burst size and Interframe Gap Time */ 3636 /* Set DMA burst size and Interframe Gap Time */
@@ -3454,25 +3641,14 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
3454static void rtl_hw_start(struct net_device *dev) 3641static void rtl_hw_start(struct net_device *dev)
3455{ 3642{
3456 struct rtl8169_private *tp = netdev_priv(dev); 3643 struct rtl8169_private *tp = netdev_priv(dev);
3457 void __iomem *ioaddr = tp->mmio_addr;
3458 unsigned int i;
3459 3644
3460 /* Soft reset the chip. */ 3645 rtl_hw_reset(tp);
3461 RTL_W8(ChipCmd, CmdReset);
3462
3463 /* Check that the chip has finished the reset. */
3464 for (i = 0; i < 100; i++) {
3465 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
3466 break;
3467 msleep_interruptible(1);
3468 }
3469 3646
3470 tp->hw_start(dev); 3647 tp->hw_start(dev);
3471 3648
3472 netif_start_queue(dev); 3649 netif_start_queue(dev);
3473} 3650}
3474 3651
3475
3476static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, 3652static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
3477 void __iomem *ioaddr) 3653 void __iomem *ioaddr)
3478{ 3654{
@@ -3538,26 +3714,26 @@ static void rtl_hw_start_8169(struct net_device *dev)
3538 } 3714 }
3539 3715
3540 RTL_W8(Cfg9346, Cfg9346_Unlock); 3716 RTL_W8(Cfg9346, Cfg9346_Unlock);
3541 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || 3717 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
3542 (tp->mac_version == RTL_GIGA_MAC_VER_02) || 3718 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
3543 (tp->mac_version == RTL_GIGA_MAC_VER_03) || 3719 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
3544 (tp->mac_version == RTL_GIGA_MAC_VER_04)) 3720 tp->mac_version == RTL_GIGA_MAC_VER_04)
3545 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 3721 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3546 3722
3547 RTL_W8(EarlyTxThres, NoEarlyTx); 3723 RTL_W8(EarlyTxThres, NoEarlyTx);
3548 3724
3549 rtl_set_rx_max_size(ioaddr, rx_buf_sz); 3725 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3550 3726
3551 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || 3727 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
3552 (tp->mac_version == RTL_GIGA_MAC_VER_02) || 3728 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
3553 (tp->mac_version == RTL_GIGA_MAC_VER_03) || 3729 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
3554 (tp->mac_version == RTL_GIGA_MAC_VER_04)) 3730 tp->mac_version == RTL_GIGA_MAC_VER_04)
3555 rtl_set_rx_tx_config_registers(tp); 3731 rtl_set_rx_tx_config_registers(tp);
3556 3732
3557 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 3733 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
3558 3734
3559 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || 3735 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
3560 (tp->mac_version == RTL_GIGA_MAC_VER_03)) { 3736 tp->mac_version == RTL_GIGA_MAC_VER_03) {
3561 dprintk("Set MAC Reg C+CR Offset 0xE0. " 3737 dprintk("Set MAC Reg C+CR Offset 0xE0. "
3562 "Bit-3 and bit-14 MUST be 1\n"); 3738 "Bit-3 and bit-14 MUST be 1\n");
3563 tp->cp_cmd |= (1 << 14); 3739 tp->cp_cmd |= (1 << 14);
@@ -3575,10 +3751,10 @@ static void rtl_hw_start_8169(struct net_device *dev)
3575 3751
3576 rtl_set_rx_tx_desc_registers(tp, ioaddr); 3752 rtl_set_rx_tx_desc_registers(tp, ioaddr);
3577 3753
3578 if ((tp->mac_version != RTL_GIGA_MAC_VER_01) && 3754 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
3579 (tp->mac_version != RTL_GIGA_MAC_VER_02) && 3755 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
3580 (tp->mac_version != RTL_GIGA_MAC_VER_03) && 3756 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
3581 (tp->mac_version != RTL_GIGA_MAC_VER_04)) { 3757 tp->mac_version != RTL_GIGA_MAC_VER_04) {
3582 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 3758 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3583 rtl_set_rx_tx_config_registers(tp); 3759 rtl_set_rx_tx_config_registers(tp);
3584 } 3760 }
@@ -3822,6 +3998,17 @@ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
3822 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 3998 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3823} 3999}
3824 4000
4001static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
4002{
4003 rtl_csi_access_enable_1(ioaddr);
4004
4005 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4006
4007 RTL_W8(MaxTxPacketSize, TxPacketMax);
4008
4009 rtl_disable_clock_request(pdev);
4010}
4011
3825static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) 4012static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
3826{ 4013{
3827 static const struct ephy_info e_info_8168d_4[] = { 4014 static const struct ephy_info e_info_8168d_4[] = {
@@ -3848,6 +4035,41 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
3848 rtl_enable_clock_request(pdev); 4035 rtl_enable_clock_request(pdev);
3849} 4036}
3850 4037
4038static void rtl_hw_start_8168e(void __iomem *ioaddr, struct pci_dev *pdev)
4039{
4040 static const struct ephy_info e_info_8168e[] = {
4041 { 0x00, 0x0200, 0x0100 },
4042 { 0x00, 0x0000, 0x0004 },
4043 { 0x06, 0x0002, 0x0001 },
4044 { 0x06, 0x0000, 0x0030 },
4045 { 0x07, 0x0000, 0x2000 },
4046 { 0x00, 0x0000, 0x0020 },
4047 { 0x03, 0x5800, 0x2000 },
4048 { 0x03, 0x0000, 0x0001 },
4049 { 0x01, 0x0800, 0x1000 },
4050 { 0x07, 0x0000, 0x4000 },
4051 { 0x1e, 0x0000, 0x2000 },
4052 { 0x19, 0xffff, 0xfe6c },
4053 { 0x0a, 0x0000, 0x0040 }
4054 };
4055
4056 rtl_csi_access_enable_2(ioaddr);
4057
4058 rtl_ephy_init(ioaddr, e_info_8168e, ARRAY_SIZE(e_info_8168e));
4059
4060 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4061
4062 RTL_W8(MaxTxPacketSize, TxPacketMax);
4063
4064 rtl_disable_clock_request(pdev);
4065
4066 /* Reset tx FIFO pointer */
4067 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
4068 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
4069
4070 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4071}
4072
3851static void rtl_hw_start_8168(struct net_device *dev) 4073static void rtl_hw_start_8168(struct net_device *dev)
3852{ 4074{
3853 struct rtl8169_private *tp = netdev_priv(dev); 4075 struct rtl8169_private *tp = netdev_priv(dev);
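The e_info_8168e[] table above follows the driver's {offset, mask, bits} convention: rtl_ephy_init() (not shown in this hunk) walks the table and applies each entry as a read-modify-write on the PCIe PHY registers, clearing the mask bits and setting the new bits. A sketch of that loop with hypothetical accessors:

	struct ephy_fixup {
		u8 offset;
		u16 mask;	/* bits to clear */
		u16 bits;	/* bits to set */
	};

	static void apply_ephy_fixups(void __iomem *ioaddr,
				      const struct ephy_fixup *e, int len)
	{
		while (len-- > 0) {
			u16 w = foo_ephy_read(ioaddr, e->offset);	/* hypothetical */

			foo_ephy_write(ioaddr, e->offset, (w & ~e->mask) | e->bits);
			e++;
		}
	}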
@@ -3885,55 +4107,64 @@ static void rtl_hw_start_8168(struct net_device *dev)
3885 switch (tp->mac_version) { 4107 switch (tp->mac_version) {
3886 case RTL_GIGA_MAC_VER_11: 4108 case RTL_GIGA_MAC_VER_11:
3887 rtl_hw_start_8168bb(ioaddr, pdev); 4109 rtl_hw_start_8168bb(ioaddr, pdev);
3888 break; 4110 break;
3889 4111
3890 case RTL_GIGA_MAC_VER_12: 4112 case RTL_GIGA_MAC_VER_12:
3891 case RTL_GIGA_MAC_VER_17: 4113 case RTL_GIGA_MAC_VER_17:
3892 rtl_hw_start_8168bef(ioaddr, pdev); 4114 rtl_hw_start_8168bef(ioaddr, pdev);
3893 break; 4115 break;
3894 4116
3895 case RTL_GIGA_MAC_VER_18: 4117 case RTL_GIGA_MAC_VER_18:
3896 rtl_hw_start_8168cp_1(ioaddr, pdev); 4118 rtl_hw_start_8168cp_1(ioaddr, pdev);
3897 break; 4119 break;
3898 4120
3899 case RTL_GIGA_MAC_VER_19: 4121 case RTL_GIGA_MAC_VER_19:
3900 rtl_hw_start_8168c_1(ioaddr, pdev); 4122 rtl_hw_start_8168c_1(ioaddr, pdev);
3901 break; 4123 break;
3902 4124
3903 case RTL_GIGA_MAC_VER_20: 4125 case RTL_GIGA_MAC_VER_20:
3904 rtl_hw_start_8168c_2(ioaddr, pdev); 4126 rtl_hw_start_8168c_2(ioaddr, pdev);
3905 break; 4127 break;
3906 4128
3907 case RTL_GIGA_MAC_VER_21: 4129 case RTL_GIGA_MAC_VER_21:
3908 rtl_hw_start_8168c_3(ioaddr, pdev); 4130 rtl_hw_start_8168c_3(ioaddr, pdev);
3909 break; 4131 break;
3910 4132
3911 case RTL_GIGA_MAC_VER_22: 4133 case RTL_GIGA_MAC_VER_22:
3912 rtl_hw_start_8168c_4(ioaddr, pdev); 4134 rtl_hw_start_8168c_4(ioaddr, pdev);
3913 break; 4135 break;
3914 4136
3915 case RTL_GIGA_MAC_VER_23: 4137 case RTL_GIGA_MAC_VER_23:
3916 rtl_hw_start_8168cp_2(ioaddr, pdev); 4138 rtl_hw_start_8168cp_2(ioaddr, pdev);
3917 break; 4139 break;
3918 4140
3919 case RTL_GIGA_MAC_VER_24: 4141 case RTL_GIGA_MAC_VER_24:
3920 rtl_hw_start_8168cp_3(ioaddr, pdev); 4142 rtl_hw_start_8168cp_3(ioaddr, pdev);
3921 break; 4143 break;
3922 4144
3923 case RTL_GIGA_MAC_VER_25: 4145 case RTL_GIGA_MAC_VER_25:
3924 case RTL_GIGA_MAC_VER_26: 4146 case RTL_GIGA_MAC_VER_26:
3925 case RTL_GIGA_MAC_VER_27: 4147 case RTL_GIGA_MAC_VER_27:
3926 rtl_hw_start_8168d(ioaddr, pdev); 4148 rtl_hw_start_8168d(ioaddr, pdev);
3927 break; 4149 break;
3928 4150
3929 case RTL_GIGA_MAC_VER_28: 4151 case RTL_GIGA_MAC_VER_28:
3930 rtl_hw_start_8168d_4(ioaddr, pdev); 4152 rtl_hw_start_8168d_4(ioaddr, pdev);
3931 break; 4153 break;
4154
4155 case RTL_GIGA_MAC_VER_31:
4156 rtl_hw_start_8168dp(ioaddr, pdev);
4157 break;
4158
4159 case RTL_GIGA_MAC_VER_32:
4160 case RTL_GIGA_MAC_VER_33:
4161 rtl_hw_start_8168e(ioaddr, pdev);
4162 break;
3932 4163
3933 default: 4164 default:
3934 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", 4165 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
3935 dev->name, tp->mac_version); 4166 dev->name, tp->mac_version);
3936 break; 4167 break;
3937 } 4168 }
3938 4169
3939 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 4170 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -4017,10 +4248,10 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4017 { 0x0a, 0, 0x0020 } 4248 { 0x0a, 0, 0x0020 }
4018 }; 4249 };
4019 4250
4020 /* Force LAN exit from ASPM if Rx/Tx are not idel */ 4251 /* Force LAN exit from ASPM if Rx/Tx are not idle */
4021 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); 4252 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
4022 4253
4023 /* disable Early Tally Counter */ 4254 /* Disable Early Tally Counter */
4024 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); 4255 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
4025 4256
4026 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 4257 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
@@ -4041,8 +4272,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
4041 void __iomem *ioaddr = tp->mmio_addr; 4272 void __iomem *ioaddr = tp->mmio_addr;
4042 struct pci_dev *pdev = tp->pci_dev; 4273 struct pci_dev *pdev = tp->pci_dev;
4043 4274
4044 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 4275 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
4045 (tp->mac_version == RTL_GIGA_MAC_VER_16)) { 4276 tp->mac_version == RTL_GIGA_MAC_VER_16) {
4046 int cap = tp->pcie_cap; 4277 int cap = tp->pcie_cap;
4047 4278
4048 if (cap) { 4279 if (cap) {
@@ -4105,6 +4336,8 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
4105 return -EINVAL; 4336 return -EINVAL;
4106 4337
4107 dev->mtu = new_mtu; 4338 dev->mtu = new_mtu;
4339 netdev_update_features(dev);
4340
4108 return 0; 4341 return 0;
4109} 4342}
4110 4343
@@ -4342,6 +4575,7 @@ static void rtl8169_reset_task(struct work_struct *work)
4342 struct rtl8169_private *tp = 4575 struct rtl8169_private *tp =
4343 container_of(work, struct rtl8169_private, task.work); 4576 container_of(work, struct rtl8169_private, task.work);
4344 struct net_device *dev = tp->dev; 4577 struct net_device *dev = tp->dev;
4578 int i;
4345 4579
4346 rtnl_lock(); 4580 rtnl_lock();
4347 4581
@@ -4350,19 +4584,15 @@ static void rtl8169_reset_task(struct work_struct *work)
4350 4584
4351 rtl8169_wait_for_quiescence(dev); 4585 rtl8169_wait_for_quiescence(dev);
4352 4586
4353 rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0); 4587 for (i = 0; i < NUM_RX_DESC; i++)
4588 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
4589
4354 rtl8169_tx_clear(tp); 4590 rtl8169_tx_clear(tp);
4355 4591
4356 if (tp->dirty_rx == tp->cur_rx) { 4592 rtl8169_init_ring_indexes(tp);
4357 rtl8169_init_ring_indexes(tp); 4593 rtl_hw_start(dev);
4358 rtl_hw_start(dev); 4594 netif_wake_queue(dev);
4359 netif_wake_queue(dev); 4595 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
4360 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
4361 } else {
4362 if (net_ratelimit())
4363 netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
4364 rtl8169_schedule_work(dev, rtl8169_reset_task);
4365 }
4366 4596
4367out_unlock: 4597out_unlock:
4368 rtnl_unlock(); 4598 rtnl_unlock();
@@ -4379,7 +4609,7 @@ static void rtl8169_tx_timeout(struct net_device *dev)
4379} 4609}
4380 4610
4381static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, 4611static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4382 u32 opts1) 4612 u32 *opts)
4383{ 4613{
4384 struct skb_shared_info *info = skb_shinfo(skb); 4614 struct skb_shared_info *info = skb_shinfo(skb);
4385 unsigned int cur_frag, entry; 4615 unsigned int cur_frag, entry;
@@ -4406,10 +4636,12 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4406 goto err_out; 4636 goto err_out;
4407 } 4637 }
4408 4638
4409 /* anti gcc 2.95.3 bugware (sic) */ 4639 /* Anti gcc 2.95.3 bugware (sic) */
4410 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); 4640 status = opts[0] | len |
4641 (RingEnd * !((entry + 1) % NUM_TX_DESC));
4411 4642
4412 txd->opts1 = cpu_to_le32(status); 4643 txd->opts1 = cpu_to_le32(status);
4644 txd->opts2 = cpu_to_le32(opts[1]);
4413 txd->addr = cpu_to_le64(mapping); 4645 txd->addr = cpu_to_le64(mapping);
4414 4646
4415 tp->tx_skb[entry].len = len; 4647 tp->tx_skb[entry].len = len;
@@ -4427,24 +4659,26 @@ err_out:
4427 return -EIO; 4659 return -EIO;
4428} 4660}
4429 4661
4430static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) 4662static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
4663 struct sk_buff *skb, u32 *opts)
4431{ 4664{
4432 if (dev->features & NETIF_F_TSO) { 4665 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
4433 u32 mss = skb_shinfo(skb)->gso_size; 4666 u32 mss = skb_shinfo(skb)->gso_size;
4667 int offset = info->opts_offset;
4434 4668
4435 if (mss) 4669 if (mss) {
4436 return LargeSend | ((mss & MSSMask) << MSSShift); 4670 opts[0] |= TD_LSO;
4437 } 4671 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
4438 if (skb->ip_summed == CHECKSUM_PARTIAL) { 4672 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4439 const struct iphdr *ip = ip_hdr(skb); 4673 const struct iphdr *ip = ip_hdr(skb);
4440 4674
4441 if (ip->protocol == IPPROTO_TCP) 4675 if (ip->protocol == IPPROTO_TCP)
4442 return IPCS | TCPCS; 4676 opts[offset] |= info->checksum.tcp;
4443 else if (ip->protocol == IPPROTO_UDP) 4677 else if (ip->protocol == IPPROTO_UDP)
4444 return IPCS | UDPCS; 4678 opts[offset] |= info->checksum.udp;
4445 WARN_ON(1); /* we need a WARN() */ 4679 else
4680 WARN_ON_ONCE(1);
4446 } 4681 }
4447 return 0;
4448} 4682}
4449 4683
4450static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 4684static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
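rtl8169_tso_csum() above no longer returns a single opts1 word; it fills a two-word opts[] array and looks up where the LSO and checksum flags live for the chip's descriptor version via tx_desc_info[] (opts_offset, mss_shift, checksum.tcp/udp). The general table-driven shape, with made-up bit positions and names:

	#include <linux/bitops.h>
	#include <linux/in.h>

	/* Illustrative only: the bit positions and layout are invented. */
	struct txd_csum_info {
		u32 tcp;	/* TCP checksum flag */
		u32 udp;	/* UDP checksum flag */
		int word;	/* which opts[] word carries the flags */
	};

	static const struct txd_csum_info txd_csum_infos[] = {
		{ .tcp = BIT(16), .udp = BIT(17), .word = 0 },	/* older descriptors */
		{ .tcp = BIT(30), .udp = BIT(31), .word = 1 },	/* newer descriptors */
	};

	static void fill_csum_opts(const struct txd_csum_info *info,
				   u8 l4proto, u32 *opts)
	{
		if (l4proto == IPPROTO_TCP)
			opts[info->word] |= info->tcp;
		else if (l4proto == IPPROTO_UDP)
			opts[info->word] |= info->udp;
	}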
@@ -4457,7 +4691,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4457 struct device *d = &tp->pci_dev->dev; 4691 struct device *d = &tp->pci_dev->dev;
4458 dma_addr_t mapping; 4692 dma_addr_t mapping;
4459 u32 status, len; 4693 u32 status, len;
4460 u32 opts1; 4694 u32 opts[2];
4461 int frags; 4695 int frags;
4462 4696
4463 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 4697 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
@@ -4478,31 +4712,35 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4478 4712
4479 tp->tx_skb[entry].len = len; 4713 tp->tx_skb[entry].len = len;
4480 txd->addr = cpu_to_le64(mapping); 4714 txd->addr = cpu_to_le64(mapping);
4481 txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
4482 4715
4483 opts1 = DescOwn | rtl8169_tso_csum(skb, dev); 4716 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
4717 opts[0] = DescOwn;
4484 4718
4485 frags = rtl8169_xmit_frags(tp, skb, opts1); 4719 rtl8169_tso_csum(tp, skb, opts);
4720
4721 frags = rtl8169_xmit_frags(tp, skb, opts);
4486 if (frags < 0) 4722 if (frags < 0)
4487 goto err_dma_1; 4723 goto err_dma_1;
4488 else if (frags) 4724 else if (frags)
4489 opts1 |= FirstFrag; 4725 opts[0] |= FirstFrag;
4490 else { 4726 else {
4491 opts1 |= FirstFrag | LastFrag; 4727 opts[0] |= FirstFrag | LastFrag;
4492 tp->tx_skb[entry].skb = skb; 4728 tp->tx_skb[entry].skb = skb;
4493 } 4729 }
4494 4730
4731 txd->opts2 = cpu_to_le32(opts[1]);
4732
4495 wmb(); 4733 wmb();
4496 4734
4497 /* anti gcc 2.95.3 bugware (sic) */ 4735 /* Anti gcc 2.95.3 bugware (sic) */
4498 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); 4736 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
4499 txd->opts1 = cpu_to_le32(status); 4737 txd->opts1 = cpu_to_le32(status);
4500 4738
4501 tp->cur_tx += frags + 1; 4739 tp->cur_tx += frags + 1;
4502 4740
4503 wmb(); 4741 wmb();
4504 4742
4505 RTL_W8(TxPoll, NPQ); /* set polling bit */ 4743 RTL_W8(TxPoll, NPQ);
4506 4744
4507 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { 4745 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
4508 netif_stop_queue(dev); 4746 netif_stop_queue(dev);
@@ -4659,20 +4897,12 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
4659 return skb; 4897 return skb;
4660} 4898}
4661 4899
4662/*
4663 * Warning : rtl8169_rx_interrupt() might be called :
4664 * 1) from NAPI (softirq) context
4665 * (polling = 1 : we should call netif_receive_skb())
4666 * 2) from process context (rtl8169_reset_task())
4667 * (polling = 0 : we must call netif_rx() instead)
4668 */
4669static int rtl8169_rx_interrupt(struct net_device *dev, 4900static int rtl8169_rx_interrupt(struct net_device *dev,
4670 struct rtl8169_private *tp, 4901 struct rtl8169_private *tp,
4671 void __iomem *ioaddr, u32 budget) 4902 void __iomem *ioaddr, u32 budget)
4672{ 4903{
4673 unsigned int cur_rx, rx_left; 4904 unsigned int cur_rx, rx_left;
4674 unsigned int count; 4905 unsigned int count;
4675 int polling = (budget != ~(u32)0) ? 1 : 0;
4676 4906
4677 cur_rx = tp->cur_rx; 4907 cur_rx = tp->cur_rx;
4678 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; 4908 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
@@ -4732,10 +4962,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4732 4962
4733 rtl8169_rx_vlan_tag(desc, skb); 4963 rtl8169_rx_vlan_tag(desc, skb);
4734 4964
4735 if (likely(polling)) 4965 napi_gro_receive(&tp->napi, skb);
4736 napi_gro_receive(&tp->napi, skb);
4737 else
4738 netif_rx(skb);
4739 4966
4740 dev->stats.rx_bytes += pkt_size; 4967 dev->stats.rx_bytes += pkt_size;
4741 dev->stats.rx_packets++; 4968 dev->stats.rx_packets++;
@@ -4798,6 +5025,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4798 case RTL_GIGA_MAC_VER_24: 5025 case RTL_GIGA_MAC_VER_24:
4799 case RTL_GIGA_MAC_VER_27: 5026 case RTL_GIGA_MAC_VER_27:
4800 case RTL_GIGA_MAC_VER_28: 5027 case RTL_GIGA_MAC_VER_28:
5028 case RTL_GIGA_MAC_VER_31:
4801 /* Experimental science. Pktgen proof. */ 5029 /* Experimental science. Pktgen proof. */
4802 case RTL_GIGA_MAC_VER_12: 5030 case RTL_GIGA_MAC_VER_12:
4803 case RTL_GIGA_MAC_VER_25: 5031 case RTL_GIGA_MAC_VER_25:
@@ -4890,7 +5118,7 @@ static void rtl8169_down(struct net_device *dev)
4890 struct rtl8169_private *tp = netdev_priv(dev); 5118 struct rtl8169_private *tp = netdev_priv(dev);
4891 void __iomem *ioaddr = tp->mmio_addr; 5119 void __iomem *ioaddr = tp->mmio_addr;
4892 5120
4893 rtl8169_delete_timer(dev); 5121 del_timer_sync(&tp->timer);
4894 5122
4895 netif_stop_queue(dev); 5123 netif_stop_queue(dev);
4896 5124
@@ -4927,7 +5155,7 @@ static int rtl8169_close(struct net_device *dev)
4927 5155
4928 pm_runtime_get_sync(&pdev->dev); 5156 pm_runtime_get_sync(&pdev->dev);
4929 5157
4930 /* update counters before going down */ 5158 /* Update counters before going down */
4931 rtl8169_update_counters(dev); 5159 rtl8169_update_counters(dev);
4932 5160
4933 rtl8169_down(dev); 5161 rtl8169_down(dev);
@@ -4982,7 +5210,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
4982 spin_lock_irqsave(&tp->lock, flags); 5210 spin_lock_irqsave(&tp->lock, flags);
4983 5211
4984 tmp = rtl8169_rx_config | rx_mode | 5212 tmp = rtl8169_rx_config | rx_mode |
4985 (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); 5213 (RTL_R32(RxConfig) & RTL_RX_CONFIG_MASK);
4986 5214
4987 if (tp->mac_version > RTL_GIGA_MAC_VER_06) { 5215 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4988 u32 data = mc_filter[0]; 5216 u32 data = mc_filter[0];
@@ -5120,15 +5348,15 @@ static int rtl8169_runtime_idle(struct device *device)
5120} 5348}
5121 5349
5122static const struct dev_pm_ops rtl8169_pm_ops = { 5350static const struct dev_pm_ops rtl8169_pm_ops = {
5123 .suspend = rtl8169_suspend, 5351 .suspend = rtl8169_suspend,
5124 .resume = rtl8169_resume, 5352 .resume = rtl8169_resume,
5125 .freeze = rtl8169_suspend, 5353 .freeze = rtl8169_suspend,
5126 .thaw = rtl8169_resume, 5354 .thaw = rtl8169_resume,
5127 .poweroff = rtl8169_suspend, 5355 .poweroff = rtl8169_suspend,
5128 .restore = rtl8169_resume, 5356 .restore = rtl8169_resume,
5129 .runtime_suspend = rtl8169_runtime_suspend, 5357 .runtime_suspend = rtl8169_runtime_suspend,
5130 .runtime_resume = rtl8169_runtime_resume, 5358 .runtime_resume = rtl8169_runtime_resume,
5131 .runtime_idle = rtl8169_runtime_idle, 5359 .runtime_idle = rtl8169_runtime_idle,
5132}; 5360};
5133 5361
5134#define RTL8169_PM_OPS (&rtl8169_pm_ops) 5362#define RTL8169_PM_OPS (&rtl8169_pm_ops)
@@ -5147,7 +5375,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
5147 5375
5148 rtl8169_net_suspend(dev); 5376 rtl8169_net_suspend(dev);
5149 5377
5150 /* restore original MAC address */ 5378 /* Restore original MAC address */
5151 rtl_rar_set(tp, dev->perm_addr); 5379 rtl_rar_set(tp, dev->perm_addr);
5152 5380
5153 spin_lock_irq(&tp->lock); 5381 spin_lock_irq(&tp->lock);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 337bdcd5abc9..89cfee7e8643 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2244,13 +2244,12 @@ static int verify_xena_quiescence(struct s2io_nic *sp)
2244static void fix_mac_address(struct s2io_nic *sp) 2244static void fix_mac_address(struct s2io_nic *sp)
2245{ 2245{
2246 struct XENA_dev_config __iomem *bar0 = sp->bar0; 2246 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2247 u64 val64;
2248 int i = 0; 2247 int i = 0;
2249 2248
2250 while (fix_mac[i] != END_SIGN) { 2249 while (fix_mac[i] != END_SIGN) {
2251 writeq(fix_mac[i++], &bar0->gpio_control); 2250 writeq(fix_mac[i++], &bar0->gpio_control);
2252 udelay(10); 2251 udelay(10);
2253 val64 = readq(&bar0->gpio_control); 2252 (void) readq(&bar0->gpio_control);
2254 } 2253 }
2255} 2254}
2256 2255
@@ -2727,7 +2726,6 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2727 int j; 2726 int j;
2728 struct sk_buff *skb; 2727 struct sk_buff *skb;
2729 struct RxD_t *rxdp; 2728 struct RxD_t *rxdp;
2730 struct buffAdd *ba;
2731 struct RxD1 *rxdp1; 2729 struct RxD1 *rxdp1;
2732 struct RxD3 *rxdp3; 2730 struct RxD3 *rxdp3;
2733 struct mac_info *mac_control = &sp->mac_control; 2731 struct mac_info *mac_control = &sp->mac_control;
@@ -2751,7 +2749,6 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2751 memset(rxdp, 0, sizeof(struct RxD1)); 2749 memset(rxdp, 0, sizeof(struct RxD1));
2752 } else if (sp->rxd_mode == RXD_MODE_3B) { 2750 } else if (sp->rxd_mode == RXD_MODE_3B) {
2753 rxdp3 = (struct RxD3 *)rxdp; 2751 rxdp3 = (struct RxD3 *)rxdp;
2754 ba = &mac_control->rings[ring_no].ba[blk][j];
2755 pci_unmap_single(sp->pdev, 2752 pci_unmap_single(sp->pdev,
2756 (dma_addr_t)rxdp3->Buffer0_ptr, 2753 (dma_addr_t)rxdp3->Buffer0_ptr,
2757 BUF0_LEN, 2754 BUF0_LEN,
@@ -5383,7 +5380,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
5383{ 5380{
5384 struct s2io_nic *sp = netdev_priv(dev); 5381 struct s2io_nic *sp = netdev_priv(dev);
5385 if ((info->autoneg == AUTONEG_ENABLE) || 5382 if ((info->autoneg == AUTONEG_ENABLE) ||
5386 (info->speed != SPEED_10000) || 5383 (ethtool_cmd_speed(info) != SPEED_10000) ||
5387 (info->duplex != DUPLEX_FULL)) 5384 (info->duplex != DUPLEX_FULL))
5388 return -EINVAL; 5385 return -EINVAL;
5389 else { 5386 else {
@@ -5417,10 +5414,10 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5417 info->transceiver = XCVR_EXTERNAL; 5414 info->transceiver = XCVR_EXTERNAL;
5418 5415
5419 if (netif_carrier_ok(sp->dev)) { 5416 if (netif_carrier_ok(sp->dev)) {
5420 info->speed = 10000; 5417 ethtool_cmd_speed_set(info, SPEED_10000);
5421 info->duplex = DUPLEX_FULL; 5418 info->duplex = DUPLEX_FULL;
5422 } else { 5419 } else {
5423 info->speed = -1; 5420 ethtool_cmd_speed_set(info, -1);
5424 info->duplex = -1; 5421 info->duplex = -1;
5425 } 5422 }
5426 5423
@@ -5484,83 +5481,79 @@ static void s2io_ethtool_gregs(struct net_device *dev,
5484 } 5481 }
5485} 5482}
5486 5483
5487/** 5484/*
5488 * s2io_phy_id - timer function that alternates adapter LED. 5485 * s2io_set_led - control NIC led
5489 * @data : address of the private member of the device structure, which
5490 * is a pointer to the s2io_nic structure, provided as an u32.
5491 * Description: This is actually the timer function that alternates the
5492 * adapter LED bit of the adapter control bit to set/reset every time on
5493 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5494 * once every second.
5495 */ 5486 */
5496static void s2io_phy_id(unsigned long data) 5487static void s2io_set_led(struct s2io_nic *sp, bool on)
5497{ 5488{
5498 struct s2io_nic *sp = (struct s2io_nic *)data;
5499 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5489 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5500 u64 val64 = 0; 5490 u16 subid = sp->pdev->subsystem_device;
5501 u16 subid; 5491 u64 val64;
5502 5492
5503 subid = sp->pdev->subsystem_device;
5504 if ((sp->device_type == XFRAME_II_DEVICE) || 5493 if ((sp->device_type == XFRAME_II_DEVICE) ||
5505 ((subid & 0xFF) >= 0x07)) { 5494 ((subid & 0xFF) >= 0x07)) {
5506 val64 = readq(&bar0->gpio_control); 5495 val64 = readq(&bar0->gpio_control);
5507 val64 ^= GPIO_CTRL_GPIO_0; 5496 if (on)
5497 val64 |= GPIO_CTRL_GPIO_0;
5498 else
5499 val64 &= ~GPIO_CTRL_GPIO_0;
5500
5508 writeq(val64, &bar0->gpio_control); 5501 writeq(val64, &bar0->gpio_control);
5509 } else { 5502 } else {
5510 val64 = readq(&bar0->adapter_control); 5503 val64 = readq(&bar0->adapter_control);
5511 val64 ^= ADAPTER_LED_ON; 5504 if (on)
5505 val64 |= ADAPTER_LED_ON;
5506 else
5507 val64 &= ~ADAPTER_LED_ON;
5508
5512 writeq(val64, &bar0->adapter_control); 5509 writeq(val64, &bar0->adapter_control);
5513 } 5510 }
5514 5511
5515 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5516} 5512}
5517 5513
5518/** 5514/**
5519 * s2io_ethtool_idnic - To physically identify the nic on the system. 5515 * s2io_ethtool_set_led - To physically identify the nic on the system.
5520 * @sp : private member of the device structure, which is a pointer to the 5516 * @dev : network device
5521 * s2io_nic structure. 5517 * @state: led setting
5522 * @id : pointer to the structure with identification parameters given by 5518 *
5523 * ethtool.
5524 * Description: Used to physically identify the NIC on the system. 5519 * Description: Used to physically identify the NIC on the system.
5525 * The Link LED will blink for a time specified by the user for 5520 * The Link LED will blink for a time specified by the user for
5526 * identification. 5521 * identification.
5527 * NOTE: The Link has to be Up to be able to blink the LED. Hence 5522 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5528 * identification is possible only if it's link is up. 5523 * identification is possible only if it's link is up.
5529 * Return value:
5530 * int , returns 0 on success
5531 */ 5524 */
5532 5525
5533static int s2io_ethtool_idnic(struct net_device *dev, u32 data) 5526static int s2io_ethtool_set_led(struct net_device *dev,
5527 enum ethtool_phys_id_state state)
5534{ 5528{
5535 u64 val64 = 0, last_gpio_ctrl_val;
5536 struct s2io_nic *sp = netdev_priv(dev); 5529 struct s2io_nic *sp = netdev_priv(dev);
5537 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5530 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5538 u16 subid; 5531 u16 subid = sp->pdev->subsystem_device;
5539 5532
5540 subid = sp->pdev->subsystem_device;
5541 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5542 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) { 5533 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5543 val64 = readq(&bar0->adapter_control); 5534 u64 val64 = readq(&bar0->adapter_control);
5544 if (!(val64 & ADAPTER_CNTL_EN)) { 5535 if (!(val64 & ADAPTER_CNTL_EN)) {
5545 pr_err("Adapter Link down, cannot blink LED\n"); 5536 pr_err("Adapter Link down, cannot blink LED\n");
5546 return -EFAULT; 5537 return -EAGAIN;
5547 } 5538 }
5548 } 5539 }
5549 if (sp->id_timer.function == NULL) {
5550 init_timer(&sp->id_timer);
5551 sp->id_timer.function = s2io_phy_id;
5552 sp->id_timer.data = (unsigned long)sp;
5553 }
5554 mod_timer(&sp->id_timer, jiffies);
5555 if (data)
5556 msleep_interruptible(data * HZ);
5557 else
5558 msleep_interruptible(MAX_FLICKER_TIME);
5559 del_timer_sync(&sp->id_timer);
5560 5540
5561 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) { 5541 switch (state) {
5562 writeq(last_gpio_ctrl_val, &bar0->gpio_control); 5542 case ETHTOOL_ID_ACTIVE:
5563 last_gpio_ctrl_val = readq(&bar0->gpio_control); 5543 sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5544 return 1; /* cycle on/off once per second */
5545
5546 case ETHTOOL_ID_ON:
5547 s2io_set_led(sp, true);
5548 break;
5549
5550 case ETHTOOL_ID_OFF:
5551 s2io_set_led(sp, false);
5552 break;
5553
5554 case ETHTOOL_ID_INACTIVE:
5555 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5556 writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5564 } 5557 }
5565 5558
5566 return 0; 5559 return 0;
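The conversion above replaces s2io's private blink timer with the ethtool set_phys_id callback: the core drives the ETHTOOL_ID_ACTIVE / ON / OFF / INACTIVE states, and a positive return from ETHTOOL_ID_ACTIVE tells it how many on/off cycles per second to request. The usual shape of such a handler, with hypothetical foo_* LED helpers:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static int foo_set_phys_id(struct net_device *dev,
				   enum ethtool_phys_id_state state)
	{
		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			foo_led_save(dev);		/* hypothetical: remember LED state */
			return 1;			/* cycle on/off once per second */

		case ETHTOOL_ID_ON:
			foo_led_set(dev, true);		/* hypothetical */
			break;

		case ETHTOOL_ID_OFF:
			foo_led_set(dev, false);
			break;

		case ETHTOOL_ID_INACTIVE:
			foo_led_restore(dev);		/* hypothetical */
			break;
		}
		return 0;
	}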
@@ -6625,25 +6618,6 @@ static int s2io_ethtool_get_regs_len(struct net_device *dev)
6625} 6618}
6626 6619
6627 6620
6628static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
6629{
6630 struct s2io_nic *sp = netdev_priv(dev);
6631
6632 return sp->rx_csum;
6633}
6634
6635static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6636{
6637 struct s2io_nic *sp = netdev_priv(dev);
6638
6639 if (data)
6640 sp->rx_csum = 1;
6641 else
6642 sp->rx_csum = 0;
6643
6644 return 0;
6645}
6646
6647static int s2io_get_eeprom_len(struct net_device *dev) 6621static int s2io_get_eeprom_len(struct net_device *dev)
6648{ 6622{
6649 return XENA_EEPROM_SPACE; 6623 return XENA_EEPROM_SPACE;
@@ -6695,61 +6669,27 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
6695 } 6669 }
6696} 6670}
6697 6671
6698static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 6672static int s2io_set_features(struct net_device *dev, u32 features)
6699{
6700 if (data)
6701 dev->features |= NETIF_F_IP_CSUM;
6702 else
6703 dev->features &= ~NETIF_F_IP_CSUM;
6704
6705 return 0;
6706}
6707
6708static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6709{
6710 return (dev->features & NETIF_F_TSO) != 0;
6711}
6712
6713static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6714{
6715 if (data)
6716 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6717 else
6718 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6719
6720 return 0;
6721}
6722
6723static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6724{ 6673{
6725 struct s2io_nic *sp = netdev_priv(dev); 6674 struct s2io_nic *sp = netdev_priv(dev);
6726 int rc = 0; 6675 u32 changed = (features ^ dev->features) & NETIF_F_LRO;
6727 int changed = 0;
6728
6729 if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
6730 return -EINVAL;
6731
6732 if (data & ETH_FLAG_LRO) {
6733 if (!(dev->features & NETIF_F_LRO)) {
6734 dev->features |= NETIF_F_LRO;
6735 changed = 1;
6736 }
6737 } else if (dev->features & NETIF_F_LRO) {
6738 dev->features &= ~NETIF_F_LRO;
6739 changed = 1;
6740 }
6741 6676
6742 if (changed && netif_running(dev)) { 6677 if (changed && netif_running(dev)) {
6678 int rc;
6679
6743 s2io_stop_all_tx_queue(sp); 6680 s2io_stop_all_tx_queue(sp);
6744 s2io_card_down(sp); 6681 s2io_card_down(sp);
6682 dev->features = features;
6745 rc = s2io_card_up(sp); 6683 rc = s2io_card_up(sp);
6746 if (rc) 6684 if (rc)
6747 s2io_reset(sp); 6685 s2io_reset(sp);
6748 else 6686 else
6749 s2io_start_all_tx_queue(sp); 6687 s2io_start_all_tx_queue(sp);
6688
6689 return rc ? rc : 1;
6750 } 6690 }
6751 6691
6752 return rc; 6692 return 0;
6753} 6693}
6754 6694
6755static const struct ethtool_ops netdev_ethtool_ops = { 6695static const struct ethtool_ops netdev_ethtool_ops = {
@@ -6765,18 +6705,9 @@ static const struct ethtool_ops netdev_ethtool_ops = {
6765 .get_ringparam = s2io_ethtool_gringparam, 6705 .get_ringparam = s2io_ethtool_gringparam,
6766 .get_pauseparam = s2io_ethtool_getpause_data, 6706 .get_pauseparam = s2io_ethtool_getpause_data,
6767 .set_pauseparam = s2io_ethtool_setpause_data, 6707 .set_pauseparam = s2io_ethtool_setpause_data,
6768 .get_rx_csum = s2io_ethtool_get_rx_csum,
6769 .set_rx_csum = s2io_ethtool_set_rx_csum,
6770 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6771 .set_flags = s2io_ethtool_set_flags,
6772 .get_flags = ethtool_op_get_flags,
6773 .set_sg = ethtool_op_set_sg,
6774 .get_tso = s2io_ethtool_op_get_tso,
6775 .set_tso = s2io_ethtool_op_set_tso,
6776 .set_ufo = ethtool_op_set_ufo,
6777 .self_test = s2io_ethtool_test, 6708 .self_test = s2io_ethtool_test,
6778 .get_strings = s2io_ethtool_get_strings, 6709 .get_strings = s2io_ethtool_get_strings,
6779 .phys_id = s2io_ethtool_idnic, 6710 .set_phys_id = s2io_ethtool_set_led,
6780 .get_ethtool_stats = s2io_get_ethtool_stats, 6711 .get_ethtool_stats = s2io_get_ethtool_stats,
6781 .get_sset_count = s2io_get_sset_count, 6712 .get_sset_count = s2io_get_sset_count,
6782}; 6713};
@@ -7545,7 +7476,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7545 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && 7476 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7546 ((!ring_data->lro) || 7477 ((!ring_data->lro) ||
7547 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && 7478 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7548 (sp->rx_csum)) { 7479 (dev->features & NETIF_F_RXCSUM)) {
7549 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 7480 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7550 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 7481 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7551 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) { 7482 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
@@ -7806,6 +7737,7 @@ static const struct net_device_ops s2io_netdev_ops = {
7806 .ndo_do_ioctl = s2io_ioctl, 7737 .ndo_do_ioctl = s2io_ioctl,
7807 .ndo_set_mac_address = s2io_set_mac_addr, 7738 .ndo_set_mac_address = s2io_set_mac_addr,
7808 .ndo_change_mtu = s2io_change_mtu, 7739 .ndo_change_mtu = s2io_change_mtu,
7740 .ndo_set_features = s2io_set_features,
7809 .ndo_vlan_rx_register = s2io_vlan_rx_register, 7741 .ndo_vlan_rx_register = s2io_vlan_rx_register,
7810 .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid, 7742 .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
7811 .ndo_tx_timeout = s2io_tx_watchdog, 7743 .ndo_tx_timeout = s2io_tx_watchdog,
@@ -8047,17 +7979,18 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8047 /* Driver entry points */ 7979 /* Driver entry points */
8048 dev->netdev_ops = &s2io_netdev_ops; 7980 dev->netdev_ops = &s2io_netdev_ops;
8049 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7981 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8050 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 7982 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
8051 dev->features |= NETIF_F_LRO; 7983 NETIF_F_TSO | NETIF_F_TSO6 |
8052 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 7984 NETIF_F_RXCSUM | NETIF_F_LRO;
7985 dev->features |= dev->hw_features |
7986 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7987 if (sp->device_type & XFRAME_II_DEVICE) {
7988 dev->hw_features |= NETIF_F_UFO;
7989 if (ufo)
7990 dev->features |= NETIF_F_UFO;
7991 }
8053 if (sp->high_dma_flag == true) 7992 if (sp->high_dma_flag == true)
8054 dev->features |= NETIF_F_HIGHDMA; 7993 dev->features |= NETIF_F_HIGHDMA;
8055 dev->features |= NETIF_F_TSO;
8056 dev->features |= NETIF_F_TSO6;
8057 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
8058 dev->features |= NETIF_F_UFO;
8059 dev->features |= NETIF_F_HW_CSUM;
8060 }
8061 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 7994 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
8062 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); 7995 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8063 INIT_WORK(&sp->set_link_task, s2io_set_link); 7996 INIT_WORK(&sp->set_link_task, s2io_set_link);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 2d144979f6f8..800b3a44e653 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -893,9 +893,6 @@ struct s2io_nic {
893 u16 all_multi_pos; 893 u16 all_multi_pos;
894 u16 promisc_flg; 894 u16 promisc_flg;
895 895
896 /* Id timer, used to blink NIC to physically identify NIC. */
897 struct timer_list id_timer;
898
899 /* Restart timer, used to restart NIC if the device is stuck and 896 /* Restart timer, used to restart NIC if the device is stuck and
900 * a schedule task that will set the correct Link state once the 897 * a schedule task that will set the correct Link state once the
901 * NIC's PHY has stabilized after a state change. 898 * NIC's PHY has stabilized after a state change.
@@ -1005,18 +1002,16 @@ static inline void writeq(u64 val, void __iomem *addr)
1005#define LF 2 1002#define LF 2
1006static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order) 1003static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
1007{ 1004{
1008 u32 ret;
1009
1010 if (order == LF) { 1005 if (order == LF) {
1011 writel((u32) (val), addr); 1006 writel((u32) (val), addr);
1012 ret = readl(addr); 1007 (void) readl(addr);
1013 writel((u32) (val >> 32), (addr + 4)); 1008 writel((u32) (val >> 32), (addr + 4));
1014 ret = readl(addr + 4); 1009 (void) readl(addr + 4);
1015 } else { 1010 } else {
1016 writel((u32) (val >> 32), (addr + 4)); 1011 writel((u32) (val >> 32), (addr + 4));
1017 ret = readl(addr + 4); 1012 (void) readl(addr + 4);
1018 writel((u32) (val), addr); 1013 writel((u32) (val), addr);
1019 ret = readl(addr); 1014 (void) readl(addr);
1020 } 1015 }
1021} 1016}
1022 1017
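Note that SPECIAL_REG_WRITE() and fix_mac_address() keep the readl()/readq() after each write even though the returned value is now discarded: the read-back flushes the posted PCI write so the two 32-bit halves reach the device in order. The idiom in isolation, as a sketch:

	#include <linux/io.h>
	#include <linux/types.h>

	/* Sketch: write a 64-bit register as two 32-bit halves, flushing
	 * each posted write with a read-back whose value is ignored. */
	static void write64_flushed(u64 val, void __iomem *addr)
	{
		writel((u32)(val >> 32), addr + 4);
		(void) readl(addr + 4);
		writel((u32)val, addr);
		(void) readl(addr);
	}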
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 76290a8c3c14..fa74314ef789 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1173,7 +1173,8 @@ static int sc92031_ethtool_get_settings(struct net_device *dev,
1173 if (phy_ctrl & PhyCtrlAne) 1173 if (phy_ctrl & PhyCtrlAne)
1174 cmd->advertising |= ADVERTISED_Autoneg; 1174 cmd->advertising |= ADVERTISED_Autoneg;
1175 1175
1176 cmd->speed = (output_status & 0x2) ? SPEED_100 : SPEED_10; 1176 ethtool_cmd_speed_set(cmd,
1177 (output_status & 0x2) ? SPEED_100 : SPEED_10);
1177 cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF; 1178 cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
1178 cmd->port = PORT_MII; 1179 cmd->port = PORT_MII;
1179 cmd->phy_address = phy_address; 1180 cmd->phy_address = phy_address;
@@ -1188,10 +1189,11 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
1188{ 1189{
1189 struct sc92031_priv *priv = netdev_priv(dev); 1190 struct sc92031_priv *priv = netdev_priv(dev);
1190 void __iomem *port_base = priv->port_base; 1191 void __iomem *port_base = priv->port_base;
1192 u32 speed = ethtool_cmd_speed(cmd);
1191 u32 phy_ctrl; 1193 u32 phy_ctrl;
1192 u32 old_phy_ctrl; 1194 u32 old_phy_ctrl;
1193 1195
1194 if (!(cmd->speed == SPEED_10 || cmd->speed == SPEED_100)) 1196 if (!(speed == SPEED_10 || speed == SPEED_100))
1195 return -EINVAL; 1197 return -EINVAL;
1196 if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL)) 1198 if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
1197 return -EINVAL; 1199 return -EINVAL;
@@ -1229,7 +1231,7 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
1229 // FIXME: Whole branch guessed 1231 // FIXME: Whole branch guessed
1230 phy_ctrl = 0; 1232 phy_ctrl = 0;
1231 1233
1232 if (cmd->speed == SPEED_10) 1234 if (speed == SPEED_10)
1233 phy_ctrl |= PhyCtrlSpd10; 1235 phy_ctrl |= PhyCtrlSpd10;
1234 else /* cmd->speed == SPEED_100 */ 1236 else /* cmd->speed == SPEED_100 */
1235 phy_ctrl |= PhyCtrlSpd100; 1237 phy_ctrl |= PhyCtrlSpd100;
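Like the s2io hunks above, sc92031 stops poking cmd->speed directly and goes through ethtool_cmd_speed() / ethtool_cmd_speed_set(); the accessors split the value across the speed and speed_hi fields so rates above 65535 Mb/s can be represented. Minimal usage, with hypothetical foo_* wrappers:

	#include <linux/ethtool.h>

	/* Sketch: read and write link speed only through the accessors. */
	static bool foo_wants_100(const struct ethtool_cmd *cmd)
	{
		return ethtool_cmd_speed(cmd) == SPEED_100;	/* combines speed/speed_hi */
	}

	static void foo_report_100_full(struct ethtool_cmd *cmd)
	{
		ethtool_cmd_speed_set(cmd, SPEED_100);		/* splits it back */
		cmd->duplex = DUPLEX_FULL;
	}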
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index a3c2aab53de8..05502b359b9e 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -798,11 +798,6 @@ void efx_link_status_changed(struct efx_nic *efx)
798 if (!netif_running(efx->net_dev)) 798 if (!netif_running(efx->net_dev))
799 return; 799 return;
800 800
801 if (efx->port_inhibited) {
802 netif_carrier_off(efx->net_dev);
803 return;
804 }
805
806 if (link_state->up != netif_carrier_ok(efx->net_dev)) { 801 if (link_state->up != netif_carrier_ok(efx->net_dev)) {
807 efx->n_link_state_changes++; 802 efx->n_link_state_changes++;
808 803
@@ -1319,8 +1314,20 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1319 1314
1320static void efx_set_channels(struct efx_nic *efx) 1315static void efx_set_channels(struct efx_nic *efx)
1321{ 1316{
1317 struct efx_channel *channel;
1318 struct efx_tx_queue *tx_queue;
1319
1322 efx->tx_channel_offset = 1320 efx->tx_channel_offset =
1323 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1321 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1322
1323 /* We need to adjust the TX queue numbers if we have separate
1324 * RX-only and TX-only channels.
1325 */
1326 efx_for_each_channel(channel, efx) {
1327 efx_for_each_channel_tx_queue(tx_queue, channel)
1328 tx_queue->queue -= (efx->tx_channel_offset *
1329 EFX_TXQ_TYPES);
1330 }
1324} 1331}
1325 1332
1326static int efx_probe_nic(struct efx_nic *efx) 1333static int efx_probe_nic(struct efx_nic *efx)
@@ -1438,7 +1445,7 @@ static void efx_start_all(struct efx_nic *efx)
1438 * restart the transmit interface early so the watchdog timer stops */ 1445 * restart the transmit interface early so the watchdog timer stops */
1439 efx_start_port(efx); 1446 efx_start_port(efx);
1440 1447
1441 if (efx_dev_registered(efx) && !efx->port_inhibited) 1448 if (efx_dev_registered(efx) && netif_device_present(efx->net_dev))
1442 netif_tx_wake_all_queues(efx->net_dev); 1449 netif_tx_wake_all_queues(efx->net_dev);
1443 1450
1444 efx_for_each_channel(channel, efx) 1451 efx_for_each_channel(channel, efx)
@@ -1876,6 +1883,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1876 /* Otherwise efx_start_port() will do this */ 1883 /* Otherwise efx_start_port() will do this */
1877} 1884}
1878 1885
1886static int efx_set_features(struct net_device *net_dev, u32 data)
1887{
1888 struct efx_nic *efx = netdev_priv(net_dev);
1889
1890 /* If disabling RX n-tuple filtering, clear existing filters */
1891 if (net_dev->features & ~data & NETIF_F_NTUPLE)
1892 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
1893
1894 return 0;
1895}
1896
1879static const struct net_device_ops efx_netdev_ops = { 1897static const struct net_device_ops efx_netdev_ops = {
1880 .ndo_open = efx_net_open, 1898 .ndo_open = efx_net_open,
1881 .ndo_stop = efx_net_stop, 1899 .ndo_stop = efx_net_stop,
@@ -1887,6 +1905,7 @@ static const struct net_device_ops efx_netdev_ops = {
1887 .ndo_change_mtu = efx_change_mtu, 1905 .ndo_change_mtu = efx_change_mtu,
1888 .ndo_set_mac_address = efx_set_mac_address, 1906 .ndo_set_mac_address = efx_set_mac_address,
1889 .ndo_set_multicast_list = efx_set_multicast_list, 1907 .ndo_set_multicast_list = efx_set_multicast_list,
1908 .ndo_set_features = efx_set_features,
1890#ifdef CONFIG_NET_POLL_CONTROLLER 1909#ifdef CONFIG_NET_POLL_CONTROLLER
1891 .ndo_poll_controller = efx_netpoll, 1910 .ndo_poll_controller = efx_netpoll,
1892#endif 1911#endif
@@ -2090,6 +2109,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
2090 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", 2109 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2091 RESET_TYPE(method)); 2110 RESET_TYPE(method));
2092 2111
2112 netif_device_detach(efx->net_dev);
2093 efx_reset_down(efx, method); 2113 efx_reset_down(efx, method);
2094 2114
2095 rc = efx->type->reset(efx, method); 2115 rc = efx->type->reset(efx, method);
@@ -2123,6 +2143,7 @@ out:
2123 efx->state = STATE_DISABLED; 2143 efx->state = STATE_DISABLED;
2124 } else { 2144 } else {
2125 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); 2145 netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2146 netif_device_attach(efx->net_dev);
2126 } 2147 }
2127 return rc; 2148 return rc;
2128} 2149}
@@ -2235,7 +2256,7 @@ static bool efx_port_dummy_op_poll(struct efx_nic *efx)
2235 return false; 2256 return false;
2236} 2257}
2237 2258
2238static struct efx_phy_operations efx_dummy_phy_operations = { 2259static const struct efx_phy_operations efx_dummy_phy_operations = {
2239 .init = efx_port_dummy_op_int, 2260 .init = efx_port_dummy_op_int,
2240 .reconfigure = efx_port_dummy_op_int, 2261 .reconfigure = efx_port_dummy_op_int,
2241 .poll = efx_port_dummy_op_poll, 2262 .poll = efx_port_dummy_op_poll,
@@ -2251,7 +2272,7 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
2251/* This zeroes out and then fills in the invariants in a struct 2272/* This zeroes out and then fills in the invariants in a struct
2252 * efx_nic (including all sub-structures). 2273 * efx_nic (including all sub-structures).
2253 */ 2274 */
2254static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, 2275static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
2255 struct pci_dev *pci_dev, struct net_device *net_dev) 2276 struct pci_dev *pci_dev, struct net_device *net_dev)
2256{ 2277{
2257 int i; 2278 int i;
@@ -2271,7 +2292,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2271 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2292 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2272 2293
2273 efx->net_dev = net_dev; 2294 efx->net_dev = net_dev;
2274 efx->rx_checksum_enabled = true;
2275 spin_lock_init(&efx->stats_lock); 2295 spin_lock_init(&efx->stats_lock);
2276 mutex_init(&efx->mac_lock); 2296 mutex_init(&efx->mac_lock);
2277 efx->mac_op = type->default_mac_ops; 2297 efx->mac_op = type->default_mac_ops;
@@ -2442,7 +2462,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2442static int __devinit efx_pci_probe(struct pci_dev *pci_dev, 2462static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2443 const struct pci_device_id *entry) 2463 const struct pci_device_id *entry)
2444{ 2464{
2445 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data; 2465 const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
2446 struct net_device *net_dev; 2466 struct net_device *net_dev;
2447 struct efx_nic *efx; 2467 struct efx_nic *efx;
2448 int i, rc; 2468 int i, rc;
@@ -2454,12 +2474,15 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2454 return -ENOMEM; 2474 return -ENOMEM;
2455 net_dev->features |= (type->offload_features | NETIF_F_SG | 2475 net_dev->features |= (type->offload_features | NETIF_F_SG |
2456 NETIF_F_HIGHDMA | NETIF_F_TSO | 2476 NETIF_F_HIGHDMA | NETIF_F_TSO |
2457 NETIF_F_GRO); 2477 NETIF_F_RXCSUM);
2458 if (type->offload_features & NETIF_F_V6_CSUM) 2478 if (type->offload_features & NETIF_F_V6_CSUM)
2459 net_dev->features |= NETIF_F_TSO6; 2479 net_dev->features |= NETIF_F_TSO6;
2460 /* Mask for features that also apply to VLAN devices */ 2480 /* Mask for features that also apply to VLAN devices */
2461 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2481 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2462 NETIF_F_HIGHDMA | NETIF_F_TSO); 2482 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
2483 NETIF_F_RXCSUM);
2484 /* All offloads can be toggled */
2485 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
2463 efx = netdev_priv(net_dev); 2486 efx = netdev_priv(net_dev);
2464 pci_set_drvdata(pci_dev, efx); 2487 pci_set_drvdata(pci_dev, efx);
2465 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2488 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
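Note: taken together, the efx.c hunks above move the driver to the unified features API. Offloads that userspace may toggle are advertised in net_dev->hw_features at probe time, and .ndo_set_features runs when the active net_dev->features set changes, so the driver only reacts to the transitions it cares about (here, clearing manually inserted RX n-tuple filters when NETIF_F_NTUPLE is switched off). A minimal, hypothetical driver following the same shape (all foo_* names are placeholders):

#include <linux/netdevice.h>

/* Hypothetical hardware hook -- stands in for whatever register write a
 * real driver does to enable or disable RX checksum offload.
 */
static void foo_hw_set_rx_csum(void *priv, bool enable)
{
}

/* Called by the core whenever the active feature set changes. */
static int foo_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		foo_hw_set_rx_csum(netdev_priv(dev),
				   !!(features & NETIF_F_RXCSUM));
	return 0;
}

/* At probe time: hw_features is what "ethtool -K" may toggle,
 * features is what is enabled right now.
 */
static void foo_init_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features |= dev->hw_features;
}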
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 807178ef65ad..348437a8117e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -178,19 +178,27 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
178 */ 178 */
179 179
180/* Identify device by flashing LEDs */ 180/* Identify device by flashing LEDs */
181static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count) 181static int efx_ethtool_phys_id(struct net_device *net_dev,
182 enum ethtool_phys_id_state state)
182{ 183{
183 struct efx_nic *efx = netdev_priv(net_dev); 184 struct efx_nic *efx = netdev_priv(net_dev);
185 enum efx_led_mode mode = EFX_LED_DEFAULT;
184 186
185 do { 187 switch (state) {
186 efx->type->set_id_led(efx, EFX_LED_ON); 188 case ETHTOOL_ID_ON:
187 schedule_timeout_interruptible(HZ / 2); 189 mode = EFX_LED_ON;
188 190 break;
189 efx->type->set_id_led(efx, EFX_LED_OFF); 191 case ETHTOOL_ID_OFF:
190 schedule_timeout_interruptible(HZ / 2); 192 mode = EFX_LED_OFF;
191 } while (!signal_pending(current) && --count != 0); 193 break;
194 case ETHTOOL_ID_INACTIVE:
195 mode = EFX_LED_DEFAULT;
196 break;
197 case ETHTOOL_ID_ACTIVE:
198 return 1; /* cycle on/off once per second */
199 }
192 200
193 efx->type->set_id_led(efx, EFX_LED_DEFAULT); 201 efx->type->set_id_led(efx, mode);
194 return 0; 202 return 0;
195} 203}
196 204
@@ -211,7 +219,7 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
211 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 219 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
212 220
213 if (LOOPBACK_INTERNAL(efx)) { 221 if (LOOPBACK_INTERNAL(efx)) {
214 ecmd->speed = link_state->speed; 222 ethtool_cmd_speed_set(ecmd, link_state->speed);
215 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; 223 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
216 } 224 }
217 225
@@ -226,7 +234,8 @@ static int efx_ethtool_set_settings(struct net_device *net_dev,
226 int rc; 234 int rc;
227 235
228 /* GMAC does not support 1000Mbps HD */ 236 /* GMAC does not support 1000Mbps HD */
229 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { 237 if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
238 (ecmd->duplex != DUPLEX_FULL)) {
230 netif_dbg(efx, drv, efx->net_dev, 239 netif_dbg(efx, drv, efx->net_dev,
231 "rejecting unsupported 1000Mbps HD setting\n"); 240 "rejecting unsupported 1000Mbps HD setting\n");
232 return -EINVAL; 241 return -EINVAL;
@@ -518,72 +527,6 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
518 } 527 }
519} 528}
520 529
521static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
522{
523 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
524 u32 features;
525
526 features = NETIF_F_TSO;
527 if (efx->type->offload_features & NETIF_F_V6_CSUM)
528 features |= NETIF_F_TSO6;
529
530 if (enable)
531 net_dev->features |= features;
532 else
533 net_dev->features &= ~features;
534
535 return 0;
536}
537
538static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
539{
540 struct efx_nic *efx = netdev_priv(net_dev);
541 u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
542
543 if (enable)
544 net_dev->features |= features;
545 else
546 net_dev->features &= ~features;
547
548 return 0;
549}
550
551static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
552{
553 struct efx_nic *efx = netdev_priv(net_dev);
554
555 /* No way to stop the hardware doing the checks; we just
556 * ignore the result.
557 */
558 efx->rx_checksum_enabled = !!enable;
559
560 return 0;
561}
562
563static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
564{
565 struct efx_nic *efx = netdev_priv(net_dev);
566
567 return efx->rx_checksum_enabled;
568}
569
570static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
571{
572 struct efx_nic *efx = netdev_priv(net_dev);
573 u32 supported = (efx->type->offload_features &
574 (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
575 int rc;
576
577 rc = ethtool_op_set_flags(net_dev, data, supported);
578 if (rc)
579 return rc;
580
581 if (!(data & ETH_FLAG_NTUPLE))
582 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
583
584 return 0;
585}
586
587static void efx_ethtool_self_test(struct net_device *net_dev, 530static void efx_ethtool_self_test(struct net_device *net_dev,
588 struct ethtool_test *test, u64 *data) 531 struct ethtool_test *test, u64 *data)
589{ 532{
@@ -1012,8 +955,9 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
1012 955
1013 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) 956 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
1014 return efx_filter_remove_filter(efx, &filter); 957 return efx_filter_remove_filter(efx, &filter);
1015 else 958
1016 return efx_filter_insert_filter(efx, &filter, true); 959 rc = efx_filter_insert_filter(efx, &filter, true);
960 return rc < 0 ? rc : 0;
1017} 961}
1018 962
1019static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, 963static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
@@ -1070,22 +1014,10 @@ const struct ethtool_ops efx_ethtool_ops = {
1070 .set_ringparam = efx_ethtool_set_ringparam, 1014 .set_ringparam = efx_ethtool_set_ringparam,
1071 .get_pauseparam = efx_ethtool_get_pauseparam, 1015 .get_pauseparam = efx_ethtool_get_pauseparam,
1072 .set_pauseparam = efx_ethtool_set_pauseparam, 1016 .set_pauseparam = efx_ethtool_set_pauseparam,
1073 .get_rx_csum = efx_ethtool_get_rx_csum,
1074 .set_rx_csum = efx_ethtool_set_rx_csum,
1075 .get_tx_csum = ethtool_op_get_tx_csum,
1076 /* Need to enable/disable IPv6 too */
1077 .set_tx_csum = efx_ethtool_set_tx_csum,
1078 .get_sg = ethtool_op_get_sg,
1079 .set_sg = ethtool_op_set_sg,
1080 .get_tso = ethtool_op_get_tso,
1081 /* Need to enable/disable TSO-IPv6 too */
1082 .set_tso = efx_ethtool_set_tso,
1083 .get_flags = ethtool_op_get_flags,
1084 .set_flags = efx_ethtool_set_flags,
1085 .get_sset_count = efx_ethtool_get_sset_count, 1017 .get_sset_count = efx_ethtool_get_sset_count,
1086 .self_test = efx_ethtool_self_test, 1018 .self_test = efx_ethtool_self_test,
1087 .get_strings = efx_ethtool_get_strings, 1019 .get_strings = efx_ethtool_get_strings,
1088 .phys_id = efx_ethtool_phys_id, 1020 .set_phys_id = efx_ethtool_phys_id,
1089 .get_ethtool_stats = efx_ethtool_get_stats, 1021 .get_ethtool_stats = efx_ethtool_get_stats,
1090 .get_wol = efx_ethtool_get_wol, 1022 .get_wol = efx_ethtool_get_wol,
1091 .set_wol = efx_ethtool_set_wol, 1023 .set_wol = efx_ethtool_set_wol,
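Note: the phys_id conversion above shows the new ethtool identify interface. .set_phys_id is first called with ETHTOOL_ID_ACTIVE, and the return value tells the core how to drive the blinking: a positive N means "call me back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF N times per second" (hence the "cycle on/off once per second" comment), 0 means the driver blinks asynchronously on its own; ETHTOOL_ID_INACTIVE then restores the normal LED state. A generic skeleton with a hypothetical foo_led() helper:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical helper: drive the identify LED. */
static void foo_led(struct net_device *dev, bool on)
{
}

static int foo_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;		/* core cycles ON/OFF once per second */
	case ETHTOOL_ID_ON:
		foo_led(dev, true);
		break;
	case ETHTOOL_ID_OFF:
		foo_led(dev, false);
		break;
	case ETHTOOL_ID_INACTIVE:
		foo_led(dev, false);	/* restore normal LED behaviour */
		break;
	}
	return 0;
}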
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d96b23769bd1..60176e873d62 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1703,7 +1703,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
1703 ************************************************************************** 1703 **************************************************************************
1704 */ 1704 */
1705 1705
1706struct efx_nic_type falcon_a1_nic_type = { 1706const struct efx_nic_type falcon_a1_nic_type = {
1707 .probe = falcon_probe_nic, 1707 .probe = falcon_probe_nic,
1708 .remove = falcon_remove_nic, 1708 .remove = falcon_remove_nic,
1709 .init = falcon_init_nic, 1709 .init = falcon_init_nic,
@@ -1744,7 +1744,7 @@ struct efx_nic_type falcon_a1_nic_type = {
1744 .reset_world_flags = ETH_RESET_IRQ, 1744 .reset_world_flags = ETH_RESET_IRQ,
1745}; 1745};
1746 1746
1747struct efx_nic_type falcon_b0_nic_type = { 1747const struct efx_nic_type falcon_b0_nic_type = {
1748 .probe = falcon_probe_nic, 1748 .probe = falcon_probe_nic,
1749 .remove = falcon_remove_nic, 1749 .remove = falcon_remove_nic,
1750 .init = falcon_init_nic, 1750 .init = falcon_init_nic,
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 2c9ee5db3bf7..9516452c079c 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -362,7 +362,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
362 falcon_ack_status_intr(efx); 362 falcon_ack_status_intr(efx);
363} 363}
364 364
365struct efx_mac_operations falcon_xmac_operations = { 365const struct efx_mac_operations falcon_xmac_operations = {
366 .reconfigure = falcon_reconfigure_xmac, 366 .reconfigure = falcon_reconfigure_xmac,
367 .update_stats = falcon_update_stats_xmac, 367 .update_stats = falcon_update_stats_xmac,
368 .check_fault = falcon_xmac_check_fault, 368 .check_fault = falcon_xmac_check_fault,
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index 6886cdf87c12..d6a255d0856b 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,8 +13,8 @@
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15 15
16extern struct efx_mac_operations falcon_xmac_operations; 16extern const struct efx_mac_operations falcon_xmac_operations;
17extern struct efx_mac_operations efx_mcdi_mac_operations; 17extern const struct efx_mac_operations efx_mcdi_mac_operations;
18extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 18extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
19 u32 dma_len, int enable, int clear); 19 u32 dma_len, int enable, int clear);
20 20
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 33f7294edb47..50c20777a564 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -138,7 +138,7 @@ static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
138} 138}
139 139
140 140
141struct efx_mac_operations efx_mcdi_mac_operations = { 141const struct efx_mac_operations efx_mcdi_mac_operations = {
142 .reconfigure = efx_mcdi_mac_reconfigure, 142 .reconfigure = efx_mcdi_mac_reconfigure,
143 .update_stats = efx_port_dummy_op_void, 143 .update_stats = efx_port_dummy_op_void,
144 .check_fault = efx_mcdi_mac_check_fault, 144 .check_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 7e3c65b0c99f..6c63ab0710af 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -513,7 +513,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
513 ecmd->supported = 513 ecmd->supported =
514 mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); 514 mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
515 ecmd->advertising = efx->link_advertising; 515 ecmd->advertising = efx->link_advertising;
516 ecmd->speed = efx->link_state.speed; 516 ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
517 ecmd->duplex = efx->link_state.fd; 517 ecmd->duplex = efx->link_state.fd;
518 ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); 518 ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
519 ecmd->phy_address = phy_cfg->port; 519 ecmd->phy_address = phy_cfg->port;
@@ -545,7 +545,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
545 caps = (ethtool_to_mcdi_cap(ecmd->advertising) | 545 caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
546 1 << MC_CMD_PHY_CAP_AN_LBN); 546 1 << MC_CMD_PHY_CAP_AN_LBN);
547 } else if (ecmd->duplex) { 547 } else if (ecmd->duplex) {
548 switch (ecmd->speed) { 548 switch (ethtool_cmd_speed(ecmd)) {
549 case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; 549 case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
550 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; 550 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
551 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; 551 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
@@ -553,7 +553,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
553 default: return -EINVAL; 553 default: return -EINVAL;
554 } 554 }
555 } else { 555 } else {
556 switch (ecmd->speed) { 556 switch (ethtool_cmd_speed(ecmd)) {
557 case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; 557 case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
558 case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; 558 case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
559 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; 559 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
@@ -739,7 +739,7 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
739 return NULL; 739 return NULL;
740} 740}
741 741
742struct efx_phy_operations efx_mcdi_phy_ops = { 742const struct efx_phy_operations efx_mcdi_phy_ops = {
743 .probe = efx_mcdi_phy_probe, 743 .probe = efx_mcdi_phy_probe,
744 .init = efx_port_dummy_op_int, 744 .init = efx_port_dummy_op_int,
745 .reconfigure = efx_mcdi_phy_reconfigure, 745 .reconfigure = efx_mcdi_phy_reconfigure,
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 19e68c26d103..71159145b4bf 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -232,12 +232,12 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
232 */ 232 */
233int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 233int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
234{ 234{
235 struct ethtool_cmd prev; 235 struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
236 236
237 efx->phy_op->get_settings(efx, &prev); 237 efx->phy_op->get_settings(efx, &prev);
238 238
239 if (ecmd->advertising == prev.advertising && 239 if (ecmd->advertising == prev.advertising &&
240 ecmd->speed == prev.speed && 240 ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
241 ecmd->duplex == prev.duplex && 241 ecmd->duplex == prev.duplex &&
242 ecmd->port == prev.port && 242 ecmd->port == prev.port &&
243 ecmd->autoneg == prev.autoneg) 243 ecmd->autoneg == prev.autoneg)
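Note: the mdio_10g.c hunk both initialises prev and compares speeds through the accessor. The designated initialiser appears intended to keep the later comparison deterministic: in C, any member not named in the initialiser is zero-filled, so fields a particular ->get_settings() never writes do not hold stack garbage. A sketch only, with my own example_* name:

#include <linux/ethtool.h>

/* Sketch only: every member not named in the initialiser is zero-filled,
 * so fields such as speed_hi or reserved words compare as 0 rather than
 * as uninitialised stack contents.
 */
static void example_snapshot_settings(struct ethtool_cmd *out)
{
	struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };

	/* a real caller would now fill prev via its ->get_settings() hook */
	*out = prev;
}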
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 191a311da2dc..ce9697bac1b9 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -670,16 +670,14 @@ struct efx_filter_state;
670 * @mtd_list: List of MTDs attached to the NIC 670 * @mtd_list: List of MTDs attached to the NIC
671 * @nic_data: Hardware dependent state 671 * @nic_data: Hardware dependent state
672 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 672 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
673 * @port_inhibited, efx_monitor() and efx_reconfigure_port() 673 * efx_monitor() and efx_reconfigure_port()
674 * @port_enabled: Port enabled indicator. 674 * @port_enabled: Port enabled indicator.
675 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and 675 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
676 * efx_mac_work() with kernel interfaces. Safe to read under any 676 * efx_mac_work() with kernel interfaces. Safe to read under any
677 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 677 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
678 * be held to modify it. 678 * be held to modify it.
679 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
680 * @port_initialized: Port initialized? 679 * @port_initialized: Port initialized?
681 * @net_dev: Operating system network device. Consider holding the rtnl lock 680 * @net_dev: Operating system network device. Consider holding the rtnl lock
682 * @rx_checksum_enabled: RX checksumming enabled
683 * @stats_buffer: DMA buffer for statistics 681 * @stats_buffer: DMA buffer for statistics
684 * @mac_op: MAC interface 682 * @mac_op: MAC interface
685 * @phy_type: PHY type 683 * @phy_type: PHY type
@@ -765,18 +763,16 @@ struct efx_nic {
765 struct mutex mac_lock; 763 struct mutex mac_lock;
766 struct work_struct mac_work; 764 struct work_struct mac_work;
767 bool port_enabled; 765 bool port_enabled;
768 bool port_inhibited;
769 766
770 bool port_initialized; 767 bool port_initialized;
771 struct net_device *net_dev; 768 struct net_device *net_dev;
772 bool rx_checksum_enabled;
773 769
774 struct efx_buffer stats_buffer; 770 struct efx_buffer stats_buffer;
775 771
776 struct efx_mac_operations *mac_op; 772 const struct efx_mac_operations *mac_op;
777 773
778 unsigned int phy_type; 774 unsigned int phy_type;
779 struct efx_phy_operations *phy_op; 775 const struct efx_phy_operations *phy_op;
780 void *phy_data; 776 void *phy_data;
781 struct mdio_if_info mdio; 777 struct mdio_if_info mdio;
782 unsigned int mdio_bus; 778 unsigned int mdio_bus;
@@ -897,7 +893,7 @@ struct efx_nic_type {
897 void (*resume_wol)(struct efx_nic *efx); 893 void (*resume_wol)(struct efx_nic *efx);
898 int (*test_registers)(struct efx_nic *efx); 894 int (*test_registers)(struct efx_nic *efx);
899 int (*test_nvram)(struct efx_nic *efx); 895 int (*test_nvram)(struct efx_nic *efx);
900 struct efx_mac_operations *default_mac_ops; 896 const struct efx_mac_operations *default_mac_ops;
901 897
902 int revision; 898 int revision;
903 unsigned int mem_map_size; 899 unsigned int mem_map_size;
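Note: the net_driver.h hunk above, together with the falcon/siena/MAC/PHY hunks, constifies the driver's operations tables and the pointers that reference them. Since these structures are only read after initialisation, declaring them const lets the compiler place them in read-only data and catches accidental writes at build time. The pattern, reduced to a self-contained sketch with invented foo_* names:

/* Sketch: an operations table that is never modified at run time. */
struct foo_phy_ops {
	int (*init)(void *ctx);
	int (*reconfigure)(void *ctx);
};

static int foo_phy_init(void *ctx) { return 0; }
static int foo_phy_reconfigure(void *ctx) { return 0; }

static const struct foo_phy_ops foo_phy_ops_table = {
	.init = foo_phy_init,
	.reconfigure = foo_phy_reconfigure,
};

/* Consumers store a pointer-to-const, so they cannot write through it. */
struct foo_nic {
	const struct foo_phy_ops *phy_op;
};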
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 9b29a8d7c449..f2a2b947f860 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -852,7 +852,6 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
852 unsigned expected_ptr; 852 unsigned expected_ptr;
853 bool rx_ev_pkt_ok, discard = false, checksummed; 853 bool rx_ev_pkt_ok, discard = false, checksummed;
854 struct efx_rx_queue *rx_queue; 854 struct efx_rx_queue *rx_queue;
855 struct efx_nic *efx = channel->efx;
856 855
857 /* Basic packet information */ 856 /* Basic packet information */
858 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 857 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -875,9 +874,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
875 * UDP/IP, then we can rely on the hardware checksum. 874 * UDP/IP, then we can rely on the hardware checksum.
876 */ 875 */
877 checksummed = 876 checksummed =
878 likely(efx->rx_checksum_enabled) && 877 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
879 (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 878 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
880 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
881 } else { 879 } else {
882 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); 880 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
883 checksummed = false; 881 checksummed = false;
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d91701abd331..4bd1f2839dfe 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -152,9 +152,9 @@ struct siena_nic_data {
152 int wol_filter_id; 152 int wol_filter_id;
153}; 153};
154 154
155extern struct efx_nic_type falcon_a1_nic_type; 155extern const struct efx_nic_type falcon_a1_nic_type;
156extern struct efx_nic_type falcon_b0_nic_type; 156extern const struct efx_nic_type falcon_b0_nic_type;
157extern struct efx_nic_type siena_a0_nic_type; 157extern const struct efx_nic_type siena_a0_nic_type;
158 158
159/************************************************************************** 159/**************************************************************************
160 * 160 *
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index b3b79472421e..11d148cd8441 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -13,14 +13,14 @@
13/**************************************************************************** 13/****************************************************************************
14 * 10Xpress (SFX7101) PHY 14 * 10Xpress (SFX7101) PHY
15 */ 15 */
16extern struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
17 17
18extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 18extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
19 19
20/**************************************************************************** 20/****************************************************************************
21 * AMCC/Quake QT202x PHYs 21 * AMCC/Quake QT202x PHYs
22 */ 22 */
23extern struct efx_phy_operations falcon_qt202x_phy_ops; 23extern const struct efx_phy_operations falcon_qt202x_phy_ops;
24 24
25/* These PHYs provide various H/W control states for LEDs */ 25/* These PHYs provide various H/W control states for LEDs */
26#define QUAKE_LED_LINK_INVAL (0) 26#define QUAKE_LED_LINK_INVAL (0)
@@ -39,7 +39,7 @@ extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
39/**************************************************************************** 39/****************************************************************************
40* Transwitch CX4 retimer 40* Transwitch CX4 retimer
41*/ 41*/
42extern struct efx_phy_operations falcon_txc_phy_ops; 42extern const struct efx_phy_operations falcon_txc_phy_ops;
43 43
44#define TXC_GPIO_DIR_INPUT 0 44#define TXC_GPIO_DIR_INPUT 0
45#define TXC_GPIO_DIR_OUTPUT 1 45#define TXC_GPIO_DIR_OUTPUT 1
@@ -50,7 +50,7 @@ extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
50/**************************************************************************** 50/****************************************************************************
51 * Siena managed PHYs 51 * Siena managed PHYs
52 */ 52 */
53extern struct efx_phy_operations efx_mcdi_phy_ops; 53extern const struct efx_phy_operations efx_mcdi_phy_ops;
54 54
55extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, 55extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
56 unsigned int prtad, unsigned int devad, 56 unsigned int prtad, unsigned int devad,
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 55f90924247e..7ad97e397406 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -449,7 +449,7 @@ static void qt202x_phy_remove(struct efx_nic *efx)
449 efx->phy_data = NULL; 449 efx->phy_data = NULL;
450} 450}
451 451
452struct efx_phy_operations falcon_qt202x_phy_ops = { 452const struct efx_phy_operations falcon_qt202x_phy_ops = {
453 .probe = qt202x_phy_probe, 453 .probe = qt202x_phy_probe,
454 .init = qt202x_phy_init, 454 .init = qt202x_phy_init,
455 .reconfigure = qt202x_phy_reconfigure, 455 .reconfigure = qt202x_phy_reconfigure,
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index c0fdb59030fb..b7dc891b4461 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -605,6 +605,9 @@ void __efx_rx_packet(struct efx_channel *channel,
605 skb_record_rx_queue(skb, channel->channel); 605 skb_record_rx_queue(skb, channel->channel);
606 } 606 }
607 607
608 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
609 checksummed = false;
610
608 if (likely(checksummed || rx_buf->is_page)) { 611 if (likely(checksummed || rx_buf->is_page)) {
609 efx_rx_packet_gro(channel, rx_buf, eh, checksummed); 612 efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
610 return; 613 return;
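Note: with the rx_checksum_enabled flag gone, the RX path above consults net_dev->features directly and simply ignores the hardware checksum result when NETIF_F_RXCSUM has been cleared. The per-packet idiom, as a sketch (foo_* is a placeholder; protocol and other skb fields are assumed to be set by the caller):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: honour the current RXCSUM setting when completing a packet. */
static void foo_rx_complete(struct net_device *dev, struct sk_buff *skb,
			    bool hw_csum_ok)
{
	if ((dev->features & NETIF_F_RXCSUM) && hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* device verified it */
	else
		skb->ip_summed = CHECKSUM_NONE;		/* stack checks in software */

	netif_receive_skb(skb);
}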
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 50ad3bcaf68a..822f6c2a6a7c 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -695,12 +695,12 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
695 /* Offline (i.e. disruptive) testing 695 /* Offline (i.e. disruptive) testing
696 * This checks MAC and PHY loopback on the specified port. */ 696 * This checks MAC and PHY loopback on the specified port. */
697 697
698 /* force the carrier state off so the kernel doesn't transmit during 698 /* Detach the device so the kernel doesn't transmit during the
699 * the loopback test, and the watchdog timeout doesn't fire. Also put 699 * loopback test and the watchdog timeout doesn't fire.
700 * falcon into loopback for the register test.
701 */ 700 */
701 netif_device_detach(efx->net_dev);
702
702 mutex_lock(&efx->mac_lock); 703 mutex_lock(&efx->mac_lock);
703 efx->port_inhibited = true;
704 if (efx->loopback_modes) { 704 if (efx->loopback_modes) {
705 /* We need the 312 clock from the PHY to test the XMAC 705 /* We need the 312 clock from the PHY to test the XMAC
706 * registers, so move into XGMII loopback if available */ 706 * registers, so move into XGMII loopback if available */
@@ -750,12 +750,11 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
750 /* restore the PHY to the previous state */ 750 /* restore the PHY to the previous state */
751 mutex_lock(&efx->mac_lock); 751 mutex_lock(&efx->mac_lock);
752 efx->phy_mode = phy_mode; 752 efx->phy_mode = phy_mode;
753 efx->port_inhibited = false;
754 efx->loopback_mode = loopback_mode; 753 efx->loopback_mode = loopback_mode;
755 __efx_reconfigure_port(efx); 754 __efx_reconfigure_port(efx);
756 mutex_unlock(&efx->mac_lock); 755 mutex_unlock(&efx->mac_lock);
757 756
758 netif_tx_wake_all_queues(efx->net_dev); 757 netif_device_attach(efx->net_dev);
759 758
760 return rc_test; 759 return rc_test;
761} 760}
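Note: the selftest and reset hunks replace the driver-private port_inhibited flag with the stock netif_device_detach()/netif_device_attach() pair. Detaching marks the device as absent, which stops the stack from queuing transmits and disarms the watchdog, and the TX paths elsewhere in the patch test netif_device_present() instead of the old flag. Sketched usage around any disruptive operation (foo_* names are hypothetical):

#include <linux/netdevice.h>

/* Hypothetical disruptive operation (reset, loopback selftest, ...). */
static int foo_do_disruptive_work(struct net_device *dev)
{
	return 0;
}

static int foo_run_disruptive_op(struct net_device *dev)
{
	int rc;

	netif_device_detach(dev);	/* stack stops transmitting, watchdog off */

	rc = foo_do_disruptive_work(dev);

	if (rc == 0)
		netif_device_attach(dev);	/* resume normal operation */
	return rc;
}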
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 837869b71db9..fb4721f780ff 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -599,7 +599,7 @@ static void siena_init_wol(struct efx_nic *efx)
599 ************************************************************************** 599 **************************************************************************
600 */ 600 */
601 601
602struct efx_nic_type siena_a0_nic_type = { 602const struct efx_nic_type siena_a0_nic_type = {
603 .probe = siena_probe_nic, 603 .probe = siena_probe_nic,
604 .remove = siena_remove_nic, 604 .remove = siena_remove_nic,
605 .init = siena_init_nic, 605 .init = siena_init_nic,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index efdceb35aaae..7b0fd89e7b85 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -460,7 +460,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
460 /* In loopback, the PHY automatically brings up the correct interface, 460 /* In loopback, the PHY automatically brings up the correct interface,
461 * but doesn't advertise the correct speed. So override it */ 461 * but doesn't advertise the correct speed. So override it */
462 if (LOOPBACK_EXTERNAL(efx)) 462 if (LOOPBACK_EXTERNAL(efx))
463 ecmd->speed = SPEED_10000; 463 ethtool_cmd_speed_set(ecmd, SPEED_10000);
464} 464}
465 465
466static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 466static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
@@ -478,7 +478,7 @@ static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
478 advertising & ADVERTISED_10000baseT_Full); 478 advertising & ADVERTISED_10000baseT_Full);
479} 479}
480 480
481struct efx_phy_operations falcon_sfx7101_phy_ops = { 481const struct efx_phy_operations falcon_sfx7101_phy_ops = {
482 .probe = tenxpress_phy_probe, 482 .probe = tenxpress_phy_probe,
483 .init = tenxpress_phy_init, 483 .init = tenxpress_phy_init,
484 .reconfigure = tenxpress_phy_reconfigure, 484 .reconfigure = tenxpress_phy_reconfigure,
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index d2c85dfdf3bf..84eb99e0f8d2 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -205,7 +205,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
205 goto unwind; 205 goto unwind;
206 } 206 }
207 smp_mb(); 207 smp_mb();
208 netif_tx_start_queue(tx_queue->core_txq); 208 if (likely(!efx->loopback_selftest))
209 netif_tx_start_queue(
210 tx_queue->core_txq);
209 } 211 }
210 212
211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 213 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -338,8 +340,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
338 struct efx_tx_queue *tx_queue; 340 struct efx_tx_queue *tx_queue;
339 unsigned index, type; 341 unsigned index, type;
340 342
341 if (unlikely(efx->port_inhibited)) 343 EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
342 return NETDEV_TX_BUSY;
343 344
344 index = skb_get_queue_mapping(skb); 345 index = skb_get_queue_mapping(skb);
345 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; 346 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
@@ -436,7 +437,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
436 smp_mb(); 437 smp_mb();
437 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 438 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
438 likely(efx->port_enabled) && 439 likely(efx->port_enabled) &&
439 likely(!efx->port_inhibited)) { 440 likely(netif_device_present(efx->net_dev))) {
440 fill_level = tx_queue->insert_count - tx_queue->read_count; 441 fill_level = tx_queue->insert_count - tx_queue->read_count;
441 if (fill_level < EFX_TXQ_THRESHOLD(efx)) { 442 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
442 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 443 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
index d9886addcc99..7c21b334a75b 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -545,7 +545,7 @@ static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
545 mdio45_ethtool_gset(&efx->mdio, ecmd); 545 mdio45_ethtool_gset(&efx->mdio, ecmd);
546} 546}
547 547
548struct efx_phy_operations falcon_txc_phy_ops = { 548const struct efx_phy_operations falcon_txc_phy_ops = {
549 .probe = txc43128_phy_probe, 549 .probe = txc43128_phy_probe,
550 .init = txc43128_phy_init, 550 .init = txc43128_phy_init,
551 .reconfigure = txc43128_phy_reconfigure, 551 .reconfigure = txc43128_phy_reconfigure,
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 35b28f42d208..52a48cb75440 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -303,7 +303,7 @@ static int skge_get_settings(struct net_device *dev,
303 303
304 ecmd->advertising = skge->advertising; 304 ecmd->advertising = skge->advertising;
305 ecmd->autoneg = skge->autoneg; 305 ecmd->autoneg = skge->autoneg;
306 ecmd->speed = skge->speed; 306 ethtool_cmd_speed_set(ecmd, skge->speed);
307 ecmd->duplex = skge->duplex; 307 ecmd->duplex = skge->duplex;
308 return 0; 308 return 0;
309} 309}
@@ -321,8 +321,9 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
321 skge->speed = -1; 321 skge->speed = -1;
322 } else { 322 } else {
323 u32 setting; 323 u32 setting;
324 u32 speed = ethtool_cmd_speed(ecmd);
324 325
325 switch (ecmd->speed) { 326 switch (speed) {
326 case SPEED_1000: 327 case SPEED_1000:
327 if (ecmd->duplex == DUPLEX_FULL) 328 if (ecmd->duplex == DUPLEX_FULL)
328 setting = SUPPORTED_1000baseT_Full; 329 setting = SUPPORTED_1000baseT_Full;
@@ -355,7 +356,7 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
355 if ((setting & supported) == 0) 356 if ((setting & supported) == 0)
356 return -EINVAL; 357 return -EINVAL;
357 358
358 skge->speed = ecmd->speed; 359 skge->speed = speed;
359 skge->duplex = ecmd->duplex; 360 skge->duplex = ecmd->duplex;
360 } 361 }
361 362
@@ -537,46 +538,6 @@ static int skge_nway_reset(struct net_device *dev)
537 return 0; 538 return 0;
538} 539}
539 540
540static int skge_set_sg(struct net_device *dev, u32 data)
541{
542 struct skge_port *skge = netdev_priv(dev);
543 struct skge_hw *hw = skge->hw;
544
545 if (hw->chip_id == CHIP_ID_GENESIS && data)
546 return -EOPNOTSUPP;
547 return ethtool_op_set_sg(dev, data);
548}
549
550static int skge_set_tx_csum(struct net_device *dev, u32 data)
551{
552 struct skge_port *skge = netdev_priv(dev);
553 struct skge_hw *hw = skge->hw;
554
555 if (hw->chip_id == CHIP_ID_GENESIS && data)
556 return -EOPNOTSUPP;
557
558 return ethtool_op_set_tx_csum(dev, data);
559}
560
561static u32 skge_get_rx_csum(struct net_device *dev)
562{
563 struct skge_port *skge = netdev_priv(dev);
564
565 return skge->rx_csum;
566}
567
568/* Only Yukon supports checksum offload. */
569static int skge_set_rx_csum(struct net_device *dev, u32 data)
570{
571 struct skge_port *skge = netdev_priv(dev);
572
573 if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
574 return -EOPNOTSUPP;
575
576 skge->rx_csum = data;
577 return 0;
578}
579
580static void skge_get_pauseparam(struct net_device *dev, 541static void skge_get_pauseparam(struct net_device *dev,
581 struct ethtool_pauseparam *ecmd) 542 struct ethtool_pauseparam *ecmd)
582{ 543{
@@ -786,28 +747,27 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
786} 747}
787 748
788/* blink LED's for finding board */ 749/* blink LED's for finding board */
789static int skge_phys_id(struct net_device *dev, u32 data) 750static int skge_set_phys_id(struct net_device *dev,
751 enum ethtool_phys_id_state state)
790{ 752{
791 struct skge_port *skge = netdev_priv(dev); 753 struct skge_port *skge = netdev_priv(dev);
792 unsigned long ms;
793 enum led_mode mode = LED_MODE_TST;
794 754
795 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 755 switch (state) {
796 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000; 756 case ETHTOOL_ID_ACTIVE:
797 else 757 return 2; /* cycle on/off twice per second */
798 ms = data * 1000;
799 758
800 while (ms > 0) { 759 case ETHTOOL_ID_ON:
801 skge_led(skge, mode); 760 skge_led(skge, LED_MODE_TST);
802 mode ^= LED_MODE_TST; 761 break;
803 762
804 if (msleep_interruptible(BLINK_MS)) 763 case ETHTOOL_ID_OFF:
805 break; 764 skge_led(skge, LED_MODE_OFF);
806 ms -= BLINK_MS; 765 break;
807 }
808 766
809 /* back to regular LED state */ 767 case ETHTOOL_ID_INACTIVE:
810 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); 768 /* back to regular LED state */
769 skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
770 }
811 771
812 return 0; 772 return 0;
813} 773}
@@ -925,12 +885,8 @@ static const struct ethtool_ops skge_ethtool_ops = {
925 .set_pauseparam = skge_set_pauseparam, 885 .set_pauseparam = skge_set_pauseparam,
926 .get_coalesce = skge_get_coalesce, 886 .get_coalesce = skge_get_coalesce,
927 .set_coalesce = skge_set_coalesce, 887 .set_coalesce = skge_set_coalesce,
928 .set_sg = skge_set_sg,
929 .set_tx_csum = skge_set_tx_csum,
930 .get_rx_csum = skge_get_rx_csum,
931 .set_rx_csum = skge_set_rx_csum,
932 .get_strings = skge_get_strings, 888 .get_strings = skge_get_strings,
933 .phys_id = skge_phys_id, 889 .set_phys_id = skge_set_phys_id,
934 .get_sset_count = skge_get_sset_count, 890 .get_sset_count = skge_get_sset_count,
935 .get_ethtool_stats = skge_get_ethtool_stats, 891 .get_ethtool_stats = skge_get_ethtool_stats,
936}; 892};
@@ -3085,7 +3041,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3085 } 3041 }
3086 3042
3087 skb_put(skb, len); 3043 skb_put(skb, len);
3088 if (skge->rx_csum) { 3044
3045 if (dev->features & NETIF_F_RXCSUM) {
3089 skb->csum = csum; 3046 skb->csum = csum;
3090 skb->ip_summed = CHECKSUM_COMPLETE; 3047 skb->ip_summed = CHECKSUM_COMPLETE;
3091 } 3048 }
@@ -3847,10 +3804,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3847 setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); 3804 setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
3848 3805
3849 if (hw->chip_id != CHIP_ID_GENESIS) { 3806 if (hw->chip_id != CHIP_ID_GENESIS) {
3850 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3807 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3851 skge->rx_csum = 1; 3808 NETIF_F_RXCSUM;
3809 dev->features |= dev->hw_features;
3852 } 3810 }
3853 dev->features |= NETIF_F_GRO;
3854 3811
3855 /* read the mac address */ 3812 /* read the mac address */
3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3813 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
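Note: in the skge RX hunk above, the NETIF_F_RXCSUM test guards CHECKSUM_COMPLETE handling: the hardware supplies a ones'-complement sum over the received data, the driver stores it in skb->csum, and the stack folds and verifies it, as opposed to CHECKSUM_UNNECESSARY where the device has already validated the checksum itself. A compressed sketch of the three outcomes (foo_* is a placeholder):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: ways a driver can report hardware RX checksum results. */
static void foo_report_rx_csum(struct net_device *dev, struct sk_buff *skb,
			       __wsum hw_csum, bool hw_validated)
{
	if (!(dev->features & NETIF_F_RXCSUM)) {
		skb->ip_summed = CHECKSUM_NONE;		/* stack checks in software */
	} else if (hw_validated) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* device verified it */
	} else {
		skb->csum = hw_csum;			/* raw sum over the data */
		skb->ip_summed = CHECKSUM_COMPLETE;	/* stack folds and verifies */
	}
}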
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 51c0214ac25c..598bf7a1a55e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2460,7 +2460,6 @@ struct skge_port {
2460 struct timer_list link_timer; 2460 struct timer_list link_timer;
2461 enum pause_control flow_control; 2461 enum pause_control flow_control;
2462 enum pause_status flow_status; 2462 enum pause_status flow_status;
2463 u8 rx_csum;
2464 u8 blink_on; 2463 u8 blink_on;
2465 u8 wol; 2464 u8 wol;
2466 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ 2465 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ff8d262dc276..3ee41da130c2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1198,12 +1198,12 @@ static void rx_set_checksum(struct sky2_port *sky2)
1198 1198
1199 sky2_write32(sky2->hw, 1199 sky2_write32(sky2->hw,
1200 Q_ADDR(rxqaddr[sky2->port], Q_CSR), 1200 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1201 (sky2->flags & SKY2_FLAG_RX_CHECKSUM) 1201 (sky2->netdev->features & NETIF_F_RXCSUM)
1202 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 1202 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1203} 1203}
1204 1204
1205/* Enable/disable receive hash calculation (RSS) */ 1205/* Enable/disable receive hash calculation (RSS) */
1206static void rx_set_rss(struct net_device *dev) 1206static void rx_set_rss(struct net_device *dev, u32 features)
1207{ 1207{
1208 struct sky2_port *sky2 = netdev_priv(dev); 1208 struct sky2_port *sky2 = netdev_priv(dev);
1209 struct sky2_hw *hw = sky2->hw; 1209 struct sky2_hw *hw = sky2->hw;
@@ -1216,7 +1216,7 @@ static void rx_set_rss(struct net_device *dev)
1216 } 1216 }
1217 1217
1218 /* Program RSS initial values */ 1218 /* Program RSS initial values */
1219 if (dev->features & NETIF_F_RXHASH) { 1219 if (features & NETIF_F_RXHASH) {
1220 u32 key[nkeys]; 1220 u32 key[nkeys];
1221 1221
1222 get_random_bytes(key, nkeys * sizeof(u32)); 1222 get_random_bytes(key, nkeys * sizeof(u32));
@@ -1322,32 +1322,32 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1322 return err; 1322 return err;
1323} 1323}
1324 1324
1325#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX) 1325#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
1326 1326
1327static void sky2_vlan_mode(struct net_device *dev) 1327static void sky2_vlan_mode(struct net_device *dev, u32 features)
1328{ 1328{
1329 struct sky2_port *sky2 = netdev_priv(dev); 1329 struct sky2_port *sky2 = netdev_priv(dev);
1330 struct sky2_hw *hw = sky2->hw; 1330 struct sky2_hw *hw = sky2->hw;
1331 u16 port = sky2->port; 1331 u16 port = sky2->port;
1332 1332
1333 if (dev->features & NETIF_F_HW_VLAN_RX) 1333 if (features & NETIF_F_HW_VLAN_RX)
1334 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1334 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1335 RX_VLAN_STRIP_ON); 1335 RX_VLAN_STRIP_ON);
1336 else 1336 else
1337 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1337 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1338 RX_VLAN_STRIP_OFF); 1338 RX_VLAN_STRIP_OFF);
1339 1339
1340 dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN; 1340 if (features & NETIF_F_HW_VLAN_TX) {
1341 if (dev->features & NETIF_F_HW_VLAN_TX)
1342 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1341 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1343 TX_VLAN_TAG_ON); 1342 TX_VLAN_TAG_ON);
1344 else { 1343
1344 dev->vlan_features |= SKY2_VLAN_OFFLOADS;
1345 } else {
1345 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1346 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1346 TX_VLAN_TAG_OFF); 1347 TX_VLAN_TAG_OFF);
1347 1348
1348 /* Can't do transmit offload of vlan without hw vlan */ 1349 /* Can't do transmit offload of vlan without hw vlan */
1349 dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG 1350 dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
1350 | NETIF_F_ALL_CSUM);
1351 } 1351 }
1352} 1352}
1353 1353
@@ -1463,7 +1463,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
1463 rx_set_checksum(sky2); 1463 rx_set_checksum(sky2);
1464 1464
1465 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) 1465 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
1466 rx_set_rss(sky2->netdev); 1466 rx_set_rss(sky2->netdev, sky2->netdev->features);
1467 1467
1468 /* submit Rx ring */ 1468 /* submit Rx ring */
1469 for (i = 0; i < sky2->rx_pending; i++) { 1469 for (i = 0; i < sky2->rx_pending; i++) {
@@ -1626,7 +1626,8 @@ static void sky2_hw_up(struct sky2_port *sky2)
1626 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1626 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1627 sky2->tx_ring_size - 1); 1627 sky2->tx_ring_size - 1);
1628 1628
1629 sky2_vlan_mode(sky2->netdev); 1629 sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
1630 netdev_update_features(sky2->netdev);
1630 1631
1631 sky2_rx_start(sky2); 1632 sky2_rx_start(sky2);
1632} 1633}
@@ -2261,12 +2262,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2261 hw->chip_id == CHIP_ID_YUKON_FE_P)) 2262 hw->chip_id == CHIP_ID_YUKON_FE_P))
2262 return -EINVAL; 2263 return -EINVAL;
2263 2264
2264 /* TSO, etc on Yukon Ultra and MTU > 1500 not supported */
2265 if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
2266 dev->features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
2267
2268 if (!netif_running(dev)) { 2265 if (!netif_running(dev)) {
2269 dev->mtu = new_mtu; 2266 dev->mtu = new_mtu;
2267 netdev_update_features(dev);
2270 return 0; 2268 return 0;
2271 } 2269 }
2272 2270
@@ -2288,6 +2286,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2288 sky2_rx_clean(sky2); 2286 sky2_rx_clean(sky2);
2289 2287
2290 dev->mtu = new_mtu; 2288 dev->mtu = new_mtu;
2289 netdev_update_features(dev);
2291 2290
2292 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | 2291 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
2293 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 2292 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
@@ -2535,8 +2534,11 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2535 "%s: receive checksum problem (status = %#x)\n", 2534 "%s: receive checksum problem (status = %#x)\n",
2536 sky2->netdev->name, status); 2535 sky2->netdev->name, status);
2537 2536
2538 /* Disable checksum offload */ 2537 /* Disable checksum offload
2539 sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM; 2538 * It will be reenabled on next ndo_set_features, but if it's
2539 * really broken, will get disabled again
2540 */
2541 sky2->netdev->features &= ~NETIF_F_RXCSUM;
2540 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), 2542 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2541 BMU_DIS_RX_CHKSUM); 2543 BMU_DIS_RX_CHKSUM);
2542 } 2544 }
@@ -2591,7 +2593,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2591 2593
2592 /* This chip reports checksum status differently */ 2594 /* This chip reports checksum status differently */
2593 if (hw->flags & SKY2_HW_NEW_LE) { 2595 if (hw->flags & SKY2_HW_NEW_LE) {
2594 if ((sky2->flags & SKY2_FLAG_RX_CHECKSUM) && 2596 if ((dev->features & NETIF_F_RXCSUM) &&
2595 (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && 2597 (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
2596 (le->css & CSS_TCPUDPCSOK)) 2598 (le->css & CSS_TCPUDPCSOK))
2597 skb->ip_summed = CHECKSUM_UNNECESSARY; 2599 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2616,7 +2618,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2616 sky2->rx_tag = length; 2618 sky2->rx_tag = length;
2617 /* fall through */ 2619 /* fall through */
2618 case OP_RXCHKS: 2620 case OP_RXCHKS:
2619 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) 2621 if (likely(dev->features & NETIF_F_RXCSUM))
2620 sky2_rx_checksum(sky2, status); 2622 sky2_rx_checksum(sky2, status);
2621 break; 2623 break;
2622 2624
@@ -3411,10 +3413,10 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3411 ecmd->phy_address = PHY_ADDR_MARV; 3413 ecmd->phy_address = PHY_ADDR_MARV;
3412 if (sky2_is_copper(hw)) { 3414 if (sky2_is_copper(hw)) {
3413 ecmd->port = PORT_TP; 3415 ecmd->port = PORT_TP;
3414 ecmd->speed = sky2->speed; 3416 ethtool_cmd_speed_set(ecmd, sky2->speed);
3415 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; 3417 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
3416 } else { 3418 } else {
3417 ecmd->speed = SPEED_1000; 3419 ethtool_cmd_speed_set(ecmd, SPEED_1000);
3418 ecmd->port = PORT_FIBRE; 3420 ecmd->port = PORT_FIBRE;
3419 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; 3421 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
3420 } 3422 }
@@ -3450,8 +3452,9 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3450 sky2->speed = -1; 3452 sky2->speed = -1;
3451 } else { 3453 } else {
3452 u32 setting; 3454 u32 setting;
3455 u32 speed = ethtool_cmd_speed(ecmd);
3453 3456
3454 switch (ecmd->speed) { 3457 switch (speed) {
3455 case SPEED_1000: 3458 case SPEED_1000:
3456 if (ecmd->duplex == DUPLEX_FULL) 3459 if (ecmd->duplex == DUPLEX_FULL)
3457 setting = SUPPORTED_1000baseT_Full; 3460 setting = SUPPORTED_1000baseT_Full;
@@ -3484,7 +3487,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3484 if ((setting & supported) == 0) 3487 if ((setting & supported) == 0)
3485 return -EINVAL; 3488 return -EINVAL;
3486 3489
3487 sky2->speed = ecmd->speed; 3490 sky2->speed = speed;
3488 sky2->duplex = ecmd->duplex; 3491 sky2->duplex = ecmd->duplex;
3489 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; 3492 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
3490 } 3493 }
@@ -3552,28 +3555,6 @@ static const struct sky2_stat {
3552 { "tx_fifo_underrun", GM_TXE_FIFO_UR }, 3555 { "tx_fifo_underrun", GM_TXE_FIFO_UR },
3553}; 3556};
3554 3557
3555static u32 sky2_get_rx_csum(struct net_device *dev)
3556{
3557 struct sky2_port *sky2 = netdev_priv(dev);
3558
3559 return !!(sky2->flags & SKY2_FLAG_RX_CHECKSUM);
3560}
3561
3562static int sky2_set_rx_csum(struct net_device *dev, u32 data)
3563{
3564 struct sky2_port *sky2 = netdev_priv(dev);
3565
3566 if (data)
3567 sky2->flags |= SKY2_FLAG_RX_CHECKSUM;
3568 else
3569 sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
3570
3571 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
3572 data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
3573
3574 return 0;
3575}
3576
3577static u32 sky2_get_msglevel(struct net_device *netdev) 3558static u32 sky2_get_msglevel(struct net_device *netdev)
3578{ 3559{
3579 struct sky2_port *sky2 = netdev_priv(netdev); 3560 struct sky2_port *sky2 = netdev_priv(netdev);
@@ -3826,23 +3807,24 @@ static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
3826} 3807}
3827 3808
3828/* blink LED's for finding board */ 3809/* blink LED's for finding board */
3829static int sky2_phys_id(struct net_device *dev, u32 data) 3810static int sky2_set_phys_id(struct net_device *dev,
3811 enum ethtool_phys_id_state state)
3830{ 3812{
3831 struct sky2_port *sky2 = netdev_priv(dev); 3813 struct sky2_port *sky2 = netdev_priv(dev);
3832 unsigned int i;
3833
3834 if (data == 0)
3835 data = UINT_MAX;
3836 3814
3837 for (i = 0; i < data; i++) { 3815 switch (state) {
3816 case ETHTOOL_ID_ACTIVE:
3817 return 1; /* cycle on/off once per second */
3818 case ETHTOOL_ID_INACTIVE:
3819 sky2_led(sky2, MO_LED_NORM);
3820 break;
3821 case ETHTOOL_ID_ON:
3838 sky2_led(sky2, MO_LED_ON); 3822 sky2_led(sky2, MO_LED_ON);
3839 if (msleep_interruptible(500)) 3823 break;
3840 break; 3824 case ETHTOOL_ID_OFF:
3841 sky2_led(sky2, MO_LED_OFF); 3825 sky2_led(sky2, MO_LED_OFF);
3842 if (msleep_interruptible(500)) 3826 break;
3843 break;
3844 } 3827 }
3845 sky2_led(sky2, MO_LED_NORM);
3846 3828
3847 return 0; 3829 return 0;
3848} 3830}
@@ -4083,34 +4065,6 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4083 } 4065 }
4084} 4066}
4085 4067
4086/* In order to do Jumbo packets on these chips, need to turn off the
4087 * transmit store/forward. Therefore checksum offload won't work.
4088 */
4089static int no_tx_offload(struct net_device *dev)
4090{
4091 const struct sky2_port *sky2 = netdev_priv(dev);
4092 const struct sky2_hw *hw = sky2->hw;
4093
4094 return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U;
4095}
4096
4097static int sky2_set_tx_csum(struct net_device *dev, u32 data)
4098{
4099 if (data && no_tx_offload(dev))
4100 return -EINVAL;
4101
4102 return ethtool_op_set_tx_csum(dev, data);
4103}
4104
4105
4106static int sky2_set_tso(struct net_device *dev, u32 data)
4107{
4108 if (data && no_tx_offload(dev))
4109 return -EINVAL;
4110
4111 return ethtool_op_set_tso(dev, data);
4112}
4113
4114static int sky2_get_eeprom_len(struct net_device *dev) 4068static int sky2_get_eeprom_len(struct net_device *dev)
4115{ 4069{
4116 struct sky2_port *sky2 = netdev_priv(dev); 4070 struct sky2_port *sky2 = netdev_priv(dev);
@@ -4213,31 +4167,36 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
4213 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); 4167 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
4214} 4168}
4215 4169
4216static int sky2_set_flags(struct net_device *dev, u32 data) 4170static u32 sky2_fix_features(struct net_device *dev, u32 features)
4217{ 4171{
4218 struct sky2_port *sky2 = netdev_priv(dev); 4172 const struct sky2_port *sky2 = netdev_priv(dev);
4219 unsigned long old_feat = dev->features; 4173 const struct sky2_hw *hw = sky2->hw;
4220 u32 supported = 0;
4221 int rc;
4222 4174
4223 if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN)) 4175 /* In order to do Jumbo packets on these chips, need to turn off the
4224 supported |= ETH_FLAG_RXHASH; 4176 * transmit store/forward. Therefore checksum offload won't work.
4177 */
4178 if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
4179 features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
4225 4180
4226 if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN)) 4181 return features;
4227 supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; 4182}
4228 4183
4229 printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n", 4184static int sky2_set_features(struct net_device *dev, u32 features)
4230 supported, data); 4185{
4186 struct sky2_port *sky2 = netdev_priv(dev);
4187 u32 changed = dev->features ^ features;
4231 4188
4232 rc = ethtool_op_set_flags(dev, data, supported); 4189 if (changed & NETIF_F_RXCSUM) {
4233 if (rc) 4190 u32 on = features & NETIF_F_RXCSUM;
4234 return rc; 4191 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
4192 on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
4193 }
4235 4194
4236 if ((old_feat ^ dev->features) & NETIF_F_RXHASH) 4195 if (changed & NETIF_F_RXHASH)
4237 rx_set_rss(dev); 4196 rx_set_rss(dev, features);
4238 4197
4239 if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN) 4198 if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
4240 sky2_vlan_mode(dev); 4199 sky2_vlan_mode(dev, features);
4241 4200
4242 return 0; 4201 return 0;
4243} 4202}
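
These hunks split the old ethtool set_flags logic into the newer pair of hooks: ndo_fix_features masks out combinations the hardware cannot honour (here, offloads at jumbo MTU), while ndo_set_features programs only the bits that actually changed. A generic sketch of the same split, using the u32 feature masks of this kernel generation; my_hw_set_rxcsum is an assumed helper, not part of the real driver:

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void my_hw_set_rxcsum(struct net_device *dev, bool on)
{
	/* hypothetical: toggle the device's RX checksum enable bit */
}

static u32 my_fix_features(struct net_device *dev, u32 features)
{
	/* refuse offloads the hardware cannot do at the current MTU */
	if (dev->mtu > ETH_DATA_LEN)
		features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
	return features;
}

static int my_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	/* only touch hardware for feature bits that actually changed */
	if (changed & NETIF_F_RXCSUM)
		my_hw_set_rxcsum(dev, features & NETIF_F_RXCSUM);

	return 0;
}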
@@ -4257,11 +4216,6 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4257 .get_eeprom_len = sky2_get_eeprom_len, 4216 .get_eeprom_len = sky2_get_eeprom_len,
4258 .get_eeprom = sky2_get_eeprom, 4217 .get_eeprom = sky2_get_eeprom,
4259 .set_eeprom = sky2_set_eeprom, 4218 .set_eeprom = sky2_set_eeprom,
4260 .set_sg = ethtool_op_set_sg,
4261 .set_tx_csum = sky2_set_tx_csum,
4262 .set_tso = sky2_set_tso,
4263 .get_rx_csum = sky2_get_rx_csum,
4264 .set_rx_csum = sky2_set_rx_csum,
4265 .get_strings = sky2_get_strings, 4219 .get_strings = sky2_get_strings,
4266 .get_coalesce = sky2_get_coalesce, 4220 .get_coalesce = sky2_get_coalesce,
4267 .set_coalesce = sky2_set_coalesce, 4221 .set_coalesce = sky2_set_coalesce,
@@ -4269,11 +4223,9 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4269 .set_ringparam = sky2_set_ringparam, 4223 .set_ringparam = sky2_set_ringparam,
4270 .get_pauseparam = sky2_get_pauseparam, 4224 .get_pauseparam = sky2_get_pauseparam,
4271 .set_pauseparam = sky2_set_pauseparam, 4225 .set_pauseparam = sky2_set_pauseparam,
4272 .phys_id = sky2_phys_id, 4226 .set_phys_id = sky2_set_phys_id,
4273 .get_sset_count = sky2_get_sset_count, 4227 .get_sset_count = sky2_get_sset_count,
4274 .get_ethtool_stats = sky2_get_ethtool_stats, 4228 .get_ethtool_stats = sky2_get_ethtool_stats,
4275 .set_flags = sky2_set_flags,
4276 .get_flags = ethtool_op_get_flags,
4277}; 4229};
4278 4230
4279#ifdef CONFIG_SKY2_DEBUG 4231#ifdef CONFIG_SKY2_DEBUG
@@ -4553,6 +4505,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4553 .ndo_set_mac_address = sky2_set_mac_address, 4505 .ndo_set_mac_address = sky2_set_mac_address,
4554 .ndo_set_multicast_list = sky2_set_multicast, 4506 .ndo_set_multicast_list = sky2_set_multicast,
4555 .ndo_change_mtu = sky2_change_mtu, 4507 .ndo_change_mtu = sky2_change_mtu,
4508 .ndo_fix_features = sky2_fix_features,
4509 .ndo_set_features = sky2_set_features,
4556 .ndo_tx_timeout = sky2_tx_timeout, 4510 .ndo_tx_timeout = sky2_tx_timeout,
4557 .ndo_get_stats64 = sky2_get_stats, 4511 .ndo_get_stats64 = sky2_get_stats,
4558#ifdef CONFIG_NET_POLL_CONTROLLER 4512#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4568,6 +4522,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4568 .ndo_set_mac_address = sky2_set_mac_address, 4522 .ndo_set_mac_address = sky2_set_mac_address,
4569 .ndo_set_multicast_list = sky2_set_multicast, 4523 .ndo_set_multicast_list = sky2_set_multicast,
4570 .ndo_change_mtu = sky2_change_mtu, 4524 .ndo_change_mtu = sky2_change_mtu,
4525 .ndo_fix_features = sky2_fix_features,
4526 .ndo_set_features = sky2_set_features,
4571 .ndo_tx_timeout = sky2_tx_timeout, 4527 .ndo_tx_timeout = sky2_tx_timeout,
4572 .ndo_get_stats64 = sky2_get_stats, 4528 .ndo_get_stats64 = sky2_get_stats,
4573 }, 4529 },
@@ -4600,7 +4556,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4600 /* Auto speed and flow control */ 4556 /* Auto speed and flow control */
4601 sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; 4557 sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
4602 if (hw->chip_id != CHIP_ID_YUKON_XL) 4558 if (hw->chip_id != CHIP_ID_YUKON_XL)
4603 sky2->flags |= SKY2_FLAG_RX_CHECKSUM; 4559 dev->hw_features |= NETIF_F_RXCSUM;
4604 4560
4605 sky2->flow_mode = FC_BOTH; 4561 sky2->flow_mode = FC_BOTH;
4606 4562
@@ -4619,18 +4575,21 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4619 4575
4620 sky2->port = port; 4576 sky2->port = port;
4621 4577
4622 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG 4578 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
4623 | NETIF_F_TSO | NETIF_F_GRO;
4624 4579
4625 if (highmem) 4580 if (highmem)
4626 dev->features |= NETIF_F_HIGHDMA; 4581 dev->features |= NETIF_F_HIGHDMA;
4627 4582
4628 /* Enable receive hashing unless hardware is known broken */ 4583 /* Enable receive hashing unless hardware is known broken */
4629 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) 4584 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
4630 dev->features |= NETIF_F_RXHASH; 4585 dev->hw_features |= NETIF_F_RXHASH;
4586
4587 if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
4588 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4589 dev->vlan_features |= SKY2_VLAN_OFFLOADS;
4590 }
4631 4591
4632 if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) 4592 dev->features |= dev->hw_features;
4633 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4634 4593
4635 /* read the mac address */ 4594 /* read the mac address */
4636 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); 4595 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
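
With this conversion the probe path advertises every user-toggleable offload in dev->hw_features and seeds dev->features from it, leaving runtime changes to the fix_features/set_features pair above. Roughly, assuming a capability flag read from the chip (rx_hash_ok is illustrative):

#include <linux/netdevice.h>

static void my_setup_features(struct net_device *dev, bool rx_hash_ok)
{
	/* offloads the user may toggle via ethtool go into hw_features */
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	if (rx_hash_ok)				/* hypothetical capability flag */
		dev->hw_features |= NETIF_F_RXHASH;

	/* everything advertised is also enabled by default */
	dev->features |= dev->hw_features;
}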
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 0c6d10c5f053..318c9ae7bf91 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2254,7 +2254,6 @@ struct sky2_port {
2254 u8 wol; /* WAKE_ bits */ 2254 u8 wol; /* WAKE_ bits */
2255 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 2255 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
2256 u16 flags; 2256 u16 flags;
2257#define SKY2_FLAG_RX_CHECKSUM 0x0001
2258#define SKY2_FLAG_AUTO_SPEED 0x0002 2257#define SKY2_FLAG_AUTO_SPEED 0x0002
2259#define SKY2_FLAG_AUTO_PAUSE 0x0004 2258#define SKY2_FLAG_AUTO_PAUSE 0x0004
2260 2259
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index d07c39cb4daf..0f29f261fcfe 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -156,7 +156,7 @@ static const struct {
156 { 14, 15 } 156 { 14, 15 }
157}; 157};
158 158
159static short smc_mca_adapter_ids[] __initdata = { 159static const short smc_mca_adapter_ids[] __devinitconst = {
160 0x61c8, 160 0x61c8,
161 0x61c9, 161 0x61c9,
162 0x6fc0, 162 0x6fc0,
@@ -168,7 +168,7 @@ static short smc_mca_adapter_ids[] __initdata = {
168 0x0000 168 0x0000
169}; 169};
170 170
171static char *smc_mca_adapter_names[] __initdata = { 171static const char *const smc_mca_adapter_names[] __devinitconst = {
172 "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)", 172 "SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
173 "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)", 173 "SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
174 "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)", 174 "WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
199#endif 199#endif
200}; 200};
201 201
202static int __init ultramca_probe(struct device *gen_dev) 202static int __devinit ultramca_probe(struct device *gen_dev)
203{ 203{
204 unsigned short ioaddr; 204 unsigned short ioaddr;
205 struct net_device *dev; 205 struct net_device *dev;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 66831f378396..053863aefb12 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1488,9 +1488,9 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1488 SUPPORTED_TP | SUPPORTED_AUI; 1488 SUPPORTED_TP | SUPPORTED_AUI;
1489 1489
1490 if (lp->ctl_rspeed == 10) 1490 if (lp->ctl_rspeed == 10)
1491 cmd->speed = SPEED_10; 1491 ethtool_cmd_speed_set(cmd, SPEED_10);
1492 else if (lp->ctl_rspeed == 100) 1492 else if (lp->ctl_rspeed == 100)
1493 cmd->speed = SPEED_100; 1493 ethtool_cmd_speed_set(cmd, SPEED_100);
1494 1494
1495 cmd->autoneg = AUTONEG_DISABLE; 1495 cmd->autoneg = AUTONEG_DISABLE;
1496 if (lp->mii.phy_id==1) 1496 if (lp->mii.phy_id==1)
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 43654a3bb0ec..dc4805f473e3 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1565,9 +1565,9 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1565 SUPPORTED_TP | SUPPORTED_AUI; 1565 SUPPORTED_TP | SUPPORTED_AUI;
1566 1566
1567 if (lp->ctl_rspeed == 10) 1567 if (lp->ctl_rspeed == 10)
1568 cmd->speed = SPEED_10; 1568 ethtool_cmd_speed_set(cmd, SPEED_10);
1569 else if (lp->ctl_rspeed == 100) 1569 else if (lp->ctl_rspeed == 100)
1570 cmd->speed = SPEED_100; 1570 ethtool_cmd_speed_set(cmd, SPEED_100);
1571 1571
1572 cmd->autoneg = AUTONEG_DISABLE; 1572 cmd->autoneg = AUTONEG_DISABLE;
1573 cmd->transceiver = XCVR_INTERNAL; 1573 cmd->transceiver = XCVR_INTERNAL;
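
The smc911x and smc91x hunks stop writing cmd->speed directly and use ethtool_cmd_speed_set(), which also fills the speed_hi half so the full 32-bit speed value is represented. A sketch of a get_settings handler using the accessor; the private struct and its cfg_speed field are assumptions for illustration only:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct my_priv {
	unsigned int cfg_speed;		/* configured link speed: 10 or 100 */
};

static int my_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct my_priv *lp = netdev_priv(dev);

	/* accessor writes both cmd->speed and cmd->speed_hi */
	ethtool_cmd_speed_set(cmd,
			      lp->cfg_speed == 100 ? SPEED_100 : SPEED_10);
	cmd->duplex = DUPLEX_HALF;
	cmd->autoneg = AUTONEG_DISABLE;
	return 0;
}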
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 4b42ecc63dcf..c6d47d10590c 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -29,6 +29,8 @@
29 * 29 *
30 */ 30 */
31 31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
32#include <linux/crc32.h> 34#include <linux/crc32.h>
33#include <linux/delay.h> 35#include <linux/delay.h>
34#include <linux/errno.h> 36#include <linux/errno.h>
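
Defining pr_fmt before the first include makes every later pr_warn/pr_info in the file carry the module name automatically, which is why the probe hunks further down can drop the explicit SMSC_CHIPNAME prefix. A minimal illustration (demo_init is a made-up module init function):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* printed as "<modname>: Driver version 1.0" */
	pr_info("Driver version %s\n", "1.0");
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");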
@@ -69,6 +71,17 @@ static int debug = 3;
69module_param(debug, int, 0); 71module_param(debug, int, 0);
70MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 72MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
71 73
74struct smsc911x_data;
75
76struct smsc911x_ops {
77 u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg);
78 void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val);
79 void (*rx_readfifo)(struct smsc911x_data *pdata,
80 unsigned int *buf, unsigned int wordcount);
81 void (*tx_writefifo)(struct smsc911x_data *pdata,
82 unsigned int *buf, unsigned int wordcount);
83};
84
72struct smsc911x_data { 85struct smsc911x_data {
73 void __iomem *ioaddr; 86 void __iomem *ioaddr;
74 87
@@ -116,8 +129,14 @@ struct smsc911x_data {
116 unsigned int clear_bits_mask; 129 unsigned int clear_bits_mask;
117 unsigned int hashhi; 130 unsigned int hashhi;
118 unsigned int hashlo; 131 unsigned int hashlo;
132
133 /* register access functions */
134 const struct smsc911x_ops *ops;
119}; 135};
120 136
137/* Easy access to information */
138#define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift))
139
121static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) 140static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
122{ 141{
123 if (pdata->config.flags & SMSC911X_USE_32BIT) 142 if (pdata->config.flags & SMSC911X_USE_32BIT)
@@ -131,13 +150,29 @@ static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
131 return 0; 150 return 0;
132} 151}
133 152
153static inline u32
154__smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg)
155{
156 if (pdata->config.flags & SMSC911X_USE_32BIT)
157 return readl(pdata->ioaddr + __smsc_shift(pdata, reg));
158
159 if (pdata->config.flags & SMSC911X_USE_16BIT)
160 return (readw(pdata->ioaddr +
161 __smsc_shift(pdata, reg)) & 0xFFFF) |
162 ((readw(pdata->ioaddr +
163 __smsc_shift(pdata, reg + 2)) & 0xFFFF) << 16);
164
165 BUG();
166 return 0;
167}
168
134static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) 169static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
135{ 170{
136 u32 data; 171 u32 data;
137 unsigned long flags; 172 unsigned long flags;
138 173
139 spin_lock_irqsave(&pdata->dev_lock, flags); 174 spin_lock_irqsave(&pdata->dev_lock, flags);
140 data = __smsc911x_reg_read(pdata, reg); 175 data = pdata->ops->reg_read(pdata, reg);
141 spin_unlock_irqrestore(&pdata->dev_lock, flags); 176 spin_unlock_irqrestore(&pdata->dev_lock, flags);
142 177
143 return data; 178 return data;
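
The shifted accessors above scale every register offset by config.shift before touching the bus, and on a 16-bit bus rebuild each 32-bit register from two halfword reads at reg and reg + 2. The address arithmetic in isolation, with illustrative helper names and an example shift value that is only an assumption:

#include <linux/types.h>

/* e.g. with shift = 2, register 0x50 would land at byte offset 0x140 */
static inline unsigned long my_shifted_offset(u32 reg, unsigned int shift)
{
	return (unsigned long)reg << shift;
}

/* compose a 32-bit value from two 16-bit bus reads, low half first,
 * mirroring the USE_16BIT branch of the shifted reg_read above */
static inline u32 my_compose32(u16 lo, u16 hi)
{
	return (u32)lo | ((u32)hi << 16);
}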
@@ -160,13 +195,32 @@ static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
160 BUG(); 195 BUG();
161} 196}
162 197
198static inline void
199__smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val)
200{
201 if (pdata->config.flags & SMSC911X_USE_32BIT) {
202 writel(val, pdata->ioaddr + __smsc_shift(pdata, reg));
203 return;
204 }
205
206 if (pdata->config.flags & SMSC911X_USE_16BIT) {
207 writew(val & 0xFFFF,
208 pdata->ioaddr + __smsc_shift(pdata, reg));
209 writew((val >> 16) & 0xFFFF,
210 pdata->ioaddr + __smsc_shift(pdata, reg + 2));
211 return;
212 }
213
214 BUG();
215}
216
163static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, 217static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
164 u32 val) 218 u32 val)
165{ 219{
166 unsigned long flags; 220 unsigned long flags;
167 221
168 spin_lock_irqsave(&pdata->dev_lock, flags); 222 spin_lock_irqsave(&pdata->dev_lock, flags);
169 __smsc911x_reg_write(pdata, reg, val); 223 pdata->ops->reg_write(pdata, reg, val);
170 spin_unlock_irqrestore(&pdata->dev_lock, flags); 224 spin_unlock_irqrestore(&pdata->dev_lock, flags);
171} 225}
172 226
@@ -202,6 +256,40 @@ out:
202 spin_unlock_irqrestore(&pdata->dev_lock, flags); 256 spin_unlock_irqrestore(&pdata->dev_lock, flags);
203} 257}
204 258
259/* Writes a packet to the TX_DATA_FIFO - shifted version */
260static inline void
261smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
262 unsigned int wordcount)
263{
264 unsigned long flags;
265
266 spin_lock_irqsave(&pdata->dev_lock, flags);
267
268 if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
269 while (wordcount--)
270 __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO,
271 swab32(*buf++));
272 goto out;
273 }
274
275 if (pdata->config.flags & SMSC911X_USE_32BIT) {
276 writesl(pdata->ioaddr + __smsc_shift(pdata,
277 TX_DATA_FIFO), buf, wordcount);
278 goto out;
279 }
280
281 if (pdata->config.flags & SMSC911X_USE_16BIT) {
282 while (wordcount--)
283 __smsc911x_reg_write_shift(pdata,
284 TX_DATA_FIFO, *buf++);
285 goto out;
286 }
287
288 BUG();
289out:
290 spin_unlock_irqrestore(&pdata->dev_lock, flags);
291}
292
205/* Reads a packet out of the RX_DATA_FIFO */ 293/* Reads a packet out of the RX_DATA_FIFO */
206static inline void 294static inline void
207smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, 295smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
@@ -234,6 +322,40 @@ out:
234 spin_unlock_irqrestore(&pdata->dev_lock, flags); 322 spin_unlock_irqrestore(&pdata->dev_lock, flags);
235} 323}
236 324
325/* Reads a packet out of the RX_DATA_FIFO - shifted version */
326static inline void
327smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
328 unsigned int wordcount)
329{
330 unsigned long flags;
331
332 spin_lock_irqsave(&pdata->dev_lock, flags);
333
334 if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
335 while (wordcount--)
336 *buf++ = swab32(__smsc911x_reg_read_shift(pdata,
337 RX_DATA_FIFO));
338 goto out;
339 }
340
341 if (pdata->config.flags & SMSC911X_USE_32BIT) {
342 readsl(pdata->ioaddr + __smsc_shift(pdata,
343 RX_DATA_FIFO), buf, wordcount);
344 goto out;
345 }
346
347 if (pdata->config.flags & SMSC911X_USE_16BIT) {
348 while (wordcount--)
349 *buf++ = __smsc911x_reg_read_shift(pdata,
350 RX_DATA_FIFO);
351 goto out;
352 }
353
354 BUG();
355out:
356 spin_unlock_irqrestore(&pdata->dev_lock, flags);
357}
358
237/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read 359/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
238 * and smsc911x_mac_write, so assumes mac_lock is held */ 360 * and smsc911x_mac_write, so assumes mac_lock is held */
239static int smsc911x_mac_complete(struct smsc911x_data *pdata) 361static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -248,8 +370,8 @@ static int smsc911x_mac_complete(struct smsc911x_data *pdata)
248 if (!(val & MAC_CSR_CMD_CSR_BUSY_)) 370 if (!(val & MAC_CSR_CMD_CSR_BUSY_))
249 return 0; 371 return 0;
250 } 372 }
251 SMSC_WARNING(HW, "Timed out waiting for MAC not BUSY. " 373 SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. "
252 "MAC_CSR_CMD: 0x%08X", val); 374 "MAC_CSR_CMD: 0x%08X", val);
253 return -EIO; 375 return -EIO;
254} 376}
255 377
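
From this point on the driver's private SMSC_WARNING/SMSC_TRACE macros become SMSC_WARN/SMSC_TRACE, taking pdata plus a lowercase netif_msg category so they can sit on top of the standard netif_* message-level helpers. The real macros live in the driver's header, which is not shown here; one plausible shape, assuming the private struct exposes msg_enable and a net_device back-pointer (names my_priv/MY_WARN/MY_TRACE are made up):

#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;		/* back-pointer for netif_* helpers */
	u32 msg_enable;			/* netif_msg_* bitmap, set from "debug" */
};

#define MY_WARN(pd, nlevel, fmt, args...) \
	netif_warn(pd, nlevel, (pd)->dev, fmt "\n", ##args)
#define MY_TRACE(pd, nlevel, fmt, args...) \
	netif_dbg(pd, nlevel, (pd)->dev, fmt "\n", ##args)

A call such as MY_WARN(pdata, hw, "MAC busy at entry") then only prints when the hw bit is set in msg_enable, matching the converted call sites above.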
@@ -262,7 +384,7 @@ static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
262 384
263 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); 385 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
264 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { 386 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
265 SMSC_WARNING(HW, "MAC busy at entry"); 387 SMSC_WARN(pdata, hw, "MAC busy at entry");
266 return 0xFFFFFFFF; 388 return 0xFFFFFFFF;
267 } 389 }
268 390
@@ -277,7 +399,7 @@ static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
277 if (likely(smsc911x_mac_complete(pdata) == 0)) 399 if (likely(smsc911x_mac_complete(pdata) == 0))
278 return smsc911x_reg_read(pdata, MAC_CSR_DATA); 400 return smsc911x_reg_read(pdata, MAC_CSR_DATA);
279 401
280 SMSC_WARNING(HW, "MAC busy after read"); 402 SMSC_WARN(pdata, hw, "MAC busy after read");
281 return 0xFFFFFFFF; 403 return 0xFFFFFFFF;
282} 404}
283 405
@@ -291,8 +413,8 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
291 413
292 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); 414 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
293 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { 415 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
294 SMSC_WARNING(HW, 416 SMSC_WARN(pdata, hw,
295 "smsc911x_mac_write failed, MAC busy at entry"); 417 "smsc911x_mac_write failed, MAC busy at entry");
296 return; 418 return;
297 } 419 }
298 420
@@ -310,8 +432,7 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
310 if (likely(smsc911x_mac_complete(pdata) == 0)) 432 if (likely(smsc911x_mac_complete(pdata) == 0))
311 return; 433 return;
312 434
313 SMSC_WARNING(HW, 435 SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write");
314 "smsc911x_mac_write failed, MAC busy after write");
315} 436}
316 437
317/* Get a phy register */ 438/* Get a phy register */
@@ -326,8 +447,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
326 447
327 /* Confirm MII not busy */ 448 /* Confirm MII not busy */
328 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { 449 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
329 SMSC_WARNING(HW, 450 SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???");
330 "MII is busy in smsc911x_mii_read???");
331 reg = -EIO; 451 reg = -EIO;
332 goto out; 452 goto out;
333 } 453 }
@@ -343,7 +463,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
343 goto out; 463 goto out;
344 } 464 }
345 465
346 SMSC_WARNING(HW, "Timed out waiting for MII read to finish"); 466 SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish");
347 reg = -EIO; 467 reg = -EIO;
348 468
349out: 469out:
@@ -364,8 +484,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
364 484
365 /* Confirm MII not busy */ 485 /* Confirm MII not busy */
366 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { 486 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
367 SMSC_WARNING(HW, 487 SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???");
368 "MII is busy in smsc911x_mii_write???");
369 reg = -EIO; 488 reg = -EIO;
370 goto out; 489 goto out;
371 } 490 }
@@ -385,7 +504,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
385 goto out; 504 goto out;
386 } 505 }
387 506
388 SMSC_WARNING(HW, "Timed out waiting for MII write to finish"); 507 SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish");
389 reg = -EIO; 508 reg = -EIO;
390 509
391out: 510out:
@@ -426,18 +545,20 @@ static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
426 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); 545 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
427 546
428 if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { 547 if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) {
429 SMSC_TRACE(HW, "Forcing internal PHY"); 548 SMSC_TRACE(pdata, hw, "Forcing internal PHY");
430 pdata->using_extphy = 0; 549 pdata->using_extphy = 0;
431 } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { 550 } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) {
432 SMSC_TRACE(HW, "Forcing external PHY"); 551 SMSC_TRACE(pdata, hw, "Forcing external PHY");
433 smsc911x_phy_enable_external(pdata); 552 smsc911x_phy_enable_external(pdata);
434 pdata->using_extphy = 1; 553 pdata->using_extphy = 1;
435 } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { 554 } else if (hwcfg & HW_CFG_EXT_PHY_DET_) {
436 SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET set, using external PHY"); 555 SMSC_TRACE(pdata, hw,
556 "HW_CFG EXT_PHY_DET set, using external PHY");
437 smsc911x_phy_enable_external(pdata); 557 smsc911x_phy_enable_external(pdata);
438 pdata->using_extphy = 1; 558 pdata->using_extphy = 1;
439 } else { 559 } else {
440 SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET clear, using internal PHY"); 560 SMSC_TRACE(pdata, hw,
561 "HW_CFG EXT_PHY_DET clear, using internal PHY");
441 pdata->using_extphy = 0; 562 pdata->using_extphy = 0;
442 } 563 }
443} 564}
@@ -499,7 +620,7 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
499 wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3); 620 wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3);
500 wrsz >>= 2; 621 wrsz >>= 2;
501 622
502 smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz); 623 pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
503 624
504 /* Wait till transmit is done */ 625 /* Wait till transmit is done */
505 i = 60; 626 i = 60;
@@ -509,13 +630,13 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
509 } while ((i--) && (!status)); 630 } while ((i--) && (!status));
510 631
511 if (!status) { 632 if (!status) {
512 SMSC_WARNING(HW, "Failed to transmit " 633 SMSC_WARN(pdata, hw,
513 "during loopback test"); 634 "Failed to transmit during loopback test");
514 continue; 635 continue;
515 } 636 }
516 if (status & TX_STS_ES_) { 637 if (status & TX_STS_ES_) {
517 SMSC_WARNING(HW, "Transmit encountered " 638 SMSC_WARN(pdata, hw,
518 "errors during loopback test"); 639 "Transmit encountered errors during loopback test");
519 continue; 640 continue;
520 } 641 }
521 642
@@ -527,13 +648,13 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
527 } while ((i--) && (!status)); 648 } while ((i--) && (!status));
528 649
529 if (!status) { 650 if (!status) {
530 SMSC_WARNING(HW, 651 SMSC_WARN(pdata, hw,
531 "Failed to receive during loopback test"); 652 "Failed to receive during loopback test");
532 continue; 653 continue;
533 } 654 }
534 if (status & RX_STS_ES_) { 655 if (status & RX_STS_ES_) {
535 SMSC_WARNING(HW, "Receive encountered " 656 SMSC_WARN(pdata, hw,
536 "errors during loopback test"); 657 "Receive encountered errors during loopback test");
537 continue; 658 continue;
538 } 659 }
539 660
@@ -543,12 +664,12 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
543 rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3); 664 rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3);
544 rdsz >>= 2; 665 rdsz >>= 2;
545 666
546 smsc911x_rx_readfifo(pdata, (unsigned int *)bufp, rdsz); 667 pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz);
547 668
548 if (pktlength != (MIN_PACKET_SIZE + 4)) { 669 if (pktlength != (MIN_PACKET_SIZE + 4)) {
549 SMSC_WARNING(HW, "Unexpected packet size " 670 SMSC_WARN(pdata, hw, "Unexpected packet size "
550 "during loop back test, size=%d, will retry", 671 "during loop back test, size=%d, will retry",
551 pktlength); 672 pktlength);
552 } else { 673 } else {
553 unsigned int j; 674 unsigned int j;
554 int mismatch = 0; 675 int mismatch = 0;
@@ -560,12 +681,12 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
560 } 681 }
561 } 682 }
562 if (!mismatch) { 683 if (!mismatch) {
563 SMSC_TRACE(HW, "Successfully verified " 684 SMSC_TRACE(pdata, hw, "Successfully verified "
564 "loopback packet"); 685 "loopback packet");
565 return 0; 686 return 0;
566 } else { 687 } else {
567 SMSC_WARNING(HW, "Data mismatch " 688 SMSC_WARN(pdata, hw, "Data mismatch "
568 "during loop back test, will retry"); 689 "during loop back test, will retry");
569 } 690 }
570 } 691 }
571 } 692 }
@@ -582,7 +703,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
582 BUG_ON(!phy_dev); 703 BUG_ON(!phy_dev);
583 BUG_ON(!phy_dev->bus); 704 BUG_ON(!phy_dev->bus);
584 705
585 SMSC_TRACE(HW, "Performing PHY BCR Reset"); 706 SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
586 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); 707 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
587 do { 708 do {
588 msleep(1); 709 msleep(1);
@@ -591,7 +712,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
591 } while ((i--) && (temp & BMCR_RESET)); 712 } while ((i--) && (temp & BMCR_RESET));
592 713
593 if (temp & BMCR_RESET) { 714 if (temp & BMCR_RESET) {
594 SMSC_WARNING(HW, "PHY reset failed to complete."); 715 SMSC_WARN(pdata, hw, "PHY reset failed to complete");
595 return -EIO; 716 return -EIO;
596 } 717 }
597 /* Extra delay required because the phy may not be completed with 718 /* Extra delay required because the phy may not be completed with
@@ -695,11 +816,11 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
695 else 816 else
696 afc &= ~0xF; 817 afc &= ~0xF;
697 818
698 SMSC_TRACE(HW, "rx pause %s, tx pause %s", 819 SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s",
699 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), 820 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
700 (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); 821 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
701 } else { 822 } else {
702 SMSC_TRACE(HW, "half duplex"); 823 SMSC_TRACE(pdata, hw, "half duplex");
703 flow = 0; 824 flow = 0;
704 afc |= 0xF; 825 afc |= 0xF;
705 } 826 }
@@ -722,17 +843,17 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
722 843
723 if (phy_dev->duplex != pdata->last_duplex) { 844 if (phy_dev->duplex != pdata->last_duplex) {
724 unsigned int mac_cr; 845 unsigned int mac_cr;
725 SMSC_TRACE(HW, "duplex state has changed"); 846 SMSC_TRACE(pdata, hw, "duplex state has changed");
726 847
727 spin_lock_irqsave(&pdata->mac_lock, flags); 848 spin_lock_irqsave(&pdata->mac_lock, flags);
728 mac_cr = smsc911x_mac_read(pdata, MAC_CR); 849 mac_cr = smsc911x_mac_read(pdata, MAC_CR);
729 if (phy_dev->duplex) { 850 if (phy_dev->duplex) {
730 SMSC_TRACE(HW, 851 SMSC_TRACE(pdata, hw,
731 "configuring for full duplex mode"); 852 "configuring for full duplex mode");
732 mac_cr |= MAC_CR_FDPX_; 853 mac_cr |= MAC_CR_FDPX_;
733 } else { 854 } else {
734 SMSC_TRACE(HW, 855 SMSC_TRACE(pdata, hw,
735 "configuring for half duplex mode"); 856 "configuring for half duplex mode");
736 mac_cr &= ~MAC_CR_FDPX_; 857 mac_cr &= ~MAC_CR_FDPX_;
737 } 858 }
738 smsc911x_mac_write(pdata, MAC_CR, mac_cr); 859 smsc911x_mac_write(pdata, MAC_CR, mac_cr);
@@ -744,9 +865,9 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
744 865
745 carrier = netif_carrier_ok(dev); 866 carrier = netif_carrier_ok(dev);
746 if (carrier != pdata->last_carrier) { 867 if (carrier != pdata->last_carrier) {
747 SMSC_TRACE(HW, "carrier state has changed"); 868 SMSC_TRACE(pdata, hw, "carrier state has changed");
748 if (carrier) { 869 if (carrier) {
749 SMSC_TRACE(HW, "configuring for carrier OK"); 870 SMSC_TRACE(pdata, hw, "configuring for carrier OK");
750 if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && 871 if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
751 (!pdata->using_extphy)) { 872 (!pdata->using_extphy)) {
752 /* Restore original GPIO configuration */ 873 /* Restore original GPIO configuration */
@@ -755,7 +876,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
755 pdata->gpio_setting); 876 pdata->gpio_setting);
756 } 877 }
757 } else { 878 } else {
758 SMSC_TRACE(HW, "configuring for no carrier"); 879 SMSC_TRACE(pdata, hw, "configuring for no carrier");
759 /* Check global setting that LED1 880 /* Check global setting that LED1
760 * usage is 10/100 indicator */ 881 * usage is 10/100 indicator */
761 pdata->gpio_setting = smsc911x_reg_read(pdata, 882 pdata->gpio_setting = smsc911x_reg_read(pdata,
@@ -787,25 +908,25 @@ static int smsc911x_mii_probe(struct net_device *dev)
787 /* find the first phy */ 908 /* find the first phy */
788 phydev = phy_find_first(pdata->mii_bus); 909 phydev = phy_find_first(pdata->mii_bus);
789 if (!phydev) { 910 if (!phydev) {
790 pr_err("%s: no PHY found\n", dev->name); 911 netdev_err(dev, "no PHY found\n");
791 return -ENODEV; 912 return -ENODEV;
792 } 913 }
793 914
794 SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X", 915 SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X",
795 phydev->addr, phydev->phy_id); 916 phydev->addr, phydev->phy_id);
796 917
797 ret = phy_connect_direct(dev, phydev, 918 ret = phy_connect_direct(dev, phydev,
798 &smsc911x_phy_adjust_link, 0, 919 &smsc911x_phy_adjust_link, 0,
799 pdata->config.phy_interface); 920 pdata->config.phy_interface);
800 921
801 if (ret) { 922 if (ret) {
802 pr_err("%s: Could not attach to PHY\n", dev->name); 923 netdev_err(dev, "Could not attach to PHY\n");
803 return ret; 924 return ret;
804 } 925 }
805 926
806 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", 927 netdev_info(dev,
807 dev->name, phydev->drv->name, 928 "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
808 dev_name(&phydev->dev), phydev->irq); 929 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
809 930
810 /* mask with MAC supported features */ 931 /* mask with MAC supported features */
811 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | 932 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -818,13 +939,13 @@ static int smsc911x_mii_probe(struct net_device *dev)
818 939
819#ifdef USE_PHY_WORK_AROUND 940#ifdef USE_PHY_WORK_AROUND
820 if (smsc911x_phy_loopbacktest(dev) < 0) { 941 if (smsc911x_phy_loopbacktest(dev) < 0) {
821 SMSC_WARNING(HW, "Failed Loop Back Test"); 942 SMSC_WARN(pdata, hw, "Failed Loop Back Test");
822 return -ENODEV; 943 return -ENODEV;
823 } 944 }
824 SMSC_TRACE(HW, "Passed Loop Back Test"); 945 SMSC_TRACE(pdata, hw, "Passed Loop Back Test");
825#endif /* USE_PHY_WORK_AROUND */ 946#endif /* USE_PHY_WORK_AROUND */
826 947
827 SMSC_TRACE(HW, "phy initialised successfully"); 948 SMSC_TRACE(pdata, hw, "phy initialised successfully");
828 return 0; 949 return 0;
829} 950}
830 951
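
This hunk also swaps pr_err("%s: ...", dev->name, ...) for netdev_err(dev, ...), which prints the driver and interface name itself and keeps the format strings shorter. For instance, a sketch with an assumed helper:

#include <linux/netdevice.h>

static void my_report_phy(struct net_device *dev, int err)
{
	if (err)
		netdev_err(dev, "Could not attach to PHY (%d)\n", err);
	else
		netdev_info(dev, "PHY attached\n");
}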
@@ -860,8 +981,8 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
860 smsc911x_phy_initialise_external(pdata); 981 smsc911x_phy_initialise_external(pdata);
861 break; 982 break;
862 default: 983 default:
863 SMSC_TRACE(HW, "External PHY is not supported, " 984 SMSC_TRACE(pdata, hw, "External PHY is not supported, "
864 "using internal PHY"); 985 "using internal PHY");
865 pdata->using_extphy = 0; 986 pdata->using_extphy = 0;
866 break; 987 break;
867 } 988 }
@@ -872,12 +993,12 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
872 } 993 }
873 994
874 if (mdiobus_register(pdata->mii_bus)) { 995 if (mdiobus_register(pdata->mii_bus)) {
875 SMSC_WARNING(PROBE, "Error registering mii bus"); 996 SMSC_WARN(pdata, probe, "Error registering mii bus");
876 goto err_out_free_bus_2; 997 goto err_out_free_bus_2;
877 } 998 }
878 999
879 if (smsc911x_mii_probe(dev) < 0) { 1000 if (smsc911x_mii_probe(dev) < 0) {
880 SMSC_WARNING(PROBE, "Error registering mii bus"); 1001 SMSC_WARN(pdata, probe, "Error registering mii bus");
881 goto err_out_unregister_bus_3; 1002 goto err_out_unregister_bus_3;
882 } 1003 }
883 1004
@@ -913,8 +1034,7 @@ static void smsc911x_tx_update_txcounters(struct net_device *dev)
913 * does not reference a hardware defined reserved bit 1034 * does not reference a hardware defined reserved bit
914 * but rather a driver defined one. 1035 * but rather a driver defined one.
915 */ 1036 */
916 SMSC_WARNING(HW, 1037 SMSC_WARN(pdata, hw, "Packet tag reserved bit is high");
917 "Packet tag reserved bit is high");
918 } else { 1038 } else {
919 if (unlikely(tx_stat & TX_STS_ES_)) { 1039 if (unlikely(tx_stat & TX_STS_ES_)) {
920 dev->stats.tx_errors++; 1040 dev->stats.tx_errors++;
@@ -977,8 +1097,8 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
977 } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout); 1097 } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout);
978 1098
979 if (unlikely(timeout == 0)) 1099 if (unlikely(timeout == 0))
980 SMSC_WARNING(HW, "Timed out waiting for " 1100 SMSC_WARN(pdata, hw, "Timed out waiting for "
981 "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val); 1101 "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
982 } else { 1102 } else {
983 unsigned int temp; 1103 unsigned int temp;
984 while (pktwords--) 1104 while (pktwords--)
@@ -1021,8 +1141,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1021 smsc911x_rx_counterrors(dev, rxstat); 1141 smsc911x_rx_counterrors(dev, rxstat);
1022 1142
1023 if (unlikely(rxstat & RX_STS_ES_)) { 1143 if (unlikely(rxstat & RX_STS_ES_)) {
1024 SMSC_WARNING(RX_ERR, 1144 SMSC_WARN(pdata, rx_err,
1025 "Discarding packet with error bit set"); 1145 "Discarding packet with error bit set");
1026 /* Packet has an error, discard it and continue with 1146 /* Packet has an error, discard it and continue with
1027 * the next */ 1147 * the next */
1028 smsc911x_rx_fastforward(pdata, pktwords); 1148 smsc911x_rx_fastforward(pdata, pktwords);
@@ -1032,8 +1152,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1032 1152
1033 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); 1153 skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
1034 if (unlikely(!skb)) { 1154 if (unlikely(!skb)) {
1035 SMSC_WARNING(RX_ERR, 1155 SMSC_WARN(pdata, rx_err,
1036 "Unable to allocate skb for rx packet"); 1156 "Unable to allocate skb for rx packet");
1037 /* Drop the packet and stop this polling iteration */ 1157 /* Drop the packet and stop this polling iteration */
1038 smsc911x_rx_fastforward(pdata, pktwords); 1158 smsc911x_rx_fastforward(pdata, pktwords);
1039 dev->stats.rx_dropped++; 1159 dev->stats.rx_dropped++;
@@ -1046,8 +1166,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1046 /* Align IP on 16B boundary */ 1166 /* Align IP on 16B boundary */
1047 skb_reserve(skb, NET_IP_ALIGN); 1167 skb_reserve(skb, NET_IP_ALIGN);
1048 skb_put(skb, pktlength - 4); 1168 skb_put(skb, pktlength - 4);
1049 smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head, 1169 pdata->ops->rx_readfifo(pdata,
1050 pktwords); 1170 (unsigned int *)skb->head, pktwords);
1051 skb->protocol = eth_type_trans(skb, dev); 1171 skb->protocol = eth_type_trans(skb, dev);
1052 skb_checksum_none_assert(skb); 1172 skb_checksum_none_assert(skb);
1053 netif_receive_skb(skb); 1173 netif_receive_skb(skb);
@@ -1083,8 +1203,8 @@ static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata)
1083 smsc911x_mac_write(pdata, MAC_CR, mac_cr); 1203 smsc911x_mac_write(pdata, MAC_CR, mac_cr);
1084 smsc911x_mac_write(pdata, HASHH, pdata->hashhi); 1204 smsc911x_mac_write(pdata, HASHH, pdata->hashhi);
1085 smsc911x_mac_write(pdata, HASHL, pdata->hashlo); 1205 smsc911x_mac_write(pdata, HASHL, pdata->hashlo);
1086 SMSC_TRACE(HW, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", 1206 SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X",
1087 mac_cr, pdata->hashhi, pdata->hashlo); 1207 mac_cr, pdata->hashhi, pdata->hashlo);
1088} 1208}
1089 1209
1090static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) 1210static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
@@ -1102,7 +1222,7 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
1102 1222
1103 /* Check Rx has stopped */ 1223 /* Check Rx has stopped */
1104 if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_) 1224 if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_)
1105 SMSC_WARNING(DRV, "Rx not stopped"); 1225 SMSC_WARN(pdata, drv, "Rx not stopped");
1106 1226
1107 /* Perform the update - safe to do now Rx has stopped */ 1227 /* Perform the update - safe to do now Rx has stopped */
1108 smsc911x_rx_multicast_update(pdata); 1228 smsc911x_rx_multicast_update(pdata);
@@ -1131,7 +1251,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
1131 } while ((--timeout) && (temp & HW_CFG_SRST_)); 1251 } while ((--timeout) && (temp & HW_CFG_SRST_));
1132 1252
1133 if (unlikely(temp & HW_CFG_SRST_)) { 1253 if (unlikely(temp & HW_CFG_SRST_)) {
1134 SMSC_WARNING(DRV, "Failed to complete reset"); 1254 SMSC_WARN(pdata, drv, "Failed to complete reset");
1135 return -EIO; 1255 return -EIO;
1136 } 1256 }
1137 return 0; 1257 return 0;
@@ -1160,18 +1280,18 @@ static int smsc911x_open(struct net_device *dev)
1160 1280
1161 /* if the phy is not yet registered, retry later*/ 1281 /* if the phy is not yet registered, retry later*/
1162 if (!pdata->phy_dev) { 1282 if (!pdata->phy_dev) {
1163 SMSC_WARNING(HW, "phy_dev is NULL"); 1283 SMSC_WARN(pdata, hw, "phy_dev is NULL");
1164 return -EAGAIN; 1284 return -EAGAIN;
1165 } 1285 }
1166 1286
1167 if (!is_valid_ether_addr(dev->dev_addr)) { 1287 if (!is_valid_ether_addr(dev->dev_addr)) {
1168 SMSC_WARNING(HW, "dev_addr is not a valid MAC address"); 1288 SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address");
1169 return -EADDRNOTAVAIL; 1289 return -EADDRNOTAVAIL;
1170 } 1290 }
1171 1291
1172 /* Reset the LAN911x */ 1292 /* Reset the LAN911x */
1173 if (smsc911x_soft_reset(pdata)) { 1293 if (smsc911x_soft_reset(pdata)) {
1174 SMSC_WARNING(HW, "soft reset failed"); 1294 SMSC_WARN(pdata, hw, "soft reset failed");
1175 return -EIO; 1295 return -EIO;
1176 } 1296 }
1177 1297
@@ -1191,8 +1311,8 @@ static int smsc911x_open(struct net_device *dev)
1191 } 1311 }
1192 1312
1193 if (unlikely(timeout == 0)) 1313 if (unlikely(timeout == 0))
1194 SMSC_WARNING(IFUP, 1314 SMSC_WARN(pdata, ifup,
1195 "Timed out waiting for EEPROM busy bit to clear"); 1315 "Timed out waiting for EEPROM busy bit to clear");
1196 1316
1197 smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000); 1317 smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);
1198 1318
@@ -1210,22 +1330,22 @@ static int smsc911x_open(struct net_device *dev)
1210 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); 1330 intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
1211 1331
1212 if (pdata->config.irq_polarity) { 1332 if (pdata->config.irq_polarity) {
1213 SMSC_TRACE(IFUP, "irq polarity: active high"); 1333 SMSC_TRACE(pdata, ifup, "irq polarity: active high");
1214 intcfg |= INT_CFG_IRQ_POL_; 1334 intcfg |= INT_CFG_IRQ_POL_;
1215 } else { 1335 } else {
1216 SMSC_TRACE(IFUP, "irq polarity: active low"); 1336 SMSC_TRACE(pdata, ifup, "irq polarity: active low");
1217 } 1337 }
1218 1338
1219 if (pdata->config.irq_type) { 1339 if (pdata->config.irq_type) {
1220 SMSC_TRACE(IFUP, "irq type: push-pull"); 1340 SMSC_TRACE(pdata, ifup, "irq type: push-pull");
1221 intcfg |= INT_CFG_IRQ_TYPE_; 1341 intcfg |= INT_CFG_IRQ_TYPE_;
1222 } else { 1342 } else {
1223 SMSC_TRACE(IFUP, "irq type: open drain"); 1343 SMSC_TRACE(pdata, ifup, "irq type: open drain");
1224 } 1344 }
1225 1345
1226 smsc911x_reg_write(pdata, INT_CFG, intcfg); 1346 smsc911x_reg_write(pdata, INT_CFG, intcfg);
1227 1347
1228 SMSC_TRACE(IFUP, "Testing irq handler using IRQ %d", dev->irq); 1348 SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq);
1229 pdata->software_irq_signal = 0; 1349 pdata->software_irq_signal = 0;
1230 smp_wmb(); 1350 smp_wmb();
1231 1351
@@ -1241,14 +1361,15 @@ static int smsc911x_open(struct net_device *dev)
1241 } 1361 }
1242 1362
1243 if (!pdata->software_irq_signal) { 1363 if (!pdata->software_irq_signal) {
1244 dev_warn(&dev->dev, "ISR failed signaling test (IRQ %d)\n", 1364 netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
1245 dev->irq); 1365 dev->irq);
1246 return -ENODEV; 1366 return -ENODEV;
1247 } 1367 }
1248 SMSC_TRACE(IFUP, "IRQ handler passed test using IRQ %d", dev->irq); 1368 SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
1369 dev->irq);
1249 1370
1250 dev_info(&dev->dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", 1371 netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n",
1251 (unsigned long)pdata->ioaddr, dev->irq); 1372 (unsigned long)pdata->ioaddr, dev->irq);
1252 1373
1253 /* Reset the last known duplex and carrier */ 1374 /* Reset the last known duplex and carrier */
1254 pdata->last_duplex = -1; 1375 pdata->last_duplex = -1;
@@ -1313,7 +1434,7 @@ static int smsc911x_stop(struct net_device *dev)
1313 if (pdata->phy_dev) 1434 if (pdata->phy_dev)
1314 phy_stop(pdata->phy_dev); 1435 phy_stop(pdata->phy_dev);
1315 1436
1316 SMSC_TRACE(IFDOWN, "Interface stopped"); 1437 SMSC_TRACE(pdata, ifdown, "Interface stopped");
1317 return 0; 1438 return 0;
1318} 1439}
1319 1440
@@ -1331,8 +1452,8 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1331 freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_; 1452 freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_;
1332 1453
1333 if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD)) 1454 if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD))
1334 SMSC_WARNING(TX_ERR, 1455 SMSC_WARN(pdata, tx_err,
1335 "Tx data fifo low, space available: %d", freespace); 1456 "Tx data fifo low, space available: %d", freespace);
1336 1457
1337 /* Word alignment adjustment */ 1458 /* Word alignment adjustment */
1338 tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16; 1459 tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16;
@@ -1350,7 +1471,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1350 wrsz += (u32)((ulong)skb->data & 0x3); 1471 wrsz += (u32)((ulong)skb->data & 0x3);
1351 wrsz >>= 2; 1472 wrsz >>= 2;
1352 1473
1353 smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz); 1474 pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
1354 freespace -= (skb->len + 32); 1475 freespace -= (skb->len + 32);
1355 dev_kfree_skb(skb); 1476 dev_kfree_skb(skb);
1356 1477
@@ -1432,7 +1553,7 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1432 * receiving data */ 1553 * receiving data */
1433 if (!pdata->multicast_update_pending) { 1554 if (!pdata->multicast_update_pending) {
1434 unsigned int temp; 1555 unsigned int temp;
1435 SMSC_TRACE(HW, "scheduling mcast update"); 1556 SMSC_TRACE(pdata, hw, "scheduling mcast update");
1436 pdata->multicast_update_pending = 1; 1557 pdata->multicast_update_pending = 1;
1437 1558
1438 /* Request the hardware to stop, then perform the 1559 /* Request the hardware to stop, then perform the
@@ -1474,7 +1595,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1474 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { 1595 if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
1475 /* Called when there is a multicast update scheduled and 1596 /* Called when there is a multicast update scheduled and
1476 * it is now safe to complete the update */ 1597 * it is now safe to complete the update */
1477 SMSC_TRACE(INTR, "RX Stop interrupt"); 1598 SMSC_TRACE(pdata, intr, "RX Stop interrupt");
1478 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); 1599 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
1479 if (pdata->multicast_update_pending) 1600 if (pdata->multicast_update_pending)
1480 smsc911x_rx_multicast_update_workaround(pdata); 1601 smsc911x_rx_multicast_update_workaround(pdata);
@@ -1491,7 +1612,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1491 } 1612 }
1492 1613
1493 if (unlikely(intsts & inten & INT_STS_RXE_)) { 1614 if (unlikely(intsts & inten & INT_STS_RXE_)) {
1494 SMSC_TRACE(INTR, "RX Error interrupt"); 1615 SMSC_TRACE(pdata, intr, "RX Error interrupt");
1495 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); 1616 smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
1496 serviced = IRQ_HANDLED; 1617 serviced = IRQ_HANDLED;
1497 } 1618 }
@@ -1505,8 +1626,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
1505 /* Schedule a NAPI poll */ 1626 /* Schedule a NAPI poll */
1506 __napi_schedule(&pdata->napi); 1627 __napi_schedule(&pdata->napi);
1507 } else { 1628 } else {
1508 SMSC_WARNING(RX_ERR, 1629 SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
1509 "napi_schedule_prep failed");
1510 } 1630 }
1511 serviced = IRQ_HANDLED; 1631 serviced = IRQ_HANDLED;
1512 } 1632 }
@@ -1543,7 +1663,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
1543 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 1663 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
1544 spin_unlock_irq(&pdata->mac_lock); 1664 spin_unlock_irq(&pdata->mac_lock);
1545 1665
1546 dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr); 1666 netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
1547 1667
1548 return 0; 1668 return 0;
1549} 1669}
@@ -1649,9 +1769,9 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
1649 int timeout = 100; 1769 int timeout = 100;
1650 u32 e2cmd; 1770 u32 e2cmd;
1651 1771
1652 SMSC_TRACE(DRV, "op 0x%08x", op); 1772 SMSC_TRACE(pdata, drv, "op 0x%08x", op);
1653 if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) { 1773 if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
1654 SMSC_WARNING(DRV, "Busy at start"); 1774 SMSC_WARN(pdata, drv, "Busy at start");
1655 return -EBUSY; 1775 return -EBUSY;
1656 } 1776 }
1657 1777
@@ -1664,12 +1784,12 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
1664 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); 1784 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
1665 1785
1666 if (!timeout) { 1786 if (!timeout) {
1667 SMSC_TRACE(DRV, "TIMED OUT"); 1787 SMSC_TRACE(pdata, drv, "TIMED OUT");
1668 return -EAGAIN; 1788 return -EAGAIN;
1669 } 1789 }
1670 1790
1671 if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { 1791 if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
1672 SMSC_TRACE(DRV, "Error occurred during eeprom operation"); 1792 SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation");
1673 return -EINVAL; 1793 return -EINVAL;
1674 } 1794 }
1675 1795
@@ -1682,7 +1802,7 @@ static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata,
1682 u32 op = E2P_CMD_EPC_CMD_READ_ | address; 1802 u32 op = E2P_CMD_EPC_CMD_READ_ | address;
1683 int ret; 1803 int ret;
1684 1804
1685 SMSC_TRACE(DRV, "address 0x%x", address); 1805 SMSC_TRACE(pdata, drv, "address 0x%x", address);
1686 ret = smsc911x_eeprom_send_cmd(pdata, op); 1806 ret = smsc911x_eeprom_send_cmd(pdata, op);
1687 1807
1688 if (!ret) 1808 if (!ret)
@@ -1698,7 +1818,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
1698 u32 temp; 1818 u32 temp;
1699 int ret; 1819 int ret;
1700 1820
1701 SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data); 1821 SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data);
1702 ret = smsc911x_eeprom_send_cmd(pdata, op); 1822 ret = smsc911x_eeprom_send_cmd(pdata, op);
1703 1823
1704 if (!ret) { 1824 if (!ret) {
@@ -1811,26 +1931,26 @@ static int __devinit smsc911x_init(struct net_device *dev)
1811 struct smsc911x_data *pdata = netdev_priv(dev); 1931 struct smsc911x_data *pdata = netdev_priv(dev);
1812 unsigned int byte_test; 1932 unsigned int byte_test;
1813 1933
1814 SMSC_TRACE(PROBE, "Driver Parameters:"); 1934 SMSC_TRACE(pdata, probe, "Driver Parameters:");
1815 SMSC_TRACE(PROBE, "LAN base: 0x%08lX", 1935 SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
1816 (unsigned long)pdata->ioaddr); 1936 (unsigned long)pdata->ioaddr);
1817 SMSC_TRACE(PROBE, "IRQ: %d", dev->irq); 1937 SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq);
1818 SMSC_TRACE(PROBE, "PHY will be autodetected."); 1938 SMSC_TRACE(pdata, probe, "PHY will be autodetected.");
1819 1939
1820 spin_lock_init(&pdata->dev_lock); 1940 spin_lock_init(&pdata->dev_lock);
1821 spin_lock_init(&pdata->mac_lock); 1941 spin_lock_init(&pdata->mac_lock);
1822 1942
1823 if (pdata->ioaddr == 0) { 1943 if (pdata->ioaddr == 0) {
1824 SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000"); 1944 SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
1825 return -ENODEV; 1945 return -ENODEV;
1826 } 1946 }
1827 1947
1828 /* Check byte ordering */ 1948 /* Check byte ordering */
1829 byte_test = smsc911x_reg_read(pdata, BYTE_TEST); 1949 byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
1830 SMSC_TRACE(PROBE, "BYTE_TEST: 0x%08X", byte_test); 1950 SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
1831 if (byte_test == 0x43218765) { 1951 if (byte_test == 0x43218765) {
1832 SMSC_TRACE(PROBE, "BYTE_TEST looks swapped, " 1952 SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, "
1833 "applying WORD_SWAP"); 1953 "applying WORD_SWAP");
1834 smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff); 1954 smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff);
1835 1955
1836 /* 1 dummy read of BYTE_TEST is needed after a write to 1956 /* 1 dummy read of BYTE_TEST is needed after a write to
@@ -1841,12 +1961,13 @@ static int __devinit smsc911x_init(struct net_device *dev)
1841 } 1961 }
1842 1962
1843 if (byte_test != 0x87654321) { 1963 if (byte_test != 0x87654321) {
1844 SMSC_WARNING(DRV, "BYTE_TEST: 0x%08X", byte_test); 1964 SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test);
1845 if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) { 1965 if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) {
1846 SMSC_WARNING(PROBE, 1966 SMSC_WARN(pdata, probe,
1847 "top 16 bits equal to bottom 16 bits"); 1967 "top 16 bits equal to bottom 16 bits");
1848 SMSC_TRACE(PROBE, "This may mean the chip is set " 1968 SMSC_TRACE(pdata, probe,
1849 "for 32 bit while the bus is reading 16 bit"); 1969 "This may mean the chip is set "
1970 "for 32 bit while the bus is reading 16 bit");
1850 } 1971 }
1851 return -ENODEV; 1972 return -ENODEV;
1852 } 1973 }
@@ -1881,17 +2002,18 @@ static int __devinit smsc911x_init(struct net_device *dev)
1881 break; 2002 break;
1882 2003
1883 default: 2004 default:
1884 SMSC_WARNING(PROBE, "LAN911x not identified, idrev: 0x%08X", 2005 SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X",
1885 pdata->idrev); 2006 pdata->idrev);
1886 return -ENODEV; 2007 return -ENODEV;
1887 } 2008 }
1888 2009
1889 SMSC_TRACE(PROBE, "LAN911x identified, idrev: 0x%08X, generation: %d", 2010 SMSC_TRACE(pdata, probe,
1890 pdata->idrev, pdata->generation); 2011 "LAN911x identified, idrev: 0x%08X, generation: %d",
2012 pdata->idrev, pdata->generation);
1891 2013
1892 if (pdata->generation == 0) 2014 if (pdata->generation == 0)
1893 SMSC_WARNING(PROBE, 2015 SMSC_WARN(pdata, probe,
1894 "This driver is not intended for this chip revision"); 2016 "This driver is not intended for this chip revision");
1895 2017
1896 /* workaround for platforms without an eeprom, where the mac address 2018 /* workaround for platforms without an eeprom, where the mac address
1897 * is stored elsewhere and set by the bootloader. This saves the 2019 * is stored elsewhere and set by the bootloader. This saves the
@@ -1931,7 +2053,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
1931 BUG_ON(!pdata->ioaddr); 2053 BUG_ON(!pdata->ioaddr);
1932 BUG_ON(!pdata->phy_dev); 2054 BUG_ON(!pdata->phy_dev);
1933 2055
1934 SMSC_TRACE(IFDOWN, "Stopping driver."); 2056 SMSC_TRACE(pdata, ifdown, "Stopping driver");
1935 2057
1936 phy_disconnect(pdata->phy_dev); 2058 phy_disconnect(pdata->phy_dev);
1937 pdata->phy_dev = NULL; 2059 pdata->phy_dev = NULL;
@@ -1955,6 +2077,22 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
1955 return 0; 2077 return 0;
1956} 2078}
1957 2079
 2080/* standard register access */
2081static const struct smsc911x_ops standard_smsc911x_ops = {
2082 .reg_read = __smsc911x_reg_read,
2083 .reg_write = __smsc911x_reg_write,
2084 .rx_readfifo = smsc911x_rx_readfifo,
2085 .tx_writefifo = smsc911x_tx_writefifo,
2086};
2087
2088/* shifted register access */
2089static const struct smsc911x_ops shifted_smsc911x_ops = {
2090 .reg_read = __smsc911x_reg_read_shift,
2091 .reg_write = __smsc911x_reg_write_shift,
2092 .rx_readfifo = smsc911x_rx_readfifo_shift,
2093 .tx_writefifo = smsc911x_tx_writefifo_shift,
2094};
2095
1958static int __devinit smsc911x_drv_probe(struct platform_device *pdev) 2096static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1959{ 2097{
1960 struct net_device *dev; 2098 struct net_device *dev;
@@ -1965,11 +2103,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1965 int res_size, irq_flags; 2103 int res_size, irq_flags;
1966 int retval; 2104 int retval;
1967 2105
1968 pr_info("%s: Driver version %s.\n", SMSC_CHIPNAME, SMSC_DRV_VERSION); 2106 pr_info("Driver version %s\n", SMSC_DRV_VERSION);
1969 2107
1970 /* platform data specifies irq & dynamic bus configuration */ 2108 /* platform data specifies irq & dynamic bus configuration */
1971 if (!pdev->dev.platform_data) { 2109 if (!pdev->dev.platform_data) {
1972 pr_warning("%s: platform_data not provided\n", SMSC_CHIPNAME); 2110 pr_warn("platform_data not provided\n");
1973 retval = -ENODEV; 2111 retval = -ENODEV;
1974 goto out_0; 2112 goto out_0;
1975 } 2113 }
@@ -1979,8 +2117,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1979 if (!res) 2117 if (!res)
1980 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2118 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1981 if (!res) { 2119 if (!res) {
1982 pr_warning("%s: Could not allocate resource.\n", 2120 pr_warn("Could not allocate resource\n");
1983 SMSC_CHIPNAME);
1984 retval = -ENODEV; 2121 retval = -ENODEV;
1985 goto out_0; 2122 goto out_0;
1986 } 2123 }
@@ -1988,8 +2125,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
1988 2125
1989 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2126 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1990 if (!irq_res) { 2127 if (!irq_res) {
1991 pr_warning("%s: Could not allocate irq resource.\n", 2128 pr_warn("Could not allocate irq resource\n");
1992 SMSC_CHIPNAME);
1993 retval = -ENODEV; 2129 retval = -ENODEV;
1994 goto out_0; 2130 goto out_0;
1995 } 2131 }
@@ -2001,7 +2137,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2001 2137
2002 dev = alloc_etherdev(sizeof(struct smsc911x_data)); 2138 dev = alloc_etherdev(sizeof(struct smsc911x_data));
2003 if (!dev) { 2139 if (!dev) {
2004 pr_warning("%s: Could not allocate device.\n", SMSC_CHIPNAME); 2140 pr_warn("Could not allocate device\n");
2005 retval = -ENOMEM; 2141 retval = -ENOMEM;
2006 goto out_release_io_1; 2142 goto out_release_io_1;
2007 } 2143 }
@@ -2021,12 +2157,17 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2021 pdata->msg_enable = ((1 << debug) - 1); 2157 pdata->msg_enable = ((1 << debug) - 1);
2022 2158
2023 if (pdata->ioaddr == NULL) { 2159 if (pdata->ioaddr == NULL) {
2024 SMSC_WARNING(PROBE, 2160 SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
2025 "Error smsc911x base address invalid");
2026 retval = -ENOMEM; 2161 retval = -ENOMEM;
2027 goto out_free_netdev_2; 2162 goto out_free_netdev_2;
2028 } 2163 }
2029 2164
2165 /* assume standard, non-shifted, access to HW registers */
2166 pdata->ops = &standard_smsc911x_ops;
2167 /* apply the right access if shifting is needed */
2168 if (config->shift)
2169 pdata->ops = &shifted_smsc911x_ops;
2170
2030 retval = smsc911x_init(dev); 2171 retval = smsc911x_init(dev);
2031 if (retval < 0) 2172 if (retval < 0)
2032 goto out_unmap_io_3; 2173 goto out_unmap_io_3;
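
The probe hunk picks between the two accessor tables exactly once, based on whether the platform data requests shifted addressing; every later register access and FIFO transfer then goes through pdata->ops without re-testing the bus configuration. The selection reduced to its essentials, with illustrative names and the real accessor bodies omitted:

#include <linux/types.h>

struct my_priv;

struct my_bus_ops {
	u32  (*reg_read)(struct my_priv *p, u32 reg);
	void (*reg_write)(struct my_priv *p, u32 reg, u32 val);
};

struct my_priv {
	const struct my_bus_ops *ops;	/* chosen once at probe time */
	unsigned int shift;		/* from platform data */
};

/* definitions omitted: these would point at the plain and the
 * shift-aware accessors respectively */
static const struct my_bus_ops my_standard_ops;
static const struct my_bus_ops my_shifted_ops;

static void my_pick_ops(struct my_priv *p)
{
	/* default to standard access, switch only if shifting is needed */
	p->ops = &my_standard_ops;
	if (p->shift)
		p->ops = &my_shifted_ops;
}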
@@ -2047,8 +2188,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2047 retval = request_irq(dev->irq, smsc911x_irqhandler, 2188 retval = request_irq(dev->irq, smsc911x_irqhandler,
2048 irq_flags | IRQF_SHARED, dev->name, dev); 2189 irq_flags | IRQF_SHARED, dev->name, dev);
2049 if (retval) { 2190 if (retval) {
2050 SMSC_WARNING(PROBE, 2191 SMSC_WARN(pdata, probe,
2051 "Unable to claim requested irq: %d", dev->irq); 2192 "Unable to claim requested irq: %d", dev->irq);
2052 goto out_unmap_io_3; 2193 goto out_unmap_io_3;
2053 } 2194 }
2054 2195
@@ -2056,17 +2197,16 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2056 2197
2057 retval = register_netdev(dev); 2198 retval = register_netdev(dev);
2058 if (retval) { 2199 if (retval) {
2059 SMSC_WARNING(PROBE, 2200 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
2060 "Error %i registering device", retval);
2061 goto out_unset_drvdata_4; 2201 goto out_unset_drvdata_4;
2062 } else { 2202 } else {
2063 SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name); 2203 SMSC_TRACE(pdata, probe,
2204 "Network interface: \"%s\"", dev->name);
2064 } 2205 }
2065 2206
2066 retval = smsc911x_mii_init(pdev, dev); 2207 retval = smsc911x_mii_init(pdev, dev);
2067 if (retval) { 2208 if (retval) {
2068 SMSC_WARNING(PROBE, 2209 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
2069 "Error %i initialising mii", retval);
2070 goto out_unregister_netdev_5; 2210 goto out_unregister_netdev_5;
2071 } 2211 }
2072 2212
@@ -2075,10 +2215,12 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2075 /* Check if mac address has been specified when bringing interface up */ 2215 /* Check if mac address has been specified when bringing interface up */
2076 if (is_valid_ether_addr(dev->dev_addr)) { 2216 if (is_valid_ether_addr(dev->dev_addr)) {
2077 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 2217 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
2078 SMSC_TRACE(PROBE, "MAC Address is specified by configuration"); 2218 SMSC_TRACE(pdata, probe,
2219 "MAC Address is specified by configuration");
2079 } else if (is_valid_ether_addr(pdata->config.mac)) { 2220 } else if (is_valid_ether_addr(pdata->config.mac)) {
2080 memcpy(dev->dev_addr, pdata->config.mac, 6); 2221 memcpy(dev->dev_addr, pdata->config.mac, 6);
2081 SMSC_TRACE(PROBE, "MAC Address specified by platform data"); 2222 SMSC_TRACE(pdata, probe,
2223 "MAC Address specified by platform data");
2082 } else { 2224 } else {
2083 /* Try reading mac address from device. if EEPROM is present 2225 /* Try reading mac address from device. if EEPROM is present
2084 * it will already have been set */ 2226 * it will already have been set */
@@ -2086,20 +2228,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2086 2228
2087 if (is_valid_ether_addr(dev->dev_addr)) { 2229 if (is_valid_ether_addr(dev->dev_addr)) {
2088 /* eeprom values are valid so use them */ 2230 /* eeprom values are valid so use them */
2089 SMSC_TRACE(PROBE, 2231 SMSC_TRACE(pdata, probe,
2090 "Mac Address is read from LAN911x EEPROM"); 2232 "Mac Address is read from LAN911x EEPROM");
2091 } else { 2233 } else {
2092 /* eeprom values are invalid, generate random MAC */ 2234 /* eeprom values are invalid, generate random MAC */
2093 random_ether_addr(dev->dev_addr); 2235 random_ether_addr(dev->dev_addr);
2094 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 2236 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
2095 SMSC_TRACE(PROBE, 2237 SMSC_TRACE(pdata, probe,
2096 "MAC Address is set to random_ether_addr"); 2238 "MAC Address is set to random_ether_addr");
2097 } 2239 }
2098 } 2240 }
2099 2241
2100 spin_unlock_irq(&pdata->mac_lock); 2242 spin_unlock_irq(&pdata->mac_lock);
2101 2243
2102 dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr); 2244 netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
2103 2245
2104 return 0; 2246 return 0;
2105 2247
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 50f712e99e96..8d67aacf8867 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -33,25 +33,21 @@
33 * can be successfully looped back */ 33 * can be successfully looped back */
34#define USE_PHY_WORK_AROUND 34#define USE_PHY_WORK_AROUND
35 35
36#define DPRINTK(nlevel, klevel, fmt, args...) \
37 ((void)((NETIF_MSG_##nlevel & pdata->msg_enable) && \
38 printk(KERN_##klevel "%s: %s: " fmt "\n", \
39 pdata->dev->name, __func__, ## args)))
40
41#if USE_DEBUG >= 1 36#if USE_DEBUG >= 1
42#define SMSC_WARNING(nlevel, fmt, args...) \ 37#define SMSC_WARN(pdata, nlevel, fmt, args...) \
43 DPRINTK(nlevel, WARNING, fmt, ## args) 38 netif_warn(pdata, nlevel, (pdata)->dev, \
39 "%s: " fmt "\n", __func__, ##args)
44#else 40#else
45#define SMSC_WARNING(nlevel, fmt, args...) \ 41#define SMSC_WARN(pdata, nlevel, fmt, args...) \
46 ({ do {} while (0); 0; }) 42 no_printk(fmt "\n", ##args)
47#endif 43#endif
48 44
49#if USE_DEBUG >= 2 45#if USE_DEBUG >= 2
50#define SMSC_TRACE(nlevel, fmt, args...) \ 46#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
51 DPRINTK(nlevel, INFO, fmt, ## args) 47 netif_info(pdata, nlevel, pdata->dev, fmt "\n", ##args)
52#else 48#else
53#define SMSC_TRACE(nlevel, fmt, args...) \ 49#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
54 ({ do {} while (0); 0; }) 50 no_printk(fmt "\n", ##args)
55#endif 51#endif
56 52
57#ifdef CONFIG_DEBUG_SPINLOCK 53#ifdef CONFIG_DEBUG_SPINLOCK
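For reference, the reworked SMSC_WARN/SMSC_TRACE macros above follow the usual kernel pattern: they route through netif_warn()/netif_info() when USE_DEBUG is set and collapse to no_printk() otherwise, so format strings and arguments stay type-checked even in non-debug builds. A minimal userspace sketch of that pattern, using GNU C extensions as the kernel does; the macro and message names here are illustrative, not the driver's:

#include <stdio.h>

#define USE_DEBUG 0

/* Swallows its arguments but keeps printf-style format checking. */
#define no_printk(fmt, ...) \
	({ if (0) printf(fmt, ##__VA_ARGS__); 0; })

#if USE_DEBUG >= 1
#define DBG_WARN(fmt, ...) printf("warn: " fmt "\n", ##__VA_ARGS__)
#else
#define DBG_WARN(fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__)
#endif

int main(void)
{
	DBG_WARN("unable to claim irq %d", 42);	/* silent when USE_DEBUG is 0 */
	return 0;
}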
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index cb6bcca9d541..949f124e1278 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -994,15 +994,13 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
994 skb->protocol = eth_type_trans(skb, netdev); 994 skb->protocol = eth_type_trans(skb, netdev);
995 995
996 /* checksum offload */ 996 /* checksum offload */
997 if (card->options.rx_csum) { 997 skb_checksum_none_assert(skb);
998 if (netdev->features & NETIF_F_RXCSUM) {
998 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) == 999 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
999 SPIDER_NET_DATA_STATUS_CKSUM_MASK) && 1000 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
1000 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK)) 1001 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1001 skb->ip_summed = CHECKSUM_UNNECESSARY; 1002 skb->ip_summed = CHECKSUM_UNNECESSARY;
1002 else 1003 }
1003 skb_checksum_none_assert(skb);
1004 } else
1005 skb_checksum_none_assert(skb);
1006 1004
1007 if (data_status & SPIDER_NET_VLAN_PACKET) { 1005 if (data_status & SPIDER_NET_VLAN_PACKET) {
1008 /* further enhancements: HW-accel VLAN 1006 /* further enhancements: HW-accel VLAN
@@ -2322,14 +2320,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
2322 card->aneg_timer.function = spider_net_link_phy; 2320 card->aneg_timer.function = spider_net_link_phy;
2323 card->aneg_timer.data = (unsigned long) card; 2321 card->aneg_timer.data = (unsigned long) card;
2324 2322
2325 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2326
2327 netif_napi_add(netdev, &card->napi, 2323 netif_napi_add(netdev, &card->napi,
2328 spider_net_poll, SPIDER_NET_NAPI_WEIGHT); 2324 spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
2329 2325
2330 spider_net_setup_netdev_ops(netdev); 2326 spider_net_setup_netdev_ops(netdev);
2331 2327
2332 netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX; 2328 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2329 if (SPIDER_NET_RX_CSUM_DEFAULT)
2330 netdev->features |= NETIF_F_RXCSUM;
2331 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
2333 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2332 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2334 * NETIF_F_HW_VLAN_FILTER */ 2333 * NETIF_F_HW_VLAN_FILTER */
2335 2334
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 05f74cbdd617..020f64a8fcf7 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -429,12 +429,6 @@ struct spider_net_descr_chain {
429 * 701b8000 would be correct, but every packets gets that flag */ 429 * 701b8000 would be correct, but every packets gets that flag */
430#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000 430#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
431 431
432/* this will be bigger some time */
433struct spider_net_options {
434 int rx_csum; /* for rx: if 0 ip_summed=NONE,
435 if 1 and hw has verified, ip_summed=UNNECESSARY */
436};
437
438#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \ 432#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
439 NETIF_MSG_PROBE | \ 433 NETIF_MSG_PROBE | \
440 NETIF_MSG_LINK | \ 434 NETIF_MSG_LINK | \
@@ -487,7 +481,6 @@ struct spider_net_card {
487 /* for ethtool */ 481 /* for ethtool */
488 int msg_enable; 482 int msg_enable;
489 struct spider_net_extra_stats spider_stats; 483 struct spider_net_extra_stats spider_stats;
490 struct spider_net_options options;
491 484
492 /* Must be last item in struct */ 485 /* Must be last item in struct */
493 struct spider_net_descr darray[0]; 486 struct spider_net_descr darray[0];
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 5bae728c3820..9c288cd7d171 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -58,7 +58,7 @@ spider_net_ethtool_get_settings(struct net_device *netdev,
58 cmd->advertising = (ADVERTISED_1000baseT_Full | 58 cmd->advertising = (ADVERTISED_1000baseT_Full |
59 ADVERTISED_FIBRE); 59 ADVERTISED_FIBRE);
60 cmd->port = PORT_FIBRE; 60 cmd->port = PORT_FIBRE;
61 cmd->speed = card->phy.speed; 61 ethtool_cmd_speed_set(cmd, card->phy.speed);
62 cmd->duplex = DUPLEX_FULL; 62 cmd->duplex = DUPLEX_FULL;
63 63
64 return 0; 64 return 0;
@@ -115,24 +115,6 @@ spider_net_ethtool_nway_reset(struct net_device *netdev)
115 return 0; 115 return 0;
116} 116}
117 117
118static u32
119spider_net_ethtool_get_rx_csum(struct net_device *netdev)
120{
121 struct spider_net_card *card = netdev_priv(netdev);
122
123 return card->options.rx_csum;
124}
125
126static int
127spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
128{
129 struct spider_net_card *card = netdev_priv(netdev);
130
131 card->options.rx_csum = n;
132 return 0;
133}
134
135
136static void 118static void
137spider_net_ethtool_get_ringparam(struct net_device *netdev, 119spider_net_ethtool_get_ringparam(struct net_device *netdev,
138 struct ethtool_ringparam *ering) 120 struct ethtool_ringparam *ering)
@@ -189,9 +171,6 @@ const struct ethtool_ops spider_net_ethtool_ops = {
189 .set_msglevel = spider_net_ethtool_set_msglevel, 171 .set_msglevel = spider_net_ethtool_set_msglevel,
190 .get_link = ethtool_op_get_link, 172 .get_link = ethtool_op_get_link,
191 .nway_reset = spider_net_ethtool_nway_reset, 173 .nway_reset = spider_net_ethtool_nway_reset,
192 .get_rx_csum = spider_net_ethtool_get_rx_csum,
193 .set_rx_csum = spider_net_ethtool_set_rx_csum,
194 .set_tx_csum = ethtool_op_set_tx_csum,
195 .get_ringparam = spider_net_ethtool_get_ringparam, 174 .get_ringparam = spider_net_ethtool_get_ringparam,
196 .get_strings = spider_net_get_strings, 175 .get_strings = spider_net_get_strings,
197 .get_sset_count = spider_net_get_sset_count, 176 .get_sset_count = spider_net_get_sset_count,
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 6ae4c3f4c63c..f20455cbfbbc 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -178,10 +178,11 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
178{ 178{
179 unsigned int pmt = 0; 179 unsigned int pmt = 0;
180 180
181 if (mode == WAKE_MAGIC) { 181 if (mode & WAKE_MAGIC) {
182 CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); 182 CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
183 pmt |= power_down | magic_pkt_en; 183 pmt |= power_down | magic_pkt_en;
184 } else if (mode == WAKE_UCAST) { 184 }
185 if (mode & WAKE_UCAST) {
185 CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); 186 CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
186 pmt |= global_unicast; 187 pmt |= global_unicast;
187 } 188 }
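The dwmac1000 change above matters because the requested wake-on-LAN options form a bitmask: a user can ask for WAKE_MAGIC and WAKE_UCAST at the same time, so each option must be tested with '&' rather than compared with '=='. A small illustration with made-up bit values (the real WAKE_* constants live in linux/ethtool.h):

#include <stdio.h>

#define WAKE_MAGIC (1u << 0)	/* placeholder values for illustration */
#define WAKE_UCAST (1u << 1)

int main(void)
{
	unsigned int mode = WAKE_MAGIC | WAKE_UCAST;
	unsigned int pmt = 0;

	/* Each wake source is an independent bit, so test them separately. */
	if (mode & WAKE_MAGIC)
		pmt |= 0x1;	/* magic-packet enable (placeholder bit) */
	if (mode & WAKE_UCAST)
		pmt |= 0x2;	/* global-unicast enable (placeholder bit) */

	/* "mode == WAKE_MAGIC" would have missed the combined request. */
	printf("pmt=0x%x\n", pmt);	/* prints 0x3 */
	return 0;
}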
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 5f06c4706abe..2b076b313622 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -21,7 +21,6 @@
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Nov_2010" 23#define DRV_MODULE_VERSION "Nov_2010"
24#include <linux/platform_device.h>
25#include <linux/stmmac.h> 24#include <linux/stmmac.h>
26 25
27#include "common.h" 26#include "common.h"
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index fd719edc7f7c..ae5213a8c4cd 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -197,13 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
197 } 197 }
198} 198}
199 199
200static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
201{
202 struct stmmac_priv *priv = netdev_priv(dev);
203
204 return priv->rx_coe;
205}
206
207static void 200static void
208stmmac_get_pauseparam(struct net_device *netdev, 201stmmac_get_pauseparam(struct net_device *netdev,
209 struct ethtool_pauseparam *pause) 202 struct ethtool_pauseparam *pause)
@@ -241,20 +234,11 @@ stmmac_set_pauseparam(struct net_device *netdev,
241 new_pause |= FLOW_TX; 234 new_pause |= FLOW_TX;
242 235
243 priv->flow_ctrl = new_pause; 236 priv->flow_ctrl = new_pause;
237 phy->autoneg = pause->autoneg;
244 238
245 if (phy->autoneg) { 239 if (phy->autoneg) {
246 if (netif_running(netdev)) { 240 if (netif_running(netdev))
247 struct ethtool_cmd cmd; 241 ret = phy_start_aneg(phy);
248 /* auto-negotiation automatically restarted */
249 cmd.cmd = ETHTOOL_NWAY_RST;
250 cmd.supported = phy->supported;
251 cmd.advertising = phy->advertising;
252 cmd.autoneg = phy->autoneg;
253 cmd.speed = phy->speed;
254 cmd.duplex = phy->duplex;
255 cmd.phy_address = phy->addr;
256 ret = phy_ethtool_sset(phy, &cmd);
257 }
258 } else 242 } else
259 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex, 243 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
260 priv->flow_ctrl, priv->pause); 244 priv->flow_ctrl, priv->pause);
@@ -315,7 +299,7 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
315 299
316 spin_lock_irq(&priv->lock); 300 spin_lock_irq(&priv->lock);
317 if (device_can_wakeup(priv->device)) { 301 if (device_can_wakeup(priv->device)) {
318 wol->supported = WAKE_MAGIC; 302 wol->supported = WAKE_MAGIC | WAKE_UCAST;
319 wol->wolopts = priv->wolopts; 303 wol->wolopts = priv->wolopts;
320 } 304 }
321 spin_unlock_irq(&priv->lock); 305 spin_unlock_irq(&priv->lock);
@@ -324,7 +308,7 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
324static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 308static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
325{ 309{
326 struct stmmac_priv *priv = netdev_priv(dev); 310 struct stmmac_priv *priv = netdev_priv(dev);
327 u32 support = WAKE_MAGIC; 311 u32 support = WAKE_MAGIC | WAKE_UCAST;
328 312
329 if (!device_can_wakeup(priv->device)) 313 if (!device_can_wakeup(priv->device))
330 return -EINVAL; 314 return -EINVAL;
@@ -358,11 +342,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
358 .get_regs = stmmac_ethtool_gregs, 342 .get_regs = stmmac_ethtool_gregs,
359 .get_regs_len = stmmac_ethtool_get_regs_len, 343 .get_regs_len = stmmac_ethtool_get_regs_len,
360 .get_link = ethtool_op_get_link, 344 .get_link = ethtool_op_get_link,
361 .get_rx_csum = stmmac_ethtool_get_rx_csum,
362 .get_tx_csum = ethtool_op_get_tx_csum,
363 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
364 .get_sg = ethtool_op_get_sg,
365 .set_sg = ethtool_op_set_sg,
366 .get_pauseparam = stmmac_get_pauseparam, 345 .get_pauseparam = stmmac_get_pauseparam,
367 .set_pauseparam = stmmac_set_pauseparam, 346 .set_pauseparam = stmmac_set_pauseparam,
368 .get_ethtool_stats = stmmac_get_ethtool_stats, 347 .get_ethtool_stats = stmmac_get_ethtool_stats,
@@ -370,8 +349,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
370 .get_wol = stmmac_get_wol, 349 .get_wol = stmmac_get_wol,
371 .set_wol = stmmac_set_wol, 350 .set_wol = stmmac_set_wol,
372 .get_sset_count = stmmac_get_sset_count, 351 .get_sset_count = stmmac_get_sset_count,
373 .get_tso = ethtool_op_get_tso,
374 .set_tso = ethtool_op_set_tso,
375}; 352};
376 353
377void stmmac_set_ethtool_ops(struct net_device *netdev) 354void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index cc973fc38405..e15c4a0bb96d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -116,9 +116,6 @@ static int tc = TC_DEFAULT;
116module_param(tc, int, S_IRUGO | S_IWUSR); 116module_param(tc, int, S_IRUGO | S_IWUSR);
117MODULE_PARM_DESC(tc, "DMA threshold control value"); 117MODULE_PARM_DESC(tc, "DMA threshold control value");
118 118
119#define RX_NO_COALESCE 1 /* Always interrupt on completion */
120#define TX_NO_COALESCE -1 /* No moderation by default */
121
122/* Pay attention to tune this parameter; take care of both 119/* Pay attention to tune this parameter; take care of both
123 * hardware capability and network stabitily/performance impact. 120 * hardware capability and network stabitily/performance impact.
124 * Many tests showed that ~4ms latency seems to be good enough. */ 121 * Many tests showed that ~4ms latency seems to be good enough. */
@@ -139,7 +136,6 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
139 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 136 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
140 137
141static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 138static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
142static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
143 139
144/** 140/**
145 * stmmac_verify_args - verify the driver parameters. 141 * stmmac_verify_args - verify the driver parameters.
@@ -831,6 +827,7 @@ static int stmmac_open(struct net_device *dev)
831 pr_info("stmmac: Rx Checksum Offload Engine supported\n"); 827 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
832 if (priv->plat->tx_coe) 828 if (priv->plat->tx_coe)
833 pr_info("\tTX Checksum insertion supported\n"); 829 pr_info("\tTX Checksum insertion supported\n");
830 netdev_update_features(dev);
834 831
835 /* Initialise the MMC (if present) to disable all interrupts. */ 832 /* Initialise the MMC (if present) to disable all interrupts. */
836 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 833 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
@@ -934,46 +931,6 @@ static int stmmac_release(struct net_device *dev)
934 return 0; 931 return 0;
935} 932}
936 933
937/*
938 * To perform emulated hardware segmentation on skb.
939 */
940static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
941{
942 struct sk_buff *segs, *curr_skb;
943 int gso_segs = skb_shinfo(skb)->gso_segs;
944
945 /* Estimate the number of fragments in the worst case */
946 if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
947 netif_stop_queue(priv->dev);
948 TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
949 __func__);
950 if (stmmac_tx_avail(priv) < gso_segs)
951 return NETDEV_TX_BUSY;
952
953 netif_wake_queue(priv->dev);
954 }
955 TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
956 skb, skb->len);
957
958 segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
959 if (IS_ERR(segs))
960 goto sw_tso_end;
961
962 do {
963 curr_skb = segs;
964 segs = segs->next;
965 TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
966 "*next %p\n", curr_skb->len, curr_skb, segs);
967 curr_skb->next = NULL;
968 stmmac_xmit(curr_skb, priv->dev);
969 } while (segs);
970
971sw_tso_end:
972 dev_kfree_skb(skb);
973
974 return NETDEV_TX_OK;
975}
976
977static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb, 934static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
978 struct net_device *dev, 935 struct net_device *dev,
979 int csum_insertion) 936 int csum_insertion)
@@ -1051,16 +1008,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1051 !skb_is_gso(skb) ? "isn't" : "is"); 1008 !skb_is_gso(skb) ? "isn't" : "is");
1052#endif 1009#endif
1053 1010
1054 if (unlikely(skb_is_gso(skb))) 1011 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1055 return stmmac_sw_tso(priv, skb);
1056
1057 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1058 if (unlikely((!priv->plat->tx_coe) ||
1059 (priv->no_csum_insertion)))
1060 skb_checksum_help(skb);
1061 else
1062 csum_insertion = 1;
1063 }
1064 1012
1065 desc = priv->dma_tx + entry; 1013 desc = priv->dma_tx + entry;
1066 first = desc; 1014 first = desc;
@@ -1380,18 +1328,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1380 return -EINVAL; 1328 return -EINVAL;
1381 } 1329 }
1382 1330
1331 dev->mtu = new_mtu;
1332 netdev_update_features(dev);
1333
1334 return 0;
1335}
1336
1337static u32 stmmac_fix_features(struct net_device *dev, u32 features)
1338{
1339 struct stmmac_priv *priv = netdev_priv(dev);
1340
1341 if (!priv->rx_coe)
1342 features &= ~NETIF_F_RXCSUM;
1343 if (!priv->plat->tx_coe)
1344 features &= ~NETIF_F_ALL_CSUM;
1345
1383 /* Some GMAC devices have a bugged Jumbo frame support that 1346 /* Some GMAC devices have a bugged Jumbo frame support that
1384 * needs to have the Tx COE disabled for oversized frames 1347 * needs to have the Tx COE disabled for oversized frames
1385 * (due to limited buffer sizes). In this case we disable 1348 * (due to limited buffer sizes). In this case we disable
1386 * the TX csum insertionin the TDES and not use SF. */ 1349 * the TX csum insertionin the TDES and not use SF. */
1387 if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN)) 1350 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
1388 priv->no_csum_insertion = 1; 1351 features &= ~NETIF_F_ALL_CSUM;
1389 else
1390 priv->no_csum_insertion = 0;
1391 1352
1392 dev->mtu = new_mtu; 1353 return features;
1393
1394 return 0;
1395} 1354}
1396 1355
1397static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 1356static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
@@ -1471,6 +1430,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
1471 .ndo_start_xmit = stmmac_xmit, 1430 .ndo_start_xmit = stmmac_xmit,
1472 .ndo_stop = stmmac_release, 1431 .ndo_stop = stmmac_release,
1473 .ndo_change_mtu = stmmac_change_mtu, 1432 .ndo_change_mtu = stmmac_change_mtu,
1433 .ndo_fix_features = stmmac_fix_features,
1474 .ndo_set_multicast_list = stmmac_multicast_list, 1434 .ndo_set_multicast_list = stmmac_multicast_list,
1475 .ndo_tx_timeout = stmmac_tx_timeout, 1435 .ndo_tx_timeout = stmmac_tx_timeout,
1476 .ndo_do_ioctl = stmmac_ioctl, 1436 .ndo_do_ioctl = stmmac_ioctl,
@@ -1501,8 +1461,8 @@ static int stmmac_probe(struct net_device *dev)
1501 dev->netdev_ops = &stmmac_netdev_ops; 1461 dev->netdev_ops = &stmmac_netdev_ops;
1502 stmmac_set_ethtool_ops(dev); 1462 stmmac_set_ethtool_ops(dev);
1503 1463
1504 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | 1464 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1505 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1465 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1506 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1466 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1507#ifdef STMMAC_VLAN_TAG_USED 1467#ifdef STMMAC_VLAN_TAG_USED
1508 /* Both mac100 and gmac support receive VLAN tag detection */ 1468 /* Both mac100 and gmac support receive VLAN tag detection */
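The stmmac changes above replace the private no_csum_insertion bookkeeping with the generic ndo_fix_features hook: the driver masks out offload bits it cannot honour for the current configuration and the core re-applies the result through netdev_update_features(). A rough userspace model of that flow, with simplified flag values and stand-in struct/function names rather than the real netdev core:

#include <stdio.h>

#define NETIF_F_RXCSUM    (1u << 0)	/* simplified flag values */
#define NETIF_F_ALL_CSUM  (1u << 1)
#define ETH_DATA_LEN      1500

struct fake_dev {
	unsigned int features;
	unsigned int hw_features;
	int mtu;
	int rx_coe, tx_coe, bugged_jumbo;
};

/* Driver-side hook: drop bits the hardware cannot support right now. */
static unsigned int fix_features(const struct fake_dev *dev,
				 unsigned int features)
{
	if (!dev->rx_coe)
		features &= ~NETIF_F_RXCSUM;
	if (!dev->tx_coe || (dev->bugged_jumbo && dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_ALL_CSUM;
	return features;
}

/* Core-side helper: re-evaluate features, e.g. after an MTU change. */
static void update_features(struct fake_dev *dev)
{
	dev->features = fix_features(dev, dev->hw_features);
}

int main(void)
{
	struct fake_dev dev = {
		.hw_features = NETIF_F_RXCSUM | NETIF_F_ALL_CSUM,
		.mtu = 9000, .rx_coe = 1, .tx_coe = 1, .bugged_jumbo = 1,
	};

	update_features(&dev);	/* jumbo MTU disables TX csum insertion */
	printf("features=0x%x\n", dev.features);	/* 0x1: RXCSUM only */
	return 0;
}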
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index d3be735c4719..ab5930099267 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1294,7 +1294,7 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
1294 autoneg = 1; 1294 autoneg = 1;
1295 } else { 1295 } else {
1296 autoneg = 0; 1296 autoneg = 0;
1297 speed = ep->speed; 1297 speed = ethtool_cmd_speed(ep);
1298 duplex = ep->duplex; 1298 duplex = ep->duplex;
1299 } 1299 }
1300 1300
@@ -2642,7 +2642,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2642 /* Return current PHY settings */ 2642 /* Return current PHY settings */
2643 spin_lock_irq(&gp->lock); 2643 spin_lock_irq(&gp->lock);
2644 cmd->autoneg = gp->want_autoneg; 2644 cmd->autoneg = gp->want_autoneg;
2645 cmd->speed = gp->phy_mii.speed; 2645 ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
2646 cmd->duplex = gp->phy_mii.duplex; 2646 cmd->duplex = gp->phy_mii.duplex;
2647 cmd->advertising = gp->phy_mii.advertising; 2647 cmd->advertising = gp->phy_mii.advertising;
2648 2648
@@ -2659,7 +2659,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2659 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2659 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2660 SUPPORTED_Autoneg); 2660 SUPPORTED_Autoneg);
2661 cmd->advertising = cmd->supported; 2661 cmd->advertising = cmd->supported;
2662 cmd->speed = 0; 2662 ethtool_cmd_speed_set(cmd, 0);
2663 cmd->duplex = cmd->port = cmd->phy_address = 2663 cmd->duplex = cmd->port = cmd->phy_address =
2664 cmd->transceiver = cmd->autoneg = 0; 2664 cmd->transceiver = cmd->autoneg = 0;
2665 2665
@@ -2673,7 +2673,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2673 cmd->advertising = cmd->supported; 2673 cmd->advertising = cmd->supported;
2674 cmd->transceiver = XCVR_INTERNAL; 2674 cmd->transceiver = XCVR_INTERNAL;
2675 if (gp->lstate == link_up) 2675 if (gp->lstate == link_up)
2676 cmd->speed = SPEED_1000; 2676 ethtool_cmd_speed_set(cmd, SPEED_1000);
2677 cmd->duplex = DUPLEX_FULL; 2677 cmd->duplex = DUPLEX_FULL;
2678 cmd->autoneg = 1; 2678 cmd->autoneg = 1;
2679 } 2679 }
@@ -2686,6 +2686,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2686static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2686static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2687{ 2687{
2688 struct gem *gp = netdev_priv(dev); 2688 struct gem *gp = netdev_priv(dev);
2689 u32 speed = ethtool_cmd_speed(cmd);
2689 2690
2690 /* Verify the settings we care about. */ 2691 /* Verify the settings we care about. */
2691 if (cmd->autoneg != AUTONEG_ENABLE && 2692 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -2697,9 +2698,9 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2697 return -EINVAL; 2698 return -EINVAL;
2698 2699
2699 if (cmd->autoneg == AUTONEG_DISABLE && 2700 if (cmd->autoneg == AUTONEG_DISABLE &&
2700 ((cmd->speed != SPEED_1000 && 2701 ((speed != SPEED_1000 &&
2701 cmd->speed != SPEED_100 && 2702 speed != SPEED_100 &&
2702 cmd->speed != SPEED_10) || 2703 speed != SPEED_10) ||
2703 (cmd->duplex != DUPLEX_HALF && 2704 (cmd->duplex != DUPLEX_HALF &&
2704 cmd->duplex != DUPLEX_FULL))) 2705 cmd->duplex != DUPLEX_FULL)))
2705 return -EINVAL; 2706 return -EINVAL;
@@ -3146,7 +3147,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3146 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3147 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3147 3148
3148 /* GEM can do it all... */ 3149 /* GEM can do it all... */
3149 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3150 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3151 dev->features |= dev->hw_features | NETIF_F_RXCSUM | NETIF_F_LLTX;
3150 if (pci_using_dac) 3152 if (pci_using_dac)
3151 dev->features |= NETIF_F_HIGHDMA; 3153 dev->features |= NETIF_F_HIGHDMA;
3152 3154
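The sungem hunks above are part of the tree-wide move to ethtool_cmd_speed()/ethtool_cmd_speed_set(), which combine the legacy 16-bit speed field with its high-order extension so link speeds that overflow 16 bits can be reported. A hedged sketch of what those helpers do; the struct here keeps only the two split speed fields and is not the real struct ethtool_cmd:

#include <stdint.h>
#include <stdio.h>

/* Only the split speed fields, assuming ethtool's low/high encoding. */
struct cmd_speed {
	uint16_t speed;		/* low 16 bits */
	uint16_t speed_hi;	/* high 16 bits */
};

static void cmd_speed_set(struct cmd_speed *cmd, uint32_t speed)
{
	cmd->speed = speed & 0xffff;
	cmd->speed_hi = speed >> 16;
}

static uint32_t cmd_speed(const struct cmd_speed *cmd)
{
	return ((uint32_t)cmd->speed_hi << 16) | cmd->speed;
}

int main(void)
{
	struct cmd_speed cmd;

	cmd_speed_set(&cmd, 100000);	/* 100000 Mb/s would overflow u16 */
	printf("%u\n", cmd_speed(&cmd));
	return 0;
}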
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index eb4f59fb01e9..d381a0f9ee18 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1383,7 +1383,7 @@ force_link:
1383 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { 1383 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
1384 hp->sw_bmcr = BMCR_SPEED100; 1384 hp->sw_bmcr = BMCR_SPEED100;
1385 } else { 1385 } else {
1386 if (ep->speed == SPEED_100) 1386 if (ethtool_cmd_speed(ep) == SPEED_100)
1387 hp->sw_bmcr = BMCR_SPEED100; 1387 hp->sw_bmcr = BMCR_SPEED100;
1388 else 1388 else
1389 hp->sw_bmcr = 0; 1389 hp->sw_bmcr = 0;
@@ -2401,6 +2401,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
2401static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2401static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2402{ 2402{
2403 struct happy_meal *hp = netdev_priv(dev); 2403 struct happy_meal *hp = netdev_priv(dev);
2404 u32 speed;
2404 2405
2405 cmd->supported = 2406 cmd->supported =
2406 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2407 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
@@ -2420,10 +2421,9 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2420 2421
2421 if (hp->sw_bmcr & BMCR_ANENABLE) { 2422 if (hp->sw_bmcr & BMCR_ANENABLE) {
2422 cmd->autoneg = AUTONEG_ENABLE; 2423 cmd->autoneg = AUTONEG_ENABLE;
2423 cmd->speed = 2424 speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2424 (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? 2425 SPEED_100 : SPEED_10);
2425 SPEED_100 : SPEED_10; 2426 if (speed == SPEED_100)
2426 if (cmd->speed == SPEED_100)
2427 cmd->duplex = 2427 cmd->duplex =
2428 (hp->sw_lpa & (LPA_100FULL)) ? 2428 (hp->sw_lpa & (LPA_100FULL)) ?
2429 DUPLEX_FULL : DUPLEX_HALF; 2429 DUPLEX_FULL : DUPLEX_HALF;
@@ -2433,13 +2433,12 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2433 DUPLEX_FULL : DUPLEX_HALF; 2433 DUPLEX_FULL : DUPLEX_HALF;
2434 } else { 2434 } else {
2435 cmd->autoneg = AUTONEG_DISABLE; 2435 cmd->autoneg = AUTONEG_DISABLE;
2436 cmd->speed = 2436 speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2437 (hp->sw_bmcr & BMCR_SPEED100) ?
2438 SPEED_100 : SPEED_10;
2439 cmd->duplex = 2437 cmd->duplex =
2440 (hp->sw_bmcr & BMCR_FULLDPLX) ? 2438 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2441 DUPLEX_FULL : DUPLEX_HALF; 2439 DUPLEX_FULL : DUPLEX_HALF;
2442 } 2440 }
2441 ethtool_cmd_speed_set(cmd, speed);
2443 return 0; 2442 return 0;
2444} 2443}
2445 2444
@@ -2452,8 +2451,8 @@ static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2452 cmd->autoneg != AUTONEG_DISABLE) 2451 cmd->autoneg != AUTONEG_DISABLE)
2453 return -EINVAL; 2452 return -EINVAL;
2454 if (cmd->autoneg == AUTONEG_DISABLE && 2453 if (cmd->autoneg == AUTONEG_DISABLE &&
2455 ((cmd->speed != SPEED_100 && 2454 ((ethtool_cmd_speed(cmd) != SPEED_100 &&
2456 cmd->speed != SPEED_10) || 2455 ethtool_cmd_speed(cmd) != SPEED_10) ||
2457 (cmd->duplex != DUPLEX_HALF && 2456 (cmd->duplex != DUPLEX_HALF &&
2458 cmd->duplex != DUPLEX_FULL))) 2457 cmd->duplex != DUPLEX_FULL)))
2459 return -EINVAL; 2458 return -EINVAL;
@@ -2788,7 +2787,8 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2788 dev->ethtool_ops = &hme_ethtool_ops; 2787 dev->ethtool_ops = &hme_ethtool_ops;
2789 2788
2790 /* Happy Meal can do it all... */ 2789 /* Happy Meal can do it all... */
2791 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2790 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2791 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2792 2792
2793 dev->irq = op->archdata.irqs[0]; 2793 dev->irq = op->archdata.irqs[0];
2794 2794
@@ -3113,7 +3113,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3113 dev->dma = 0; 3113 dev->dma = 0;
3114 3114
3115 /* Happy Meal can do it all... */ 3115 /* Happy Meal can do it all... */
3116 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 3116 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
3117 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
3117 3118
3118#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 3119#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3119 /* Hook up PCI register/descriptor accessors. */ 3120 /* Hook up PCI register/descriptor accessors. */
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 8564ec5cfb7f..80fbee0d40af 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -2017,9 +2017,11 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2017 ndev->irq = pdev->irq; 2017 ndev->irq = pdev->irq;
2018 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO 2018 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
2019 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2019 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2020 NETIF_F_HW_VLAN_FILTER 2020 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
2021 /*| NETIF_F_FRAGLIST */ 2021 /*| NETIF_F_FRAGLIST */
2022 ; 2022 ;
2023 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2024 NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
2023 2025
2024 if (pci_using_dac) 2026 if (pci_using_dac)
2025 ndev->features |= NETIF_F_HIGHDMA; 2027 ndev->features |= NETIF_F_HIGHDMA;
@@ -2149,7 +2151,7 @@ static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
2149 2151
2150 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 2152 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
2151 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 2153 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
2152 ecmd->speed = SPEED_10000; 2154 ethtool_cmd_speed_set(ecmd, SPEED_10000);
2153 ecmd->duplex = DUPLEX_FULL; 2155 ecmd->duplex = DUPLEX_FULL;
2154 ecmd->port = PORT_FIBRE; 2156 ecmd->port = PORT_FIBRE;
2155 ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */ 2157 ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */
@@ -2188,24 +2190,6 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2188} 2190}
2189 2191
2190/* 2192/*
2191 * bdx_get_rx_csum - report whether receive checksums are turned on or off
2192 * @netdev
2193 */
2194static u32 bdx_get_rx_csum(struct net_device *netdev)
2195{
2196 return 1; /* always on */
2197}
2198
2199/*
2200 * bdx_get_tx_csum - report whether transmit checksums are turned on or off
2201 * @netdev
2202 */
2203static u32 bdx_get_tx_csum(struct net_device *netdev)
2204{
2205 return (netdev->features & NETIF_F_IP_CSUM) != 0;
2206}
2207
2208/*
2209 * bdx_get_coalesce - get interrupt coalescing parameters 2193 * bdx_get_coalesce - get interrupt coalescing parameters
2210 * @netdev 2194 * @netdev
2211 * @ecoal 2195 * @ecoal
@@ -2424,10 +2408,6 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
2424 .set_coalesce = bdx_set_coalesce, 2408 .set_coalesce = bdx_set_coalesce,
2425 .get_ringparam = bdx_get_ringparam, 2409 .get_ringparam = bdx_get_ringparam,
2426 .set_ringparam = bdx_set_ringparam, 2410 .set_ringparam = bdx_set_ringparam,
2427 .get_rx_csum = bdx_get_rx_csum,
2428 .get_tx_csum = bdx_get_tx_csum,
2429 .get_sg = ethtool_op_get_sg,
2430 .get_tso = ethtool_op_get_tso,
2431 .get_strings = bdx_get_strings, 2411 .get_strings = bdx_get_strings,
2432 .get_sset_count = bdx_get_sset_count, 2412 .get_sset_count = bdx_get_sset_count,
2433 .get_ethtool_stats = bdx_get_ethtool_stats, 2413 .get_ethtool_stats = bdx_get_ethtool_stats,
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7a5daefb6f33..d5a1f9e3794c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -62,12 +62,36 @@
62 62
63#include "tg3.h" 63#include "tg3.h"
64 64
65/* Functions & macros to verify TG3_FLAGS types */
66
67static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
68{
69 return test_bit(flag, bits);
70}
71
72static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
73{
74 set_bit(flag, bits);
75}
76
77static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
78{
79 clear_bit(flag, bits);
80}
81
82#define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84#define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86#define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
65#define DRV_MODULE_NAME "tg3" 89#define DRV_MODULE_NAME "tg3"
66#define TG3_MAJ_NUM 3 90#define TG3_MAJ_NUM 3
67#define TG3_MIN_NUM 117 91#define TG3_MIN_NUM 118
68#define DRV_MODULE_VERSION \ 92#define DRV_MODULE_VERSION \
69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
70#define DRV_MODULE_RELDATE "January 25, 2011" 94#define DRV_MODULE_RELDATE "April 22, 2011"
71 95
72#define TG3_DEF_MAC_MODE 0 96#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0 97#define TG3_DEF_RX_MODE 0
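The tg3_flag()/tg3_flag_set()/tg3_flag_clear() wrappers added in the hunk above replace the old tg3_flags/tg3_flags2/tg3_flags3 words with a single enum-indexed bitmap accessed through test_bit()/set_bit()/clear_bit(). A compact userspace model of the same idea; the flag names, helper implementations and struct are assumptions standing in for the kernel bitops:

#include <limits.h>
#include <stdio.h>

enum FLAGS { FLAG_TAGGED_STATUS, FLAG_ENABLE_APE, FLAG_MAX };

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS  ((FLAG_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG)

static int test_flag(int f, const unsigned long *b)
{ return (b[f / BITS_PER_LONG] >> (f % BITS_PER_LONG)) & 1; }
static void set_flag(int f, unsigned long *b)
{ b[f / BITS_PER_LONG] |= 1UL << (f % BITS_PER_LONG); }
static void clear_flag(int f, unsigned long *b)
{ b[f / BITS_PER_LONG] &= ~(1UL << (f % BITS_PER_LONG)); }

struct fake_tg3 { unsigned long flags[BITMAP_LONGS]; };

/* Token pasting gives the same terse call sites as tg3_flag(tp, FOO). */
#define dev_flag(tp, f)       test_flag(FLAG_##f, (tp)->flags)
#define dev_flag_set(tp, f)   set_flag(FLAG_##f, (tp)->flags)
#define dev_flag_clear(tp, f) clear_flag(FLAG_##f, (tp)->flags)

int main(void)
{
	struct fake_tg3 tp = { { 0 } };

	dev_flag_set(&tp, ENABLE_APE);
	printf("%d %d\n", dev_flag(&tp, ENABLE_APE),
	       dev_flag(&tp, TAGGED_STATUS));	/* 1 0 */
	dev_flag_clear(&tp, ENABLE_APE);
	return 0;
}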
@@ -85,26 +109,25 @@
85/* length of time before we decide the hardware is borked, 109/* length of time before we decide the hardware is borked,
86 * and dev->tx_timeout() should be called to fix the problem 110 * and dev->tx_timeout() should be called to fix the problem
87 */ 111 */
112
88#define TG3_TX_TIMEOUT (5 * HZ) 113#define TG3_TX_TIMEOUT (5 * HZ)
89 114
90/* hardware minimum and maximum for a single frame's data payload */ 115/* hardware minimum and maximum for a single frame's data payload */
91#define TG3_MIN_MTU 60 116#define TG3_MIN_MTU 60
92#define TG3_MAX_MTU(tp) \ 117#define TG3_MAX_MTU(tp) \
93 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500) 118 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
94 119
95/* These numbers seem to be hard coded in the NIC firmware somehow. 120/* These numbers seem to be hard coded in the NIC firmware somehow.
96 * You can't change the ring sizes, but you can change where you place 121 * You can't change the ring sizes, but you can change where you place
97 * them in the NIC onboard memory. 122 * them in the NIC onboard memory.
98 */ 123 */
99#define TG3_RX_STD_RING_SIZE(tp) \ 124#define TG3_RX_STD_RING_SIZE(tp) \
100 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \ 125 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \ 126 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
102 RX_STD_MAX_SIZE_5717 : 512)
103#define TG3_DEF_RX_RING_PENDING 200 127#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JMB_RING_SIZE(tp) \ 128#define TG3_RX_JMB_RING_SIZE(tp) \
105 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \ 129 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \ 130 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
107 1024 : 256)
108#define TG3_DEF_RX_JUMBO_RING_PENDING 100 131#define TG3_DEF_RX_JUMBO_RING_PENDING 100
109#define TG3_RSS_INDIR_TBL_SIZE 128 132#define TG3_RSS_INDIR_TBL_SIZE 128
110 133
@@ -167,11 +190,6 @@
167 190
168#define TG3_RAW_IP_ALIGN 2 191#define TG3_RAW_IP_ALIGN 2
169 192
170/* number of ETHTOOL_GSTATS u64's */
171#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
172
173#define TG3_NUM_TEST 6
174
175#define TG3_FW_UPDATE_TIMEOUT_SEC 5 193#define TG3_FW_UPDATE_TIMEOUT_SEC 5
176 194
177#define FIRMWARE_TG3 "tigon/tg3.bin" 195#define FIRMWARE_TG3 "tigon/tg3.bin"
@@ -266,6 +284,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, 284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, 285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, 286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
269 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 288 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
270 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 289 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
271 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 290 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -280,7 +299,7 @@ MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
280 299
281static const struct { 300static const struct {
282 const char string[ETH_GSTRING_LEN]; 301 const char string[ETH_GSTRING_LEN];
283} ethtool_stats_keys[TG3_NUM_STATS] = { 302} ethtool_stats_keys[] = {
284 { "rx_octets" }, 303 { "rx_octets" },
285 { "rx_fragments" }, 304 { "rx_fragments" },
286 { "rx_ucast_packets" }, 305 { "rx_ucast_packets" },
@@ -345,6 +364,7 @@ static const struct {
345 { "dma_write_prioq_full" }, 364 { "dma_write_prioq_full" },
346 { "rxbds_empty" }, 365 { "rxbds_empty" },
347 { "rx_discards" }, 366 { "rx_discards" },
367 { "mbuf_lwm_thresh_hit" },
348 { "rx_errors" }, 368 { "rx_errors" },
349 { "rx_threshold_hit" }, 369 { "rx_threshold_hit" },
350 370
@@ -359,9 +379,12 @@ static const struct {
359 { "nic_tx_threshold_hit" } 379 { "nic_tx_threshold_hit" }
360}; 380};
361 381
382#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
383
384
362static const struct { 385static const struct {
363 const char string[ETH_GSTRING_LEN]; 386 const char string[ETH_GSTRING_LEN];
364} ethtool_test_keys[TG3_NUM_TEST] = { 387} ethtool_test_keys[] = {
365 { "nvram test (online) " }, 388 { "nvram test (online) " },
366 { "link test (online) " }, 389 { "link test (online) " },
367 { "register test (offline)" }, 390 { "register test (offline)" },
@@ -370,6 +393,9 @@ static const struct {
370 { "interrupt test (offline)" }, 393 { "interrupt test (offline)" },
371}; 394};
372 395
396#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
397
398
373static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 399static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
374{ 400{
375 writel(val, tp->regs + off); 401 writel(val, tp->regs + off);
@@ -467,8 +493,7 @@ static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
467 */ 493 */
468static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) 494static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
469{ 495{
470 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) || 496 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
471 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
472 /* Non-posted methods */ 497 /* Non-posted methods */
473 tp->write32(tp, off, val); 498 tp->write32(tp, off, val);
474 else { 499 else {
@@ -488,8 +513,7 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
488static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) 513static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
489{ 514{
490 tp->write32_mbox(tp, off, val); 515 tp->write32_mbox(tp, off, val);
491 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) && 516 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
492 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
493 tp->read32_mbox(tp, off); 517 tp->read32_mbox(tp, off);
494} 518}
495 519
@@ -497,9 +521,9 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
497{ 521{
498 void __iomem *mbox = tp->regs + off; 522 void __iomem *mbox = tp->regs + off;
499 writel(val, mbox); 523 writel(val, mbox);
500 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) 524 if (tg3_flag(tp, TXD_MBOX_HWBUG))
501 writel(val, mbox); 525 writel(val, mbox);
502 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) 526 if (tg3_flag(tp, MBOX_WRITE_REORDER))
503 readl(mbox); 527 readl(mbox);
504} 528}
505 529
@@ -533,7 +557,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
533 return; 557 return;
534 558
535 spin_lock_irqsave(&tp->indirect_lock, flags); 559 spin_lock_irqsave(&tp->indirect_lock, flags);
536 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { 560 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
537 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 561 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
538 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 562 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
539 563
@@ -560,7 +584,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
560 } 584 }
561 585
562 spin_lock_irqsave(&tp->indirect_lock, flags); 586 spin_lock_irqsave(&tp->indirect_lock, flags);
563 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) { 587 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
564 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); 588 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
565 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); 589 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
566 590
@@ -597,7 +621,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
597 int ret = 0; 621 int ret = 0;
598 u32 status, req, gnt; 622 u32 status, req, gnt;
599 623
600 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 624 if (!tg3_flag(tp, ENABLE_APE))
601 return 0; 625 return 0;
602 626
603 switch (locknum) { 627 switch (locknum) {
@@ -643,7 +667,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
643{ 667{
644 u32 gnt; 668 u32 gnt;
645 669
646 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 670 if (!tg3_flag(tp, ENABLE_APE))
647 return; 671 return;
648 672
649 switch (locknum) { 673 switch (locknum) {
@@ -687,14 +711,14 @@ static void tg3_enable_ints(struct tg3 *tp)
687 struct tg3_napi *tnapi = &tp->napi[i]; 711 struct tg3_napi *tnapi = &tp->napi[i];
688 712
689 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 713 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
690 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) 714 if (tg3_flag(tp, 1SHOT_MSI))
691 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 715 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
692 716
693 tp->coal_now |= tnapi->coal_now; 717 tp->coal_now |= tnapi->coal_now;
694 } 718 }
695 719
696 /* Force an initial interrupt */ 720 /* Force an initial interrupt */
697 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && 721 if (!tg3_flag(tp, TAGGED_STATUS) &&
698 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) 722 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
699 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); 723 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
700 else 724 else
@@ -710,9 +734,7 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
710 unsigned int work_exists = 0; 734 unsigned int work_exists = 0;
711 735
712 /* check for phy events */ 736 /* check for phy events */
713 if (!(tp->tg3_flags & 737 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
714 (TG3_FLAG_USE_LINKCHG_REG |
715 TG3_FLAG_POLL_SERDES))) {
716 if (sblk->status & SD_STATUS_LINK_CHG) 738 if (sblk->status & SD_STATUS_LINK_CHG)
717 work_exists = 1; 739 work_exists = 1;
718 } 740 }
@@ -740,8 +762,7 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
740 * The last_tag we write above tells the chip which piece of 762 * The last_tag we write above tells the chip which piece of
741 * work we've completed. 763 * work we've completed.
742 */ 764 */
743 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) && 765 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
744 tg3_has_work(tnapi))
745 tw32(HOSTCC_MODE, tp->coalesce_mode | 766 tw32(HOSTCC_MODE, tp->coalesce_mode |
746 HOSTCC_MODE_ENABLE | tnapi->coal_now); 767 HOSTCC_MODE_ENABLE | tnapi->coal_now);
747} 768}
@@ -751,8 +772,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
751 u32 clock_ctrl; 772 u32 clock_ctrl;
752 u32 orig_clock_ctrl; 773 u32 orig_clock_ctrl;
753 774
754 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || 775 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
755 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
756 return; 776 return;
757 777
758 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); 778 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
@@ -763,7 +783,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
763 0x1f); 783 0x1f);
764 tp->pci_clock_ctrl = clock_ctrl; 784 tp->pci_clock_ctrl = clock_ctrl;
765 785
766 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 786 if (tg3_flag(tp, 5705_PLUS)) {
767 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { 787 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
768 tw32_wait_f(TG3PCI_CLOCK_CTRL, 788 tw32_wait_f(TG3PCI_CLOCK_CTRL,
769 clock_ctrl | CLOCK_CTRL_625_CORE, 40); 789 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
@@ -880,6 +900,104 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
880 return ret; 900 return ret;
881} 901}
882 902
903static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
904{
905 int err;
906
907 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
908 if (err)
909 goto done;
910
911 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
912 if (err)
913 goto done;
914
915 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
916 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
917 if (err)
918 goto done;
919
920 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
921
922done:
923 return err;
924}
925
926static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
927{
928 int err;
929
930 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
931 if (err)
932 goto done;
933
934 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
935 if (err)
936 goto done;
937
938 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
939 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
940 if (err)
941 goto done;
942
943 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
944
945done:
946 return err;
947}
948
949static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
950{
951 int err;
952
953 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
954 if (!err)
955 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
956
957 return err;
958}
959
960static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
961{
962 int err;
963
964 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
965 if (!err)
966 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
967
968 return err;
969}
970
971static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
972{
973 int err;
974
975 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
976 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
977 MII_TG3_AUXCTL_SHDWSEL_MISC);
978 if (!err)
979 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
980
981 return err;
982}
983
984static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
985{
986 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
987 set |= MII_TG3_AUXCTL_MISC_WREN;
988
989 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
990}
991
992#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
993 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
994 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
995 MII_TG3_AUXCTL_ACTL_TX_6DB)
996
997#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
998 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
999 MII_TG3_AUXCTL_ACTL_TX_6DB);
1000
883static int tg3_bmcr_reset(struct tg3 *tp) 1001static int tg3_bmcr_reset(struct tg3 *tp)
884{ 1002{
885 u32 phy_control; 1003 u32 phy_control;
@@ -982,7 +1100,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
982 return; 1100 return;
983 } 1101 }
984 1102
985 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) 1103 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
986 val |= MAC_PHYCFG2_EMODE_MASK_MASK | 1104 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
987 MAC_PHYCFG2_FMODE_MASK_MASK | 1105 MAC_PHYCFG2_FMODE_MASK_MASK |
988 MAC_PHYCFG2_GMODE_MASK_MASK | 1106 MAC_PHYCFG2_GMODE_MASK_MASK |
@@ -995,10 +1113,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
995 val = tr32(MAC_PHYCFG1); 1113 val = tr32(MAC_PHYCFG1);
996 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | 1114 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
997 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); 1115 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
998 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { 1116 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
999 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1117 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1000 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; 1118 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1001 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1119 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1002 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; 1120 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1003 } 1121 }
1004 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | 1122 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
@@ -1013,13 +1131,13 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
1013 MAC_RGMII_MODE_TX_ENABLE | 1131 MAC_RGMII_MODE_TX_ENABLE |
1014 MAC_RGMII_MODE_TX_LOWPWR | 1132 MAC_RGMII_MODE_TX_LOWPWR |
1015 MAC_RGMII_MODE_TX_RESET); 1133 MAC_RGMII_MODE_TX_RESET);
1016 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) { 1134 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1017 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1135 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1018 val |= MAC_RGMII_MODE_RX_INT_B | 1136 val |= MAC_RGMII_MODE_RX_INT_B |
1019 MAC_RGMII_MODE_RX_QUALITY | 1137 MAC_RGMII_MODE_RX_QUALITY |
1020 MAC_RGMII_MODE_RX_ACTIVITY | 1138 MAC_RGMII_MODE_RX_ACTIVITY |
1021 MAC_RGMII_MODE_RX_ENG_DET; 1139 MAC_RGMII_MODE_RX_ENG_DET;
1022 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1140 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1023 val |= MAC_RGMII_MODE_TX_ENABLE | 1141 val |= MAC_RGMII_MODE_TX_ENABLE |
1024 MAC_RGMII_MODE_TX_LOWPWR | 1142 MAC_RGMII_MODE_TX_LOWPWR |
1025 MAC_RGMII_MODE_TX_RESET; 1143 MAC_RGMII_MODE_TX_RESET;
@@ -1033,7 +1151,7 @@ static void tg3_mdio_start(struct tg3 *tp)
1033 tw32_f(MAC_MI_MODE, tp->mi_mode); 1151 tw32_f(MAC_MI_MODE, tp->mi_mode);
1034 udelay(80); 1152 udelay(80);
1035 1153
1036 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && 1154 if (tg3_flag(tp, MDIOBUS_INITED) &&
1037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 1155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1038 tg3_mdio_config_5785(tp); 1156 tg3_mdio_config_5785(tp);
1039} 1157}
@@ -1044,8 +1162,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1044 u32 reg; 1162 u32 reg;
1045 struct phy_device *phydev; 1163 struct phy_device *phydev;
1046 1164
1047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 1165 if (tg3_flag(tp, 5717_PLUS)) {
1048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
1049 u32 is_serdes; 1166 u32 is_serdes;
1050 1167
1051 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1; 1168 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
@@ -1062,8 +1179,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1062 1179
1063 tg3_mdio_start(tp); 1180 tg3_mdio_start(tp);
1064 1181
1065 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) || 1182 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1066 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1067 return 0; 1183 return 0;
1068 1184
1069 tp->mdio_bus = mdiobus_alloc(); 1185 tp->mdio_bus = mdiobus_alloc();
@@ -1119,11 +1235,11 @@ static int tg3_mdio_init(struct tg3 *tp)
1119 PHY_BRCM_RX_REFCLK_UNUSED | 1235 PHY_BRCM_RX_REFCLK_UNUSED |
1120 PHY_BRCM_DIS_TXCRXC_NOENRGY | 1236 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1121 PHY_BRCM_AUTO_PWRDWN_ENABLE; 1237 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1122 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE) 1238 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1123 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; 1239 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1124 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1240 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1125 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; 1241 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1126 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1242 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1127 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; 1243 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1128 /* fallthru */ 1244 /* fallthru */
1129 case PHY_ID_RTL8211C: 1245 case PHY_ID_RTL8211C:
@@ -1137,7 +1253,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1137 break; 1253 break;
1138 } 1254 }
1139 1255
1140 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED; 1256 tg3_flag_set(tp, MDIOBUS_INITED);
1141 1257
1142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 1258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1143 tg3_mdio_config_5785(tp); 1259 tg3_mdio_config_5785(tp);
@@ -1147,59 +1263,13 @@ static int tg3_mdio_init(struct tg3 *tp)
1147 1263
1148static void tg3_mdio_fini(struct tg3 *tp) 1264static void tg3_mdio_fini(struct tg3 *tp)
1149{ 1265{
1150 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { 1266 if (tg3_flag(tp, MDIOBUS_INITED)) {
1151 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; 1267 tg3_flag_clear(tp, MDIOBUS_INITED);
1152 mdiobus_unregister(tp->mdio_bus); 1268 mdiobus_unregister(tp->mdio_bus);
1153 mdiobus_free(tp->mdio_bus); 1269 mdiobus_free(tp->mdio_bus);
1154 } 1270 }
1155} 1271}
 
-static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
-{
-	int err;
-
-	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
-	if (err)
-		goto done;
-
-	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
-	if (err)
-		goto done;
-
-	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
-			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
-	if (err)
-		goto done;
-
-	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
-
-done:
-	return err;
-}
-
-static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
-{
-	int err;
-
-	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
-	if (err)
-		goto done;
-
-	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
-	if (err)
-		goto done;
-
-	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
-			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
-	if (err)
-		goto done;
-
-	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
-
-done:
-	return err;
-}
-
 /* tp->lock is held. */
 static inline void tg3_generate_fw_event(struct tg3 *tp)
 {
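
Throughout this patch, tests of tp->tg3_flags/tg3_flags2/tg3_flags3 against TG3_FLAG_*/TG3_FLG2_*/TG3_FLG3_* masks become tg3_flag(), tg3_flag_set() and tg3_flag_clear(). The accessors themselves come from the companion tg3.h change, which is outside this section; a minimal sketch of the usual shape of such a conversion, assuming an enumerated flag index over an unsigned long bitmap (the enumerator names below are placeholders, not the patch's full list), is:

	/* Sketch only -- the real definitions live in the tg3.h hunk of
	 * this patch, not reproduced here. */
	enum TG3_FLAGS {
		TG3_FLAG_TAGGED_STATUS = 0,
		TG3_FLAG_USE_PHYLIB,
		TG3_FLAG_MDIOBUS_INITED,
		/* ... one enumerator per former flag bit ... */
		TG3_FLAG_NUMBER_OF_FLAGS,	/* must stay last */
	};

	#define tg3_flag(tp, flag) \
		test_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
	#define tg3_flag_set(tp, flag) \
		set_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
	#define tg3_flag_clear(tp, flag) \
		clear_bit(TG3_FLAG_##flag, (tp)->tg3_flags)

	/* in struct tg3, replacing the three u32 flag words: */
	unsigned long tg3_flags[DIV_ROUND_UP(TG3_FLAG_NUMBER_OF_FLAGS,
					     BITS_PER_LONG)];

With that shape, a call site such as tg3_flag(tp, MDIOBUS_INITED) expands to a single test_bit() on the bitmap, which is why the three separate flag words and their suffixes disappear from every converted line below.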
@@ -1247,8 +1317,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
 	u32 reg;
 	u32 val;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
-	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
 		return;
 
 	tg3_wait_for_event_ack(tp);
@@ -1308,6 +1377,11 @@ static void tg3_link_report(struct tg3 *tp)
 			    "on" : "off",
 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
 			    "on" : "off");
+
+		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+			netdev_info(tp->dev, "EEE is %s\n",
+				    tp->setlpicnt ? "enabled" : "disabled");
+
 		tg3_ump_link_report(tp);
 	}
 }
@@ -1373,13 +1447,12 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 	u32 old_rx_mode = tp->rx_mode;
 	u32 old_tx_mode = tp->tx_mode;
 
-	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
+	if (tg3_flag(tp, USE_PHYLIB))
 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
 	else
 		autoneg = tp->link_config.autoneg;
 
-	if (autoneg == AUTONEG_ENABLE &&
-	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
+	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
 		else
@@ -1576,28 +1649,6 @@ static void tg3_phy_fini(struct tg3 *tp)
 	}
 }
 
-static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
-{
-	int err;
-
-	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
-	if (!err)
-		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
-
-	return err;
-}
-
-static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
-{
-	int err;
-
-	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
-	if (!err)
-		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
-
-	return err;
-}
-
 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
 {
 	u32 phytest;
@@ -1622,9 +1673,8 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
 {
 	u32 reg;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
-	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    (tg3_flag(tp, 5717_PLUS) &&
 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 		return;
 
@@ -1658,7 +1708,7 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
 {
 	u32 phy;
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+	if (!tg3_flag(tp, 5705_PLUS) ||
 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 		return;
 
@@ -1680,31 +1730,33 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
 		}
 	} else {
-		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
-		      MII_TG3_AUXCTL_SHDWSEL_MISC;
-		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
-		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
+		int ret;
+
+		ret = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
+		if (!ret) {
 			if (enable)
 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 			else
 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
-			phy |= MII_TG3_AUXCTL_MISC_WREN;
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+			tg3_phy_auxctl_write(tp,
+					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
 		}
 	}
 }
 
 static void tg3_phy_set_wirespeed(struct tg3 *tp)
 {
+	int ret;
 	u32 val;
 
 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
 		return;
 
-	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
-	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
-		tg3_writephy(tp, MII_TG3_AUX_CTRL,
-			     (val | (1 << 15) | (1 << 4)));
+	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
+	if (!ret)
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
+				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
 }
 
 static void tg3_phy_apply_otp(struct tg3 *tp)
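
Several hunks here and below replace open-coded MII_TG3_AUX_CTRL shadow-register sequences with tg3_phy_auxctl_read()/tg3_phy_auxctl_write(). The helper bodies are introduced in a part of the patch outside this section; a plausible sketch, inferred only from the access pattern being replaced (shadow select in the low bits, read-select field shifted up, a write-enable bit for the MISC shadow) and matching the call sites visible above, would be:

	/* Sketch only -- signatures match the call sites in this diff;
	 * MII_TG3_AUXCTL_MISC_RDSEL_SHIFT is assumed to name the
	 * read-select field position. */
	static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
	{
		int err;

		/* Select the shadow register to read back, then read it. */
		err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
				   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
				   MII_TG3_AUXCTL_SHDWSEL_MISC);
		if (!err)
			err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

		return err;
	}

	static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
	{
		/* The MISC shadow needs its write-enable bit, as the old
		 * open-coded sequence in tg3_phy_toggle_automdix() did. */
		if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
			set |= MII_TG3_AUXCTL_MISC_WREN;

		return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
	}

The payoff is visible in tg3_phy_set_wirespeed(): the magic constants 0x7007 and (1 << 15) | (1 << 4) become a named shadow selector plus MII_TG3_AUXCTL_MISC_WIRESPD_EN.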
@@ -1716,11 +1768,8 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
 
 	otp = tp->phy_otp;
 
-	/* Enable SM_DSP clock and tx 6dB coding. */
-	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
-	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
-	      MII_TG3_AUXCTL_ACTL_TX_6DB;
-	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+		return;
 
 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
@@ -1744,10 +1793,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
 
-	/* Turn off SM_DSP clock. */
-	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
-	      MII_TG3_AUXCTL_ACTL_TX_6DB;
-	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 }
 
 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -1782,18 +1828,11 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
 		case ASIC_REV_5717:
 		case ASIC_REV_5719:
 		case ASIC_REV_57765:
-			/* Enable SM_DSP clock and tx 6dB coding. */
-			val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
-			      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
-			      MII_TG3_AUXCTL_ACTL_TX_6DB;
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
-
-			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
-
-			/* Turn off SM_DSP clock. */
-			val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
-			      MII_TG3_AUXCTL_ACTL_TX_6DB;
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+			if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
+						 0x0000);
+				TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+			}
 		}
 		/* Fallthrough */
 	case TG3_CL45_D7_EEERES_STAT_LP_100TX:
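
TG3_PHY_AUXCTL_SMDSP_ENABLE()/TG3_PHY_AUXCTL_SMDSP_DISABLE() replace the repeated "enable SM_DSP clock and tx 6dB coding" / "turn off SM_DSP clock" sequences deleted above, and the enable variant evidently returns a status (it is tested with `if (err)` and `if (!...)` in the surrounding hunks). Assuming they are thin wrappers over the new auxctl write helper, they would reduce to exactly the values being removed:

	/* Assumed shape, derived from the open-coded writes this patch
	 * deletes; the actual macros are defined outside this section. */
	#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
		tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
				     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
				     MII_TG3_AUXCTL_ACTL_TX_6DB)

	#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
		tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
				     MII_TG3_AUXCTL_ACTL_TX_6DB)

Because the enable macro propagates the write's return code, call sites can skip the DSP programming entirely when the AUXCTL write fails, which the old unconditional tg3_writephy() sequences could not do.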
@@ -1945,8 +1984,9 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 			     (MII_TG3_CTRL_AS_MASTER |
 			      MII_TG3_CTRL_ENABLE_AS_MASTER));
 
-	/* Enable SM_DSP_CLOCK and 6dB. */
-	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+	if (err)
+		return err;
 
 	/* Block the PHY control access. */
 	tg3_phydsp_write(tp, 0x8005, 0x0800);
@@ -1965,13 +2005,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
-		/* Set Extended packet length bit for jumbo frames */
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
-	} else {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
-	}
+	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 
 	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
 
@@ -2047,8 +2081,7 @@ static int tg3_phy_reset(struct tg3 *tp)
 		}
 	}
 
-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
+	if (tg3_flag(tp, 5717_PLUS) &&
 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
 		return 0;
 
@@ -2060,49 +2093,57 @@ static int tg3_phy_reset(struct tg3 *tp)
 		tg3_phy_toggle_apd(tp, false);
 
 out:
-	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
 		tg3_phydsp_write(tp, 0x000a, 0x0323);
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 	}
+
 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 	}
+
 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 			tg3_phydsp_write(tp, 0x000a, 0x310b);
 			tg3_phydsp_write(tp, 0x201f, 0x9506);
 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		}
 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 				tg3_writephy(tp, MII_TG3_TEST1,
 					     MII_TG3_TEST1_TRIM_EN | 0x4);
 			} else
 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+
+			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		}
 	}
+
 	/* Set Extended packet length bit (bit 14) on all chips that */
 	/* support jumbo frames */
 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 		/* Cannot do read-modify-write on 5401 */
-		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
-	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
 		/* Set bit 14 with read-modify-write to preserve other bits */
-		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
-		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
-			tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
+		err = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+		if (!err)
+			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
 	}
 
 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
 	 * jumbo frames transmission.
 	 */
-	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+	if (tg3_flag(tp, JUMBO_CAPABLE)) {
 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
@@ -2123,14 +2164,15 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2123 bool need_vaux = false; 2164 bool need_vaux = false;
2124 2165
2125 /* The GPIOs do something completely different on 57765. */ 2166 /* The GPIOs do something completely different on 57765. */
2126 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || 2167 if (!tg3_flag(tp, IS_NIC) ||
2127 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 2168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 2169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2129 return; 2170 return;
2130 2171
2131 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 2172 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) && 2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2134 tp->pdev_peer != tp->pdev) { 2176 tp->pdev_peer != tp->pdev) {
2135 struct net_device *dev_peer; 2177 struct net_device *dev_peer;
2136 2178
@@ -2140,17 +2182,16 @@ static void tg3_frob_aux_power(struct tg3 *tp)
2140 if (dev_peer) { 2182 if (dev_peer) {
2141 struct tg3 *tp_peer = netdev_priv(dev_peer); 2183 struct tg3 *tp_peer = netdev_priv(dev_peer);
2142 2184
2143 if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) 2185 if (tg3_flag(tp_peer, INIT_COMPLETE))
2144 return; 2186 return;
2145 2187
2146 if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) || 2188 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2147 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF)) 2189 tg3_flag(tp_peer, ENABLE_ASF))
2148 need_vaux = true; 2190 need_vaux = true;
2149 } 2191 }
2150 } 2192 }
2151 2193
2152 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) || 2194 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2153 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2154 need_vaux = true; 2195 need_vaux = true;
2155 2196
2156 if (need_vaux) { 2197 if (need_vaux) {
@@ -2304,11 +2345,10 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2304 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2345 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2305 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 2346 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2306 2347
2307 tg3_writephy(tp, MII_TG3_AUX_CTRL, 2348 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2308 MII_TG3_AUXCTL_SHDWSEL_PWRCTL | 2349 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2309 MII_TG3_AUXCTL_PCTL_100TX_LPWR | 2350 MII_TG3_AUXCTL_PCTL_VREG_11V;
2310 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | 2351 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2311 MII_TG3_AUXCTL_PCTL_VREG_11V);
2312 } 2352 }
2313 2353
2314 /* The PHY should not be powered down on some chips because 2354 /* The PHY should not be powered down on some chips because
@@ -2334,7 +2374,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2334/* tp->lock is held. */ 2374/* tp->lock is held. */
2335static int tg3_nvram_lock(struct tg3 *tp) 2375static int tg3_nvram_lock(struct tg3 *tp)
2336{ 2376{
2337 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 2377 if (tg3_flag(tp, NVRAM)) {
2338 int i; 2378 int i;
2339 2379
2340 if (tp->nvram_lock_cnt == 0) { 2380 if (tp->nvram_lock_cnt == 0) {
@@ -2357,7 +2397,7 @@ static int tg3_nvram_lock(struct tg3 *tp)
2357/* tp->lock is held. */ 2397/* tp->lock is held. */
2358static void tg3_nvram_unlock(struct tg3 *tp) 2398static void tg3_nvram_unlock(struct tg3 *tp)
2359{ 2399{
2360 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 2400 if (tg3_flag(tp, NVRAM)) {
2361 if (tp->nvram_lock_cnt > 0) 2401 if (tp->nvram_lock_cnt > 0)
2362 tp->nvram_lock_cnt--; 2402 tp->nvram_lock_cnt--;
2363 if (tp->nvram_lock_cnt == 0) 2403 if (tp->nvram_lock_cnt == 0)
@@ -2368,8 +2408,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
2368/* tp->lock is held. */ 2408/* tp->lock is held. */
2369static void tg3_enable_nvram_access(struct tg3 *tp) 2409static void tg3_enable_nvram_access(struct tg3 *tp)
2370{ 2410{
2371 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2411 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2372 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2373 u32 nvaccess = tr32(NVRAM_ACCESS); 2412 u32 nvaccess = tr32(NVRAM_ACCESS);
2374 2413
2375 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 2414 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2379,8 +2418,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
2379/* tp->lock is held. */ 2418/* tp->lock is held. */
2380static void tg3_disable_nvram_access(struct tg3 *tp) 2419static void tg3_disable_nvram_access(struct tg3 *tp)
2381{ 2420{
2382 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2421 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2383 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2384 u32 nvaccess = tr32(NVRAM_ACCESS); 2422 u32 nvaccess = tr32(NVRAM_ACCESS);
2385 2423
2386 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 2424 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -2450,10 +2488,10 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2450 2488
2451static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 2489static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2452{ 2490{
2453 if ((tp->tg3_flags & TG3_FLAG_NVRAM) && 2491 if (tg3_flag(tp, NVRAM) &&
2454 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && 2492 tg3_flag(tp, NVRAM_BUFFERED) &&
2455 (tp->tg3_flags2 & TG3_FLG2_FLASH) && 2493 tg3_flag(tp, FLASH) &&
2456 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && 2494 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2457 (tp->nvram_jedecnum == JEDEC_ATMEL)) 2495 (tp->nvram_jedecnum == JEDEC_ATMEL))
2458 2496
2459 addr = ((addr / tp->nvram_pagesize) << 2497 addr = ((addr / tp->nvram_pagesize) <<
@@ -2465,10 +2503,10 @@ static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2465 2503
2466static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 2504static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2467{ 2505{
2468 if ((tp->tg3_flags & TG3_FLAG_NVRAM) && 2506 if (tg3_flag(tp, NVRAM) &&
2469 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && 2507 tg3_flag(tp, NVRAM_BUFFERED) &&
2470 (tp->tg3_flags2 & TG3_FLG2_FLASH) && 2508 tg3_flag(tp, FLASH) &&
2471 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && 2509 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2472 (tp->nvram_jedecnum == JEDEC_ATMEL)) 2510 (tp->nvram_jedecnum == JEDEC_ATMEL))
2473 2511
2474 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 2512 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
@@ -2488,7 +2526,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2488{ 2526{
2489 int ret; 2527 int ret;
2490 2528
2491 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) 2529 if (!tg3_flag(tp, NVRAM))
2492 return tg3_nvram_read_using_eeprom(tp, offset, val); 2530 return tg3_nvram_read_using_eeprom(tp, offset, val);
2493 2531
2494 offset = tg3_nvram_phys_addr(tp, offset); 2532 offset = tg3_nvram_phys_addr(tp, offset);
@@ -2580,7 +2618,7 @@ static int tg3_power_up(struct tg3 *tp)
2580 pci_set_power_state(tp->pdev, PCI_D0); 2618 pci_set_power_state(tp->pdev, PCI_D0);
2581 2619
2582 /* Switch out of Vaux if it is a NIC */ 2620 /* Switch out of Vaux if it is a NIC */
2583 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) 2621 if (tg3_flag(tp, IS_NIC))
2584 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); 2622 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2585 2623
2586 return 0; 2624 return 0;
@@ -2594,7 +2632,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2594 tg3_enable_register_access(tp); 2632 tg3_enable_register_access(tp);
2595 2633
2596 /* Restore the CLKREQ setting. */ 2634 /* Restore the CLKREQ setting. */
2597 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { 2635 if (tg3_flag(tp, CLKREQ_BUG)) {
2598 u16 lnkctl; 2636 u16 lnkctl;
2599 2637
2600 pci_read_config_word(tp->pdev, 2638 pci_read_config_word(tp->pdev,
@@ -2611,9 +2649,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2611 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2649 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2612 2650
2613 device_should_wake = device_may_wakeup(&tp->pdev->dev) && 2651 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2614 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 2652 tg3_flag(tp, WOL_ENABLE);
2615 2653
2616 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 2654 if (tg3_flag(tp, USE_PHYLIB)) {
2617 do_low_power = false; 2655 do_low_power = false;
2618 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && 2656 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2619 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 2657 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
@@ -2634,9 +2672,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2634 ADVERTISED_Autoneg | 2672 ADVERTISED_Autoneg |
2635 ADVERTISED_10baseT_Half; 2673 ADVERTISED_10baseT_Half;
2636 2674
2637 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 2675 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2638 device_should_wake) { 2676 if (tg3_flag(tp, WOL_SPEED_100MB))
2639 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2640 advertising |= 2677 advertising |=
2641 ADVERTISED_100baseT_Half | 2678 ADVERTISED_100baseT_Half |
2642 ADVERTISED_100baseT_Full | 2679 ADVERTISED_100baseT_Full |
@@ -2681,7 +2718,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2681 2718
2682 val = tr32(GRC_VCPU_EXT_CTRL); 2719 val = tr32(GRC_VCPU_EXT_CTRL);
2683 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); 2720 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2684 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 2721 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2685 int i; 2722 int i;
2686 u32 val; 2723 u32 val;
2687 2724
@@ -2692,7 +2729,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2692 msleep(1); 2729 msleep(1);
2693 } 2730 }
2694 } 2731 }
2695 if (tp->tg3_flags & TG3_FLAG_WOL_CAP) 2732 if (tg3_flag(tp, WOL_CAP))
2696 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | 2733 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2697 WOL_DRV_STATE_SHUTDOWN | 2734 WOL_DRV_STATE_SHUTDOWN |
2698 WOL_DRV_WOL | 2735 WOL_DRV_WOL |
@@ -2702,8 +2739,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2702 u32 mac_mode; 2739 u32 mac_mode;
2703 2740
2704 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 2741 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2705 if (do_low_power) { 2742 if (do_low_power &&
2706 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); 2743 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2744 tg3_phy_auxctl_write(tp,
2745 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2746 MII_TG3_AUXCTL_PCTL_WOL_EN |
2747 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2748 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2707 udelay(40); 2749 udelay(40);
2708 } 2750 }
2709 2751
@@ -2715,8 +2757,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2715 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; 2757 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == 2758 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2717 ASIC_REV_5700) { 2759 ASIC_REV_5700) {
2718 u32 speed = (tp->tg3_flags & 2760 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2719 TG3_FLAG_WOL_SPEED_100MB) ?
2720 SPEED_100 : SPEED_10; 2761 SPEED_100 : SPEED_10;
2721 if (tg3_5700_link_polarity(tp, speed)) 2762 if (tg3_5700_link_polarity(tp, speed))
2722 mac_mode |= MAC_MODE_LINK_POLARITY; 2763 mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -2727,17 +2768,15 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2727 mac_mode = MAC_MODE_PORT_MODE_TBI; 2768 mac_mode = MAC_MODE_PORT_MODE_TBI;
2728 } 2769 }
2729 2770
2730 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 2771 if (!tg3_flag(tp, 5750_PLUS))
2731 tw32(MAC_LED_CTRL, tp->led_ctrl); 2772 tw32(MAC_LED_CTRL, tp->led_ctrl);
2732 2773
2733 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 2774 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2734 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 2775 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2735 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) && 2776 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2736 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2737 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2738 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 2777 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2739 2778
2740 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 2779 if (tg3_flag(tp, ENABLE_APE))
2741 mac_mode |= MAC_MODE_APE_TX_EN | 2780 mac_mode |= MAC_MODE_APE_TX_EN |
2742 MAC_MODE_APE_RX_EN | 2781 MAC_MODE_APE_RX_EN |
2743 MAC_MODE_TDE_ENABLE; 2782 MAC_MODE_TDE_ENABLE;
@@ -2749,7 +2788,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2749 udelay(10); 2788 udelay(10);
2750 } 2789 }
2751 2790
2752 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && 2791 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2753 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2792 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { 2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2755 u32 base_val; 2794 u32 base_val;
@@ -2760,12 +2799,11 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2760 2799
2761 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | 2800 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2762 CLOCK_CTRL_PWRDOWN_PLL133, 40); 2801 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2763 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 2802 } else if (tg3_flag(tp, 5780_CLASS) ||
2764 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || 2803 tg3_flag(tp, CPMU_PRESENT) ||
2765 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { 2804 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2766 /* do nothing */ 2805 /* do nothing */
2767 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2806 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2768 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2769 u32 newbits1, newbits2; 2807 u32 newbits1, newbits2;
2770 2808
2771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -2774,7 +2812,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2774 CLOCK_CTRL_TXCLK_DISABLE | 2812 CLOCK_CTRL_TXCLK_DISABLE |
2775 CLOCK_CTRL_ALTCLK); 2813 CLOCK_CTRL_ALTCLK);
2776 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; 2814 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2777 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 2815 } else if (tg3_flag(tp, 5705_PLUS)) {
2778 newbits1 = CLOCK_CTRL_625_CORE; 2816 newbits1 = CLOCK_CTRL_625_CORE;
2779 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; 2817 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2780 } else { 2818 } else {
@@ -2788,7 +2826,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2788 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, 2826 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2789 40); 2827 40);
2790 2828
2791 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 2829 if (!tg3_flag(tp, 5705_PLUS)) {
2792 u32 newbits3; 2830 u32 newbits3;
2793 2831
2794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 2832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -2805,8 +2843,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2805 } 2843 }
2806 } 2844 }
2807 2845
2808 if (!(device_should_wake) && 2846 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2809 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2810 tg3_power_down_phy(tp, do_low_power); 2847 tg3_power_down_phy(tp, do_low_power);
2811 2848
2812 tg3_frob_aux_power(tp); 2849 tg3_frob_aux_power(tp);
@@ -2818,7 +2855,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
2818 2855
2819 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); 2856 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2820 tw32(0x7d00, val); 2857 tw32(0x7d00, val);
2821 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 2858 if (!tg3_flag(tp, ENABLE_ASF)) {
2822 int err; 2859 int err;
2823 2860
2824 err = tg3_nvram_lock(tp); 2861 err = tg3_nvram_lock(tp);
@@ -2837,7 +2874,7 @@ static void tg3_power_down(struct tg3 *tp)
2837{ 2874{
2838 tg3_power_down_prepare(tp); 2875 tg3_power_down_prepare(tp);
2839 2876
2840 pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 2877 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2841 pci_set_power_state(tp->pdev, PCI_D3hot); 2878 pci_set_power_state(tp->pdev, PCI_D3hot);
2842} 2879}
2843 2880
@@ -2901,7 +2938,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2901 2938
2902 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | 2939 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2903 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); 2940 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2904 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) 2941 if (tg3_flag(tp, WOL_SPEED_100MB))
2905 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); 2942 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2906 2943
2907 tg3_writephy(tp, MII_ADVERTISE, new_adv); 2944 tg3_writephy(tp, MII_ADVERTISE, new_adv);
@@ -2983,11 +3020,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2983 tw32(TG3_CPMU_EEE_MODE, 3020 tw32(TG3_CPMU_EEE_MODE,
2984 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 3021 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2985 3022
2986 /* Enable SM_DSP clock and tx 6dB coding. */ 3023 TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2987 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
2988 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
2989 MII_TG3_AUXCTL_ACTL_TX_6DB;
2990 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2991 3024
2992 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { 3025 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2993 case ASIC_REV_5717: 3026 case ASIC_REV_5717:
@@ -3016,10 +3049,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
3016 } 3049 }
3017 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); 3050 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3018 3051
3019 /* Turn off SM_DSP clock. */ 3052 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3020 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
3021 MII_TG3_AUXCTL_ACTL_TX_6DB;
3022 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3023 } 3053 }
3024 3054
3025 if (tp->link_config.autoneg == AUTONEG_DISABLE && 3055 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
@@ -3077,7 +3107,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
3077 3107
3078 /* Turn off tap power management. */ 3108 /* Turn off tap power management. */
3079 /* Set Extended packet length bit */ 3109 /* Set Extended packet length bit */
3080 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); 3110 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3081 3111
3082 err |= tg3_phydsp_write(tp, 0x0012, 0x1804); 3112 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3083 err |= tg3_phydsp_write(tp, 0x0013, 0x1204); 3113 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
@@ -3140,7 +3170,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3140 if (curadv != reqadv) 3170 if (curadv != reqadv)
3141 return 0; 3171 return 0;
3142 3172
3143 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) 3173 if (tg3_flag(tp, PAUSE_AUTONEG))
3144 tg3_readphy(tp, MII_LPA, rmtadv); 3174 tg3_readphy(tp, MII_LPA, rmtadv);
3145 } else { 3175 } else {
3146 /* Reprogram the advertisement register, even if it 3176 /* Reprogram the advertisement register, even if it
@@ -3183,7 +3213,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3183 udelay(80); 3213 udelay(80);
3184 } 3214 }
3185 3215
3186 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); 3216 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3187 3217
3188 /* Some third-party PHYs need to be reset on link going 3218 /* Some third-party PHYs need to be reset on link going
3189 * down. 3219 * down.
@@ -3203,7 +3233,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3203 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { 3233 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3204 tg3_readphy(tp, MII_BMSR, &bmsr); 3234 tg3_readphy(tp, MII_BMSR, &bmsr);
3205 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 3235 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3206 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) 3236 !tg3_flag(tp, INIT_COMPLETE))
3207 bmsr = 0; 3237 bmsr = 0;
3208 3238
3209 if (!(bmsr & BMSR_LSTATUS)) { 3239 if (!(bmsr & BMSR_LSTATUS)) {
@@ -3264,11 +3294,13 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3264 current_duplex = DUPLEX_INVALID; 3294 current_duplex = DUPLEX_INVALID;
3265 3295
3266 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 3296 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3267 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); 3297 err = tg3_phy_auxctl_read(tp,
3268 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); 3298 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3269 if (!(val & (1 << 10))) { 3299 &val);
3270 val |= (1 << 10); 3300 if (!err && !(val & (1 << 10))) {
3271 tg3_writephy(tp, MII_TG3_AUX_CTRL, val); 3301 tg3_phy_auxctl_write(tp,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3303 val | (1 << 10));
3272 goto relink; 3304 goto relink;
3273 } 3305 }
3274 } 3306 }
@@ -3341,8 +3373,8 @@ relink:
3341 tg3_phy_copper_begin(tp); 3373 tg3_phy_copper_begin(tp);
3342 3374
3343 tg3_readphy(tp, MII_BMSR, &bmsr); 3375 tg3_readphy(tp, MII_BMSR, &bmsr);
3344 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 3376 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3345 (bmsr & BMSR_LSTATUS)) 3377 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3346 current_link_up = 1; 3378 current_link_up = 1;
3347 } 3379 }
3348 3380
@@ -3385,7 +3417,7 @@ relink:
3385 3417
3386 tg3_phy_eee_adjust(tp, current_link_up); 3418 tg3_phy_eee_adjust(tp, current_link_up);
3387 3419
3388 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 3420 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3389 /* Polled via timer. */ 3421 /* Polled via timer. */
3390 tw32_f(MAC_EVENT, 0); 3422 tw32_f(MAC_EVENT, 0);
3391 } else { 3423 } else {
@@ -3396,8 +3428,7 @@ relink:
3396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && 3428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3397 current_link_up == 1 && 3429 current_link_up == 1 &&
3398 tp->link_config.active_speed == SPEED_1000 && 3430 tp->link_config.active_speed == SPEED_1000 &&
3399 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || 3431 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3400 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3401 udelay(120); 3432 udelay(120);
3402 tw32_f(MAC_STATUS, 3433 tw32_f(MAC_STATUS,
3403 (MAC_STATUS_SYNC_CHANGED | 3434 (MAC_STATUS_SYNC_CHANGED |
@@ -3409,7 +3440,7 @@ relink:
3409 } 3440 }
3410 3441
3411 /* Prevent send BD corruption. */ 3442 /* Prevent send BD corruption. */
3412 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { 3443 if (tg3_flag(tp, CLKREQ_BUG)) {
3413 u16 oldlnkctl, newlnkctl; 3444 u16 oldlnkctl, newlnkctl;
3414 3445
3415 pci_read_config_word(tp->pdev, 3446 pci_read_config_word(tp->pdev,
@@ -3804,7 +3835,7 @@ static void tg3_init_bcm8002(struct tg3 *tp)
3804 int i; 3835 int i;
3805 3836
3806 /* Reset when initting first time or we have a link. */ 3837 /* Reset when initting first time or we have a link. */
3807 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && 3838 if (tg3_flag(tp, INIT_COMPLETE) &&
3808 !(mac_status & MAC_STATUS_PCS_SYNCED)) 3839 !(mac_status & MAC_STATUS_PCS_SYNCED))
3809 return; 3840 return;
3810 3841
@@ -4065,9 +4096,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4065 orig_active_speed = tp->link_config.active_speed; 4096 orig_active_speed = tp->link_config.active_speed;
4066 orig_active_duplex = tp->link_config.active_duplex; 4097 orig_active_duplex = tp->link_config.active_duplex;
4067 4098
4068 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && 4099 if (!tg3_flag(tp, HW_AUTONEG) &&
4069 netif_carrier_ok(tp->dev) && 4100 netif_carrier_ok(tp->dev) &&
4070 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { 4101 tg3_flag(tp, INIT_COMPLETE)) {
4071 mac_status = tr32(MAC_STATUS); 4102 mac_status = tr32(MAC_STATUS);
4072 mac_status &= (MAC_STATUS_PCS_SYNCED | 4103 mac_status &= (MAC_STATUS_PCS_SYNCED |
4073 MAC_STATUS_SIGNAL_DET | 4104 MAC_STATUS_SIGNAL_DET |
@@ -4098,7 +4129,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4098 current_link_up = 0; 4129 current_link_up = 0;
4099 mac_status = tr32(MAC_STATUS); 4130 mac_status = tr32(MAC_STATUS);
4100 4131
4101 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) 4132 if (tg3_flag(tp, HW_AUTONEG))
4102 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); 4133 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4103 else 4134 else
4104 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); 4135 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
@@ -4297,7 +4328,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4297 current_duplex = DUPLEX_FULL; 4328 current_duplex = DUPLEX_FULL;
4298 else 4329 else
4299 current_duplex = DUPLEX_HALF; 4330 current_duplex = DUPLEX_HALF;
4300 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 4331 } else if (!tg3_flag(tp, 5780_CLASS)) {
4301 /* Link is up via parallel detect */ 4332 /* Link is up via parallel detect */
4302 } else { 4333 } else {
4303 current_link_up = 0; 4334 current_link_up = 0;
@@ -4394,6 +4425,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
 
 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 {
+	u32 val;
 	int err;
 
 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
@@ -4404,7 +4436,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 		err = tg3_setup_copper_phy(tp, force_reset);
 
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
-		u32 val, scale;
+		u32 scale;
 
 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
@@ -4419,19 +4451,22 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 		tw32(GRC_MISC_CFG, val);
 	}
 
+	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+	      (6 << TX_LENGTHS_IPG_SHIFT);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+		val |= tr32(MAC_TX_LENGTHS) &
+		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
+			TX_LENGTHS_CNT_DWN_VAL_MSK);
+
 	if (tp->link_config.active_speed == SPEED_1000 &&
 	    tp->link_config.active_duplex == DUPLEX_HALF)
-		tw32(MAC_TX_LENGTHS,
-		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
-		      (6 << TX_LENGTHS_IPG_SHIFT) |
-		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+		tw32(MAC_TX_LENGTHS, val |
+		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
 	else
-		tw32(MAC_TX_LENGTHS,
-		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
-		      (6 << TX_LENGTHS_IPG_SHIFT) |
-		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+		tw32(MAC_TX_LENGTHS, val |
+		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
 
-	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+	if (!tg3_flag(tp, 5705_PLUS)) {
 		if (netif_carrier_ok(tp->dev)) {
 			tw32(HOSTCC_STAT_COAL_TICKS,
 			     tp->coal.stats_block_coalesce_usecs);
@@ -4440,8 +4475,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 		}
 	}
 
-	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
-		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
+	if (tg3_flag(tp, ASPM_WORKAROUND)) {
+		val = tr32(PCIE_PWR_MGMT_THRESH);
 		if (!netif_carrier_ok(tp->dev))
 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
 			      tp->pwrmgmt_thresh;
@@ -4458,6 +4493,123 @@ static inline int tg3_irq_sync(struct tg3 *tp)
4458 return tp->irq_sync; 4493 return tp->irq_sync;
4459} 4494}
4460 4495
4496static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4497{
4498 int i;
4499
4500 dst = (u32 *)((u8 *)dst + off);
4501 for (i = 0; i < len; i += sizeof(u32))
4502 *dst++ = tr32(off + i);
4503}
4504
4505static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4506{
4507 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4508 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4509 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4510 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4511 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4512 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4513 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4514 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4515 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4516 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4517 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4518 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4519 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4520 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4523 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4524 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4525 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4526
4527 if (tg3_flag(tp, SUPPORT_MSIX))
4528 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4529
4530 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4531 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4532 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4533 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4534 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4535 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4536 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4537 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4538
4539 if (!tg3_flag(tp, 5705_PLUS)) {
4540 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4543 }
4544
4545 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4546 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4547 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4548 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4549 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4550
4551 if (tg3_flag(tp, NVRAM))
4552 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4553}
4554
4555static void tg3_dump_state(struct tg3 *tp)
4556{
4557 int i;
4558 u32 *regs;
4559
4560 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4561 if (!regs) {
4562 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4563 return;
4564 }
4565
4566 if (tg3_flag(tp, PCI_EXPRESS)) {
4567 /* Read up to but not including private PCI registers */
4568 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4569 regs[i / sizeof(u32)] = tr32(i);
4570 } else
4571 tg3_dump_legacy_regs(tp, regs);
4572
4573 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4574 if (!regs[i + 0] && !regs[i + 1] &&
4575 !regs[i + 2] && !regs[i + 3])
4576 continue;
4577
4578 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4579 i * 4,
4580 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4581 }
4582
4583 kfree(regs);
4584
4585 for (i = 0; i < tp->irq_cnt; i++) {
4586 struct tg3_napi *tnapi = &tp->napi[i];
4587
4588 /* SW status block */
4589 netdev_err(tp->dev,
4590 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4591 i,
4592 tnapi->hw_status->status,
4593 tnapi->hw_status->status_tag,
4594 tnapi->hw_status->rx_jumbo_consumer,
4595 tnapi->hw_status->rx_consumer,
4596 tnapi->hw_status->rx_mini_consumer,
4597 tnapi->hw_status->idx[0].rx_producer,
4598 tnapi->hw_status->idx[0].tx_consumer);
4599
4600 netdev_err(tp->dev,
4601 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4602 i,
4603 tnapi->last_tag, tnapi->last_irq_tag,
4604 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4605 tnapi->rx_rcb_ptr,
4606 tnapi->prodring.rx_std_prod_idx,
4607 tnapi->prodring.rx_std_cons_idx,
4608 tnapi->prodring.rx_jmb_prod_idx,
4609 tnapi->prodring.rx_jmb_cons_idx);
4610 }
4611}
4612
4461/* This is called whenever we suspect that the system chipset is re- 4613/* This is called whenever we suspect that the system chipset is re-
4462 * ordering the sequence of MMIO to the tx send mailbox. The symptom 4614 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4463 * is bogus tx completions. We try to recover by setting the 4615 * is bogus tx completions. We try to recover by setting the
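
The new tg3_rd32_loop() advances dst by off bytes before copying, so every captured register lands at its own absolute register offset inside the dump buffer rather than being packed from index zero; tg3_dump_state() relies on that when it prints the buffer by offset. A minimal usage sketch (illustrative only, reusing the buffer sizing and helpers visible in this hunk):

	/* Illustrative sketch: capture one register block and print a
	 * single register from it by absolute offset, the same way
	 * tg3_dump_state() consumes the buffer. */
	u32 *regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);

	if (regs) {
		tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);	/* MAC block */
		netdev_info(tp->dev, "MAC_MODE: 0x%08x\n",
			    regs[MAC_MODE / sizeof(u32)]);
		kfree(regs);
	}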
@@ -4466,7 +4618,7 @@ static inline int tg3_irq_sync(struct tg3 *tp)
4466 */ 4618 */
4467static void tg3_tx_recover(struct tg3 *tp) 4619static void tg3_tx_recover(struct tg3 *tp)
4468{ 4620{
4469 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || 4621 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4470 tp->write32_tx_mbox == tg3_write_indirect_mbox); 4622 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4471 4623
4472 netdev_warn(tp->dev, 4624 netdev_warn(tp->dev,
@@ -4476,7 +4628,7 @@ static void tg3_tx_recover(struct tg3 *tp)
4476 "and include system chipset information.\n"); 4628 "and include system chipset information.\n");
4477 4629
4478 spin_lock(&tp->lock); 4630 spin_lock(&tp->lock);
4479 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; 4631 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4480 spin_unlock(&tp->lock); 4632 spin_unlock(&tp->lock);
4481} 4633}
4482 4634
@@ -4500,7 +4652,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
4500 struct netdev_queue *txq; 4652 struct netdev_queue *txq;
4501 int index = tnapi - tp->napi; 4653 int index = tnapi - tp->napi;
4502 4654
4503 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 4655 if (tg3_flag(tp, ENABLE_TSS))
4504 index--; 4656 index--;
4505 4657
4506 txq = netdev_get_tx_queue(tp->dev, index); 4658 txq = netdev_get_tx_queue(tp->dev, index);
@@ -4815,7 +4967,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4815 skb = copy_skb; 4967 skb = copy_skb;
4816 } 4968 }
4817 4969
4818 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && 4970 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4819 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 4971 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4820 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 4972 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4821 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 4973 >> RXD_TCPCSUM_SHIFT) == 0xffff))
@@ -4868,7 +5020,7 @@ next_pkt_nopost:
4868 tw32_rx_mbox(tnapi->consmbox, sw_idx); 5020 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4869 5021
4870 /* Refill RX ring(s). */ 5022 /* Refill RX ring(s). */
4871 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { 5023 if (!tg3_flag(tp, ENABLE_RSS)) {
4872 if (work_mask & RXD_OPAQUE_RING_STD) { 5024 if (work_mask & RXD_OPAQUE_RING_STD) {
4873 tpr->rx_std_prod_idx = std_prod_idx & 5025 tpr->rx_std_prod_idx = std_prod_idx &
4874 tp->rx_std_ring_mask; 5026 tp->rx_std_ring_mask;
@@ -4901,16 +5053,14 @@ next_pkt_nopost:
4901static void tg3_poll_link(struct tg3 *tp) 5053static void tg3_poll_link(struct tg3 *tp)
4902{ 5054{
4903 /* handle link change and other phy events */ 5055 /* handle link change and other phy events */
4904 if (!(tp->tg3_flags & 5056 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
4905 (TG3_FLAG_USE_LINKCHG_REG |
4906 TG3_FLAG_POLL_SERDES))) {
4907 struct tg3_hw_status *sblk = tp->napi[0].hw_status; 5057 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4908 5058
4909 if (sblk->status & SD_STATUS_LINK_CHG) { 5059 if (sblk->status & SD_STATUS_LINK_CHG) {
4910 sblk->status = SD_STATUS_UPDATED | 5060 sblk->status = SD_STATUS_UPDATED |
4911 (sblk->status & ~SD_STATUS_LINK_CHG); 5061 (sblk->status & ~SD_STATUS_LINK_CHG);
4912 spin_lock(&tp->lock); 5062 spin_lock(&tp->lock);
4913 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 5063 if (tg3_flag(tp, USE_PHYLIB)) {
4914 tw32_f(MAC_STATUS, 5064 tw32_f(MAC_STATUS,
4915 (MAC_STATUS_SYNC_CHANGED | 5065 (MAC_STATUS_SYNC_CHANGED |
4916 MAC_STATUS_CFG_CHANGED | 5066 MAC_STATUS_CFG_CHANGED |
@@ -5057,7 +5207,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5057 /* run TX completion thread */ 5207 /* run TX completion thread */
5058 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 5208 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5059 tg3_tx(tnapi); 5209 tg3_tx(tnapi);
5060 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 5210 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5061 return work_done; 5211 return work_done;
5062 } 5212 }
5063 5213
@@ -5068,7 +5218,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5068 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 5218 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5069 work_done += tg3_rx(tnapi, budget - work_done); 5219 work_done += tg3_rx(tnapi, budget - work_done);
5070 5220
5071 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { 5221 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5072 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; 5222 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5073 int i, err = 0; 5223 int i, err = 0;
5074 u32 std_prod_idx = dpr->rx_std_prod_idx; 5224 u32 std_prod_idx = dpr->rx_std_prod_idx;
@@ -5107,7 +5257,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
5107 while (1) { 5257 while (1) {
5108 work_done = tg3_poll_work(tnapi, work_done, budget); 5258 work_done = tg3_poll_work(tnapi, work_done, budget);
5109 5259
5110 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 5260 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5111 goto tx_recovery; 5261 goto tx_recovery;
5112 5262
5113 if (unlikely(work_done >= budget)) 5263 if (unlikely(work_done >= budget))
@@ -5141,6 +5291,40 @@ tx_recovery:
5141 return work_done; 5291 return work_done;
5142} 5292}
5143 5293
5294static void tg3_process_error(struct tg3 *tp)
5295{
5296 u32 val;
5297 bool real_error = false;
5298
5299 if (tg3_flag(tp, ERROR_PROCESSED))
5300 return;
5301
5302 /* Check Flow Attention register */
5303 val = tr32(HOSTCC_FLOW_ATTN);
5304 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5305 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5306 real_error = true;
5307 }
5308
5309 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5310 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5311 real_error = true;
5312 }
5313
5314 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5315 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5316 real_error = true;
5317 }
5318
5319 if (!real_error)
5320 return;
5321
5322 tg3_dump_state(tp);
5323
5324 tg3_flag_set(tp, ERROR_PROCESSED);
5325 schedule_work(&tp->reset_task);
5326}
5327
5144static int tg3_poll(struct napi_struct *napi, int budget) 5328static int tg3_poll(struct napi_struct *napi, int budget)
5145{ 5329{
5146 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 5330 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5149,17 +5333,20 @@ static int tg3_poll(struct napi_struct *napi, int budget)
5149 struct tg3_hw_status *sblk = tnapi->hw_status; 5333 struct tg3_hw_status *sblk = tnapi->hw_status;
5150 5334
5151 while (1) { 5335 while (1) {
5336 if (sblk->status & SD_STATUS_ERROR)
5337 tg3_process_error(tp);
5338
5152 tg3_poll_link(tp); 5339 tg3_poll_link(tp);
5153 5340
5154 work_done = tg3_poll_work(tnapi, work_done, budget); 5341 work_done = tg3_poll_work(tnapi, work_done, budget);
5155 5342
5156 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 5343 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5157 goto tx_recovery; 5344 goto tx_recovery;
5158 5345
5159 if (unlikely(work_done >= budget)) 5346 if (unlikely(work_done >= budget))
5160 break; 5347 break;
5161 5348
5162 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { 5349 if (tg3_flag(tp, TAGGED_STATUS)) {
5163 /* tp->last_tag is used in tg3_int_reenable() below 5350 /* tp->last_tag is used in tg3_int_reenable() below
5164 * to tell the hw how much work has been processed, 5351 * to tell the hw how much work has been processed,
5165 * so we must read it before checking for more work. 5352 * so we must read it before checking for more work.
@@ -5326,7 +5513,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5326 * interrupt is ours and will flush the status block. 5513 * interrupt is ours and will flush the status block.
5327 */ 5514 */
5328 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { 5515 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5329 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || 5516 if (tg3_flag(tp, CHIP_RESETTING) ||
5330 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 5517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5331 handled = 0; 5518 handled = 0;
5332 goto out; 5519 goto out;
@@ -5375,7 +5562,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5375 * interrupt is ours and will flush the status block. 5562 * interrupt is ours and will flush the status block.
5376 */ 5563 */
5377 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { 5564 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5378 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || 5565 if (tg3_flag(tp, CHIP_RESETTING) ||
5379 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 5566 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5380 handled = 0; 5567 handled = 0;
5381 goto out; 5568 goto out;
@@ -5488,14 +5675,14 @@ static void tg3_reset_task(struct work_struct *work)
5488 5675
5489 tg3_full_lock(tp, 1); 5676 tg3_full_lock(tp, 1);
5490 5677
5491 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; 5678 restart_timer = tg3_flag(tp, RESTART_TIMER);
5492 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 5679 tg3_flag_clear(tp, RESTART_TIMER);
5493 5680
5494 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { 5681 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5495 tp->write32_tx_mbox = tg3_write32_tx_mbox; 5682 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5496 tp->write32_rx_mbox = tg3_write_flush_reg32; 5683 tp->write32_rx_mbox = tg3_write_flush_reg32;
5497 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 5684 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5498 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; 5685 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5499 } 5686 }
5500 5687
5501 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 5688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
@@ -5515,21 +5702,13 @@ out:
5515 tg3_phy_start(tp); 5702 tg3_phy_start(tp);
5516} 5703}
5517 5704
5518static void tg3_dump_short_state(struct tg3 *tp)
5519{
5520 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5521 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5522 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5523 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5524}
5525
5526static void tg3_tx_timeout(struct net_device *dev) 5705static void tg3_tx_timeout(struct net_device *dev)
5527{ 5706{
5528 struct tg3 *tp = netdev_priv(dev); 5707 struct tg3 *tp = netdev_priv(dev);
5529 5708
5530 if (netif_msg_tx_err(tp)) { 5709 if (netif_msg_tx_err(tp)) {
5531 netdev_err(dev, "transmit timed out, resetting\n"); 5710 netdev_err(dev, "transmit timed out, resetting\n");
5532 tg3_dump_short_state(tp); 5711 tg3_dump_state(tp);
5533 } 5712 }
5534 5713
5535 schedule_work(&tp->reset_task); 5714 schedule_work(&tp->reset_task);
@@ -5548,7 +5727,7 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5548 int len) 5727 int len)
5549{ 5728{
5550#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 5729#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5551 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) 5730 if (tg3_flag(tp, 40BIT_DMA_BUG))
5552 return ((u64) mapping + len) > DMA_BIT_MASK(40); 5731 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5553 return 0; 5732 return 0;
5554#else 5733#else
@@ -5595,8 +5774,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5595 /* Make sure new skb does not cross any 4G boundaries. 5774 /* Make sure new skb does not cross any 4G boundaries.
5596 * Drop the packet if it does. 5775 * Drop the packet if it does.
5597 */ 5776 */
5598 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && 5777 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5599 tg3_4g_overflow_test(new_addr, new_skb->len)) { 5778 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5600 pci_unmap_single(tp->pdev, new_addr, new_skb->len, 5779 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5601 PCI_DMA_TODEVICE); 5780 PCI_DMA_TODEVICE);
5602 ret = -1; 5781 ret = -1;
@@ -5663,7 +5842,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5663} 5842}
5664 5843
5665/* hard_start_xmit for devices that don't have any bugs and 5844/* hard_start_xmit for devices that don't have any bugs and
5666 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. 5845 * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
5667 */ 5846 */
5668static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, 5847static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5669 struct net_device *dev) 5848 struct net_device *dev)
@@ -5677,7 +5856,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5677 5856
5678 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 5857 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5679 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 5858 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5680 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 5859 if (tg3_flag(tp, ENABLE_TSS))
5681 tnapi++; 5860 tnapi++;
5682 5861
5683 /* We are running in BH disabled context with netif_tx_lock 5862 /* We are running in BH disabled context with netif_tx_lock
@@ -5722,7 +5901,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5722 hdrlen = ip_tcp_len + tcp_opt_len; 5901 hdrlen = ip_tcp_len + tcp_opt_len;
5723 } 5902 }
5724 5903
5725 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { 5904 if (tg3_flag(tp, HW_TSO_3)) {
5726 mss |= (hdrlen & 0xc) << 12; 5905 mss |= (hdrlen & 0xc) << 12;
5727 if (hdrlen & 0x10) 5906 if (hdrlen & 0x10)
5728 base_flags |= 0x00000010; 5907 base_flags |= 0x00000010;
@@ -5755,7 +5934,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5755 tnapi->tx_buffers[entry].skb = skb; 5934 tnapi->tx_buffers[entry].skb = skb;
5756 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5935 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5757 5936
5758 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5937 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5759 !mss && skb->len > VLAN_ETH_FRAME_LEN) 5938 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5760 base_flags |= TXD_FLAG_JMB_PKT; 5939 base_flags |= TXD_FLAG_JMB_PKT;
5761 5940
@@ -5878,7 +6057,7 @@ tg3_tso_bug_end:
5878} 6057}
5879 6058
5880/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 6059/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5881 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. 6060 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5882 */ 6061 */
5883static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, 6062static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5884 struct net_device *dev) 6063 struct net_device *dev)
@@ -5893,7 +6072,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5893 6072
5894 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 6073 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5895 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 6074 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5896 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 6075 if (tg3_flag(tp, ENABLE_TSS))
5897 tnapi++; 6076 tnapi++;
5898 6077
5899 /* We are running in BH disabled context with netif_tx_lock 6078 /* We are running in BH disabled context with netif_tx_lock
@@ -5944,13 +6123,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5944 } 6123 }
5945 6124
5946 if (unlikely((ETH_HLEN + hdr_len) > 80) && 6125 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5947 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) 6126 tg3_flag(tp, TSO_BUG))
5948 return tg3_tso_bug(tp, skb); 6127 return tg3_tso_bug(tp, skb);
5949 6128
5950 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 6129 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5951 TXD_FLAG_CPU_POST_DMA); 6130 TXD_FLAG_CPU_POST_DMA);
5952 6131
5953 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 6132 if (tg3_flag(tp, HW_TSO_1) ||
6133 tg3_flag(tp, HW_TSO_2) ||
6134 tg3_flag(tp, HW_TSO_3)) {
5954 tcp_hdr(skb)->check = 0; 6135 tcp_hdr(skb)->check = 0;
5955 base_flags &= ~TXD_FLAG_TCPUDP_CSUM; 6136 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5956 } else 6137 } else
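With each hardware TSO generation now tracked as its own bit, the old composite mask TG3_FLG2_HW_TSO (which covered HW_TSO_1, HW_TSO_2 and HW_TSO_3 at once) no longer has a one-flag equivalent, so the test above expands into three tg3_flag() calls. If that expansion turned out to be common, it could be hidden behind a helper; the function below is purely illustrative and not part of this patch:

/* Hypothetical helper, not in this patch: true if the chip has any
 * hardware TSO generation available. */
static inline bool tg3_flag_any_hw_tso(struct tg3 *tp)
{
	return tg3_flag(tp, HW_TSO_1) ||
	       tg3_flag(tp, HW_TSO_2) ||
	       tg3_flag(tp, HW_TSO_3);
}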
@@ -5959,14 +6140,14 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5959 IPPROTO_TCP, 6140 IPPROTO_TCP,
5960 0); 6141 0);
5961 6142
5962 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { 6143 if (tg3_flag(tp, HW_TSO_3)) {
5963 mss |= (hdr_len & 0xc) << 12; 6144 mss |= (hdr_len & 0xc) << 12;
5964 if (hdr_len & 0x10) 6145 if (hdr_len & 0x10)
5965 base_flags |= 0x00000010; 6146 base_flags |= 0x00000010;
5966 base_flags |= (hdr_len & 0x3e0) << 5; 6147 base_flags |= (hdr_len & 0x3e0) << 5;
5967 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) 6148 } else if (tg3_flag(tp, HW_TSO_2))
5968 mss |= hdr_len << 9; 6149 mss |= hdr_len << 9;
5969 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || 6150 else if (tg3_flag(tp, HW_TSO_1) ||
5970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 6151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5971 if (tcp_opt_len || iph->ihl > 5) { 6152 if (tcp_opt_len || iph->ihl > 5) {
5972 int tsflags; 6153 int tsflags;
@@ -5988,7 +6169,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5988 base_flags |= (TXD_FLAG_VLAN | 6169 base_flags |= (TXD_FLAG_VLAN |
5989 (vlan_tx_tag_get(skb) << 16)); 6170 (vlan_tx_tag_get(skb) << 16));
5990 6171
5991 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 6172 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5992 !mss && skb->len > VLAN_ETH_FRAME_LEN) 6173 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5993 base_flags |= TXD_FLAG_JMB_PKT; 6174 base_flags |= TXD_FLAG_JMB_PKT;
5994 6175
@@ -6005,18 +6186,18 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6005 6186
6006 would_hit_hwbug = 0; 6187 would_hit_hwbug = 0;
6007 6188
6008 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) 6189 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6009 would_hit_hwbug = 1; 6190 would_hit_hwbug = 1;
6010 6191
6011 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && 6192 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6012 tg3_4g_overflow_test(mapping, len)) 6193 tg3_4g_overflow_test(mapping, len))
6013 would_hit_hwbug = 1; 6194 would_hit_hwbug = 1;
6014 6195
6015 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && 6196 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6016 tg3_40bit_overflow_test(tp, mapping, len)) 6197 tg3_40bit_overflow_test(tp, mapping, len))
6017 would_hit_hwbug = 1; 6198 would_hit_hwbug = 1;
6018 6199
6019 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) 6200 if (tg3_flag(tp, 5701_DMA_BUG))
6020 would_hit_hwbug = 1; 6201 would_hit_hwbug = 1;
6021 6202
6022 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 6203 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -6042,19 +6223,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
6042 if (pci_dma_mapping_error(tp->pdev, mapping)) 6223 if (pci_dma_mapping_error(tp->pdev, mapping))
6043 goto dma_error; 6224 goto dma_error;
6044 6225
6045 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && 6226 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6046 len <= 8) 6227 len <= 8)
6047 would_hit_hwbug = 1; 6228 would_hit_hwbug = 1;
6048 6229
6049 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && 6230 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6050 tg3_4g_overflow_test(mapping, len)) 6231 tg3_4g_overflow_test(mapping, len))
6051 would_hit_hwbug = 1; 6232 would_hit_hwbug = 1;
6052 6233
6053 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && 6234 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6054 tg3_40bit_overflow_test(tp, mapping, len)) 6235 tg3_40bit_overflow_test(tp, mapping, len))
6055 would_hit_hwbug = 1; 6236 would_hit_hwbug = 1;
6056 6237
6057 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6238 if (tg3_flag(tp, HW_TSO_1) ||
6239 tg3_flag(tp, HW_TSO_2) ||
6240 tg3_flag(tp, HW_TSO_3))
6058 tg3_set_txd(tnapi, entry, mapping, len, 6241 tg3_set_txd(tnapi, entry, mapping, len,
6059 base_flags, (i == last)|(mss << 1)); 6242 base_flags, (i == last)|(mss << 1));
6060 else 6243 else
@@ -6126,22 +6309,80 @@ dma_error:
6126 return NETDEV_TX_OK; 6309 return NETDEV_TX_OK;
6127} 6310}
6128 6311
6312static void tg3_set_loopback(struct net_device *dev, u32 features)
6313{
6314 struct tg3 *tp = netdev_priv(dev);
6315
6316 if (features & NETIF_F_LOOPBACK) {
6317 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6318 return;
6319
6320 /*
6321 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6322 * loopback mode if Half-Duplex mode was negotiated earlier.
6323 */
6324 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6325
6326 /* Enable internal MAC loopback mode */
6327 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6328 spin_lock_bh(&tp->lock);
6329 tw32(MAC_MODE, tp->mac_mode);
6330 netif_carrier_on(tp->dev);
6331 spin_unlock_bh(&tp->lock);
6332 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6333 } else {
6334 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6335 return;
6336
6337 /* Disable internal MAC loopback mode */
6338 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6339 spin_lock_bh(&tp->lock);
6340 tw32(MAC_MODE, tp->mac_mode);
6341 /* Force link status check */
6342 tg3_setup_phy(tp, 1);
6343 spin_unlock_bh(&tp->lock);
6344 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6345 }
6346}
6347
6348static u32 tg3_fix_features(struct net_device *dev, u32 features)
6349{
6350 struct tg3 *tp = netdev_priv(dev);
6351
6352 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6353 features &= ~NETIF_F_ALL_TSO;
6354
6355 return features;
6356}
6357
6358static int tg3_set_features(struct net_device *dev, u32 features)
6359{
6360 u32 changed = dev->features ^ features;
6361
6362 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6363 tg3_set_loopback(dev, features);
6364
6365 return 0;
6366}
6367
6129static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 6368static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6130 int new_mtu) 6369 int new_mtu)
6131{ 6370{
6132 dev->mtu = new_mtu; 6371 dev->mtu = new_mtu;
6133 6372
6134 if (new_mtu > ETH_DATA_LEN) { 6373 if (new_mtu > ETH_DATA_LEN) {
6135 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 6374 if (tg3_flag(tp, 5780_CLASS)) {
6136 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 6375 netdev_update_features(dev);
6137 ethtool_op_set_tso(dev, 0); 6376 tg3_flag_clear(tp, TSO_CAPABLE);
6138 } else { 6377 } else {
6139 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 6378 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6140 } 6379 }
6141 } else { 6380 } else {
6142 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 6381 if (tg3_flag(tp, 5780_CLASS)) {
6143 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 6382 tg3_flag_set(tp, TSO_CAPABLE);
6144 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; 6383 netdev_update_features(dev);
6384 }
6385 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6145 } 6386 }
6146} 6387}
6147 6388
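The hunk above adds three new functions: tg3_set_loopback() flips MAC_MODE_PORT_INT_LPBACK under tp->lock, tg3_fix_features() drops the TSO feature bits when a 5780-class chip runs with an MTU above ETH_DATA_LEN, and tg3_set_features() reacts to NETIF_F_LOOPBACK being toggled at run time. Where these callbacks get registered is not visible here; a sketch of the usual wiring, with the ops-table name and the other members assumed rather than taken from this patch, would look like:

/* Sketch only: the real tg3 net_device_ops table is not part of this
 * section, so the member set shown here is an assumption. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_fix_features	= tg3_fix_features,	/* mask features by MTU/chip */
	.ndo_set_features	= tg3_set_features,	/* react to feature toggles */
	/* ... remaining callbacks unchanged ... */
};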
@@ -6195,7 +6436,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
6195 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], 6436 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6196 tp->rx_pkt_map_sz); 6437 tp->rx_pkt_map_sz);
6197 6438
6198 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6439 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6199 for (i = tpr->rx_jmb_cons_idx; 6440 for (i = tpr->rx_jmb_cons_idx;
6200 i != tpr->rx_jmb_prod_idx; 6441 i != tpr->rx_jmb_prod_idx;
6201 i = (i + 1) & tp->rx_jmb_ring_mask) { 6442 i = (i + 1) & tp->rx_jmb_ring_mask) {
@@ -6211,8 +6452,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
6211 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], 6452 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6212 tp->rx_pkt_map_sz); 6453 tp->rx_pkt_map_sz);
6213 6454
6214 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 6455 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6215 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6216 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) 6456 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6217 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], 6457 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6218 TG3_RX_JMB_MAP_SZ); 6458 TG3_RX_JMB_MAP_SZ);
@@ -6249,7 +6489,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6249 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); 6489 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6250 6490
6251 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 6491 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6252 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && 6492 if (tg3_flag(tp, 5780_CLASS) &&
6253 tp->dev->mtu > ETH_DATA_LEN) 6493 tp->dev->mtu > ETH_DATA_LEN)
6254 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; 6494 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6255 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); 6495 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
@@ -6282,13 +6522,12 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6282 } 6522 }
6283 } 6523 }
6284 6524
6285 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) || 6525 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6286 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6287 goto done; 6526 goto done;
6288 6527
6289 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); 6528 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6290 6529
6291 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)) 6530 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6292 goto done; 6531 goto done;
6293 6532
6294 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { 6533 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
@@ -6357,8 +6596,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6357 if (!tpr->rx_std) 6596 if (!tpr->rx_std)
6358 goto err_out; 6597 goto err_out;
6359 6598
6360 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 6599 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6361 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6362 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), 6600 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6363 GFP_KERNEL); 6601 GFP_KERNEL);
6364 if (!tpr->rx_jmb_buffers) 6602 if (!tpr->rx_jmb_buffers)
@@ -6556,8 +6794,8 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6556 /* If multivector TSS is enabled, vector 0 does not handle 6794 /* If multivector TSS is enabled, vector 0 does not handle
6557 * tx interrupts. Don't allocate any resources for it. 6795 * tx interrupts. Don't allocate any resources for it.
6558 */ 6796 */
6559 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || 6797 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6560 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { 6798 (i && tg3_flag(tp, ENABLE_TSS))) {
6561 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * 6799 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6562 TG3_TX_RING_SIZE, 6800 TG3_TX_RING_SIZE,
6563 GFP_KERNEL); 6801 GFP_KERNEL);
@@ -6597,7 +6835,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6597 * If multivector RSS is enabled, vector 0 does not handle 6835 * If multivector RSS is enabled, vector 0 does not handle
6598 * rx or tx interrupts. Don't allocate any resources for it. 6836 * rx or tx interrupts. Don't allocate any resources for it.
6599 */ 6837 */
6600 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) 6838 if (!i && tg3_flag(tp, ENABLE_RSS))
6601 continue; 6839 continue;
6602 6840
6603 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 6841 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
@@ -6627,7 +6865,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
6627 unsigned int i; 6865 unsigned int i;
6628 u32 val; 6866 u32 val;
6629 6867
6630 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 6868 if (tg3_flag(tp, 5705_PLUS)) {
6631 switch (ofs) { 6869 switch (ofs) {
6632 case RCVLSC_MODE: 6870 case RCVLSC_MODE:
6633 case DMAC_MODE: 6871 case DMAC_MODE:
@@ -6737,7 +6975,7 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6737 u32 apedata; 6975 u32 apedata;
6738 6976
6739 /* NCSI does not support APE events */ 6977 /* NCSI does not support APE events */
6740 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI) 6978 if (tg3_flag(tp, APE_HAS_NCSI))
6741 return; 6979 return;
6742 6980
6743 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 6981 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
@@ -6776,7 +7014,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6776 u32 event; 7014 u32 event;
6777 u32 apedata; 7015 u32 apedata;
6778 7016
6779 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 7017 if (!tg3_flag(tp, ENABLE_APE))
6780 return; 7018 return;
6781 7019
6782 switch (kind) { 7020 switch (kind) {
@@ -6805,7 +7043,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6805 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); 7043 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6806 7044
6807 if (device_may_wakeup(&tp->pdev->dev) && 7045 if (device_may_wakeup(&tp->pdev->dev) &&
6808 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) { 7046 tg3_flag(tp, WOL_ENABLE)) {
6809 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, 7047 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6810 TG3_APE_HOST_WOL_SPEED_AUTO); 7048 TG3_APE_HOST_WOL_SPEED_AUTO);
6811 apedata = TG3_APE_HOST_DRVR_STATE_WOL; 7049 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
@@ -6834,7 +7072,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6834 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, 7072 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6835 NIC_SRAM_FIRMWARE_MBOX_MAGIC1); 7073 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6836 7074
6837 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 7075 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6838 switch (kind) { 7076 switch (kind) {
6839 case RESET_KIND_INIT: 7077 case RESET_KIND_INIT:
6840 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 7078 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
@@ -6864,7 +7102,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6864/* tp->lock is held. */ 7102/* tp->lock is held. */
6865static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) 7103static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6866{ 7104{
6867 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 7105 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6868 switch (kind) { 7106 switch (kind) {
6869 case RESET_KIND_INIT: 7107 case RESET_KIND_INIT:
6870 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 7108 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
@@ -6888,7 +7126,7 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6888/* tp->lock is held. */ 7126/* tp->lock is held. */
6889static void tg3_write_sig_legacy(struct tg3 *tp, int kind) 7127static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6890{ 7128{
6891 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 7129 if (tg3_flag(tp, ENABLE_ASF)) {
6892 switch (kind) { 7130 switch (kind) {
6893 case RESET_KIND_INIT: 7131 case RESET_KIND_INIT:
6894 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 7132 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
@@ -6939,9 +7177,8 @@ static int tg3_poll_fw(struct tg3 *tp)
6939 * of the above loop as an error, but do report the lack of 7177 * of the above loop as an error, but do report the lack of
6940 * running firmware once. 7178 * running firmware once.
6941 */ 7179 */
6942 if (i >= 100000 && 7180 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6943 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { 7181 tg3_flag_set(tp, NO_FWARE_REPORTED);
6944 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6945 7182
6946 netdev_info(tp->dev, "No firmware running\n"); 7183 netdev_info(tp->dev, "No firmware running\n");
6947 } 7184 }
@@ -6974,10 +7211,10 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6974 /* Set MAX PCI retry to zero. */ 7211 /* Set MAX PCI retry to zero. */
6975 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 7212 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6976 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && 7213 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6977 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) 7214 tg3_flag(tp, PCIX_MODE))
6978 val |= PCISTATE_RETRY_SAME_DMA; 7215 val |= PCISTATE_RETRY_SAME_DMA;
6979 /* Allow reads and writes to the APE register and memory space. */ 7216 /* Allow reads and writes to the APE register and memory space. */
6980 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 7217 if (tg3_flag(tp, ENABLE_APE))
6981 val |= PCISTATE_ALLOW_APE_CTLSPC_WR | 7218 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6982 PCISTATE_ALLOW_APE_SHMEM_WR | 7219 PCISTATE_ALLOW_APE_SHMEM_WR |
6983 PCISTATE_ALLOW_APE_PSPACE_WR; 7220 PCISTATE_ALLOW_APE_PSPACE_WR;
@@ -6986,7 +7223,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6986 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 7223 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6987 7224
6988 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { 7225 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6989 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 7226 if (tg3_flag(tp, PCI_EXPRESS))
6990 pcie_set_readrq(tp->pdev, tp->pcie_readrq); 7227 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
6991 else { 7228 else {
6992 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 7229 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
@@ -6997,7 +7234,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6997 } 7234 }
6998 7235
6999 /* Make sure PCI-X relaxed ordering bit is clear. */ 7236 /* Make sure PCI-X relaxed ordering bit is clear. */
7000 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 7237 if (tg3_flag(tp, PCIX_MODE)) {
7001 u16 pcix_cmd; 7238 u16 pcix_cmd;
7002 7239
7003 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 7240 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
@@ -7007,12 +7244,12 @@ static void tg3_restore_pci_state(struct tg3 *tp)
7007 pcix_cmd); 7244 pcix_cmd);
7008 } 7245 }
7009 7246
7010 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 7247 if (tg3_flag(tp, 5780_CLASS)) {
7011 7248
7012 /* Chip reset on 5780 will reset MSI enable bit, 7249 /* Chip reset on 5780 will reset MSI enable bit,
7013 * so need to restore it. 7250 * so need to restore it.
7014 */ 7251 */
7015 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 7252 if (tg3_flag(tp, USING_MSI)) {
7016 u16 ctrl; 7253 u16 ctrl;
7017 7254
7018 pci_read_config_word(tp->pdev, 7255 pci_read_config_word(tp->pdev,
@@ -7052,7 +7289,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7052 tg3_save_pci_state(tp); 7289 tg3_save_pci_state(tp);
7053 7290
7054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 7291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7055 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) 7292 tg3_flag(tp, 5755_PLUS))
7056 tw32(GRC_FASTBOOT_PC, 0); 7293 tw32(GRC_FASTBOOT_PC, 0);
7057 7294
7058 /* 7295 /*
@@ -7071,7 +7308,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7071 * at this time, but the irq handler may still be called due to irq 7308 * at this time, but the irq handler may still be called due to irq
7072 * sharing or irqpoll. 7309 * sharing or irqpoll.
7073 */ 7310 */
7074 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; 7311 tg3_flag_set(tp, CHIP_RESETTING);
7075 for (i = 0; i < tp->irq_cnt; i++) { 7312 for (i = 0; i < tp->irq_cnt; i++) {
7076 struct tg3_napi *tnapi = &tp->napi[i]; 7313 struct tg3_napi *tnapi = &tp->napi[i];
7077 if (tnapi->hw_status) { 7314 if (tnapi->hw_status) {
@@ -7094,10 +7331,10 @@ static int tg3_chip_reset(struct tg3 *tp)
7094 /* do the reset */ 7331 /* do the reset */
7095 val = GRC_MISC_CFG_CORECLK_RESET; 7332 val = GRC_MISC_CFG_CORECLK_RESET;
7096 7333
7097 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 7334 if (tg3_flag(tp, PCI_EXPRESS)) {
7098 /* Force PCIe 1.0a mode */ 7335 /* Force PCIe 1.0a mode */
7099 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 7336 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7100 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 7337 !tg3_flag(tp, 57765_PLUS) &&
7101 tr32(TG3_PCIE_PHY_TSTCTL) == 7338 tr32(TG3_PCIE_PHY_TSTCTL) ==
7102 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) 7339 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7103 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); 7340 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
@@ -7115,8 +7352,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7115 } 7352 }
7116 7353
7117 /* Manage gphy power for all CPMU absent PCIe devices. */ 7354 /* Manage gphy power for all CPMU absent PCIe devices. */
7118 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 7355 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7119 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7120 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 7356 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7121 7357
7122 tw32(GRC_MISC_CFG, val); 7358 tw32(GRC_MISC_CFG, val);
@@ -7149,7 +7385,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7149 7385
7150 udelay(120); 7386 udelay(120);
7151 7387
7152 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { 7388 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7153 u16 val16; 7389 u16 val16;
7154 7390
7155 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { 7391 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
@@ -7175,7 +7411,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7175 * Older PCIe devices only support the 128 byte 7411 * Older PCIe devices only support the 128 byte
7176 * MPS setting. Enforce the restriction. 7412 * MPS setting. Enforce the restriction.
7177 */ 7413 */
7178 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) 7414 if (!tg3_flag(tp, CPMU_PRESENT))
7179 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; 7415 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7180 pci_write_config_word(tp->pdev, 7416 pci_write_config_word(tp->pdev,
7181 tp->pcie_cap + PCI_EXP_DEVCTL, 7417 tp->pcie_cap + PCI_EXP_DEVCTL,
@@ -7194,10 +7430,11 @@ static int tg3_chip_reset(struct tg3 *tp)
7194 7430
7195 tg3_restore_pci_state(tp); 7431 tg3_restore_pci_state(tp);
7196 7432
7197 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; 7433 tg3_flag_clear(tp, CHIP_RESETTING);
7434 tg3_flag_clear(tp, ERROR_PROCESSED);
7198 7435
7199 val = 0; 7436 val = 0;
7200 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 7437 if (tg3_flag(tp, 5780_CLASS))
7201 val = tr32(MEMARB_MODE); 7438 val = tr32(MEMARB_MODE);
7202 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 7439 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7203 7440
@@ -7222,7 +7459,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7222 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7459 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7223 } 7460 }
7224 7461
7225 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 7462 if (tg3_flag(tp, ENABLE_APE))
7226 tp->mac_mode = MAC_MODE_APE_TX_EN | 7463 tp->mac_mode = MAC_MODE_APE_TX_EN |
7227 MAC_MODE_APE_RX_EN | 7464 MAC_MODE_APE_RX_EN |
7228 MAC_MODE_TDE_ENABLE; 7465 MAC_MODE_TDE_ENABLE;
@@ -7247,28 +7484,33 @@ static int tg3_chip_reset(struct tg3 *tp)
7247 7484
7248 tg3_mdio_start(tp); 7485 tg3_mdio_start(tp);
7249 7486
7250 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 7487 if (tg3_flag(tp, PCI_EXPRESS) &&
7251 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 7488 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7252 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 7489 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7253 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 7490 !tg3_flag(tp, 57765_PLUS)) {
7254 val = tr32(0x7c00); 7491 val = tr32(0x7c00);
7255 7492
7256 tw32(0x7c00, val | (1 << 25)); 7493 tw32(0x7c00, val | (1 << 25));
7257 } 7494 }
7258 7495
7496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7497 val = tr32(TG3_CPMU_CLCK_ORIDE);
7498 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7499 }
7500
7259 /* Reprobe ASF enable state. */ 7501 /* Reprobe ASF enable state. */
7260 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; 7502 tg3_flag_clear(tp, ENABLE_ASF);
7261 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; 7503 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7262 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 7504 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7263 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 7505 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7264 u32 nic_cfg; 7506 u32 nic_cfg;
7265 7507
7266 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 7508 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7267 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 7509 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7268 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 7510 tg3_flag_set(tp, ENABLE_ASF);
7269 tp->last_event_jiffies = jiffies; 7511 tp->last_event_jiffies = jiffies;
7270 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 7512 if (tg3_flag(tp, 5750_PLUS))
7271 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 7513 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7272 } 7514 }
7273 } 7515 }
7274 7516
@@ -7278,8 +7520,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7278/* tp->lock is held. */ 7520/* tp->lock is held. */
7279static void tg3_stop_fw(struct tg3 *tp) 7521static void tg3_stop_fw(struct tg3 *tp)
7280{ 7522{
7281 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 7523 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7282 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7283 /* Wait for RX cpu to ACK the previous event. */ 7524 /* Wait for RX cpu to ACK the previous event. */
7284 tg3_wait_for_event_ack(tp); 7525 tg3_wait_for_event_ack(tp);
7285 7526
@@ -7325,8 +7566,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7325{ 7566{
7326 int i; 7567 int i;
7327 7568
7328 BUG_ON(offset == TX_CPU_BASE && 7569 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7329 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7330 7570
7331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 7571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7332 u32 val = tr32(GRC_VCPU_EXT_CTRL); 7572 u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -7361,7 +7601,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7361 } 7601 }
7362 7602
7363 /* Clear firmware's nvram arbitration. */ 7603 /* Clear firmware's nvram arbitration. */
7364 if (tp->tg3_flags & TG3_FLAG_NVRAM) 7604 if (tg3_flag(tp, NVRAM))
7365 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 7605 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7366 return 0; 7606 return 0;
7367} 7607}
@@ -7379,15 +7619,14 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
7379 int err, lock_err, i; 7619 int err, lock_err, i;
7380 void (*write_op)(struct tg3 *, u32, u32); 7620 void (*write_op)(struct tg3 *, u32, u32);
7381 7621
7382 if (cpu_base == TX_CPU_BASE && 7622 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7383 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7384 netdev_err(tp->dev, 7623 netdev_err(tp->dev,
7385 "%s: Trying to load TX cpu firmware which is 5705\n", 7624 "%s: Trying to load TX cpu firmware which is 5705\n",
7386 __func__); 7625 __func__);
7387 return -EINVAL; 7626 return -EINVAL;
7388 } 7627 }
7389 7628
7390 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 7629 if (tg3_flag(tp, 5705_PLUS))
7391 write_op = tg3_write_mem; 7630 write_op = tg3_write_mem;
7392 else 7631 else
7393 write_op = tg3_write_indirect_reg32; 7632 write_op = tg3_write_indirect_reg32;
@@ -7473,8 +7712,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7473 return 0; 7712 return 0;
7474} 7713}
7475 7714
7476/* 5705 needs a special version of the TSO firmware. */
7477
7478/* tp->lock is held. */ 7715/* tp->lock is held. */
7479static int tg3_load_tso_firmware(struct tg3 *tp) 7716static int tg3_load_tso_firmware(struct tg3 *tp)
7480{ 7717{
@@ -7483,7 +7720,9 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
7483 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 7720 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7484 int err, i; 7721 int err, i;
7485 7722
7486 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7723 if (tg3_flag(tp, HW_TSO_1) ||
7724 tg3_flag(tp, HW_TSO_2) ||
7725 tg3_flag(tp, HW_TSO_3))
7487 return 0; 7726 return 0;
7488 7727
7489 fw_data = (void *)tp->fw->data; 7728 fw_data = (void *)tp->fw->data;
@@ -7552,7 +7791,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
7552 if (!netif_running(dev)) 7791 if (!netif_running(dev))
7553 return 0; 7792 return 0;
7554 7793
7555 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 7794 if (tg3_flag(tp, ENABLE_ASF)) {
7556 u32 addr0_high, addr0_low, addr1_high, addr1_low; 7795 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7557 7796
7558 addr0_high = tr32(MAC_ADDR_0_HIGH); 7797 addr0_high = tr32(MAC_ADDR_0_HIGH);
@@ -7587,7 +7826,7 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7587 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 7826 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7588 maxlen_flags); 7827 maxlen_flags);
7589 7828
7590 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7829 if (!tg3_flag(tp, 5705_PLUS))
7591 tg3_write_mem(tp, 7830 tg3_write_mem(tp,
7592 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 7831 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7593 nic_addr); 7832 nic_addr);
@@ -7598,7 +7837,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7598{ 7837{
7599 int i; 7838 int i;
7600 7839
7601 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { 7840 if (!tg3_flag(tp, ENABLE_TSS)) {
7602 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 7841 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7603 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 7842 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7604 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 7843 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
@@ -7608,7 +7847,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7608 tw32(HOSTCC_TXCOAL_MAXF_INT, 0); 7847 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7609 } 7848 }
7610 7849
7611 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { 7850 if (!tg3_flag(tp, ENABLE_RSS)) {
7612 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 7851 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7613 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 7852 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7614 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 7853 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
@@ -7618,7 +7857,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7618 tw32(HOSTCC_RXCOAL_MAXF_INT, 0); 7857 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7619 } 7858 }
7620 7859
7621 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7860 if (!tg3_flag(tp, 5705_PLUS)) {
7622 u32 val = ec->stats_block_coalesce_usecs; 7861 u32 val = ec->stats_block_coalesce_usecs;
7623 7862
7624 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 7863 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
@@ -7640,7 +7879,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7640 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; 7879 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7641 tw32(reg, ec->rx_max_coalesced_frames_irq); 7880 tw32(reg, ec->rx_max_coalesced_frames_irq);
7642 7881
7643 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { 7882 if (tg3_flag(tp, ENABLE_TSS)) {
7644 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; 7883 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7645 tw32(reg, ec->tx_coalesce_usecs); 7884 tw32(reg, ec->tx_coalesce_usecs);
7646 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; 7885 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
@@ -7655,7 +7894,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7655 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); 7894 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7656 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 7895 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7657 7896
7658 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { 7897 if (tg3_flag(tp, ENABLE_TSS)) {
7659 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); 7898 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7660 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); 7899 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7661 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); 7900 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
@@ -7671,10 +7910,9 @@ static void tg3_rings_reset(struct tg3 *tp)
7671 struct tg3_napi *tnapi = &tp->napi[0]; 7910 struct tg3_napi *tnapi = &tp->napi[0];
7672 7911
7673 /* Disable all transmit rings but the first. */ 7912 /* Disable all transmit rings but the first. */
7674 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7913 if (!tg3_flag(tp, 5705_PLUS))
7675 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 7914 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7676 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7915 else if (tg3_flag(tp, 5717_PLUS))
7677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7678 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; 7916 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7679 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7917 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7680 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 7918 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
@@ -7688,10 +7926,9 @@ static void tg3_rings_reset(struct tg3 *tp)
7688 7926
7689 7927
7690 /* Disable all receive return rings but the first. */ 7928 /* Disable all receive return rings but the first. */
7691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7929 if (tg3_flag(tp, 5717_PLUS))
7692 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7693 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; 7930 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7694 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7931 else if (!tg3_flag(tp, 5705_PLUS))
7695 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; 7932 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7696 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 7933 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7697 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
@@ -7708,16 +7945,16 @@ static void tg3_rings_reset(struct tg3 *tp)
7708 tw32_mailbox_f(tp->napi[0].int_mbox, 1); 7945 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7709 7946
7710 /* Zero mailbox registers. */ 7947 /* Zero mailbox registers. */
7711 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { 7948 if (tg3_flag(tp, SUPPORT_MSIX)) {
7712 for (i = 1; i < tp->irq_max; i++) { 7949 for (i = 1; i < tp->irq_max; i++) {
7713 tp->napi[i].tx_prod = 0; 7950 tp->napi[i].tx_prod = 0;
7714 tp->napi[i].tx_cons = 0; 7951 tp->napi[i].tx_cons = 0;
7715 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 7952 if (tg3_flag(tp, ENABLE_TSS))
7716 tw32_mailbox(tp->napi[i].prodmbox, 0); 7953 tw32_mailbox(tp->napi[i].prodmbox, 0);
7717 tw32_rx_mbox(tp->napi[i].consmbox, 0); 7954 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7718 tw32_mailbox_f(tp->napi[i].int_mbox, 1); 7955 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7719 } 7956 }
7720 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) 7957 if (!tg3_flag(tp, ENABLE_TSS))
7721 tw32_mailbox(tp->napi[0].prodmbox, 0); 7958 tw32_mailbox(tp->napi[0].prodmbox, 0);
7722 } else { 7959 } else {
7723 tp->napi[0].tx_prod = 0; 7960 tp->napi[0].tx_prod = 0;
@@ -7727,7 +7964,7 @@ static void tg3_rings_reset(struct tg3 *tp)
7727 } 7964 }
7728 7965
7729 /* Make sure the NIC-based send BD rings are disabled. */ 7966 /* Make sure the NIC-based send BD rings are disabled. */
7730 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7967 if (!tg3_flag(tp, 5705_PLUS)) {
7731 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; 7968 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7732 for (i = 0; i < 16; i++) 7969 for (i = 0; i < 16; i++)
7733 tw32_tx_mbox(mbox + i * 8, 0); 7970 tw32_tx_mbox(mbox + i * 8, 0);
@@ -7787,6 +8024,47 @@ static void tg3_rings_reset(struct tg3 *tp)
7787 } 8024 }
7788} 8025}
7789 8026
8027static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8028{
8029 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8030
8031 if (!tg3_flag(tp, 5750_PLUS) ||
8032 tg3_flag(tp, 5780_CLASS) ||
8033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8035 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8036 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8038 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8039 else
8040 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8041
8042 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8043 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8044
8045 val = min(nic_rep_thresh, host_rep_thresh);
8046 tw32(RCVBDI_STD_THRESH, val);
8047
8048 if (tg3_flag(tp, 57765_PLUS))
8049 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8050
8051 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8052 return;
8053
8054 if (!tg3_flag(tp, 5705_PLUS))
8055 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8056 else
8057 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8058
8059 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8060
8061 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8062 tw32(RCVBDI_JUMBO_THRESH, val);
8063
8064 if (tg3_flag(tp, 57765_PLUS))
8065 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8066}
8067
7790/* tp->lock is held. */ 8068/* tp->lock is held. */
7791static int tg3_reset_hw(struct tg3 *tp, int reset_phy) 8069static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7792{ 8070{
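The new tg3_setup_rxbd_thresholds() added above consolidates the RX buffer-descriptor replenish-threshold setup that tg3_reset_hw() used to open-code; the old inline version is removed further down, in the hunk at @@ -8054,21 +8339,10 @@, and replaced with a single call to the new helper. For the standard ring the calculation reduces to the clamp below, restated as a stand-alone function with the chip-specific BD-cache size passed in as a parameter; the helper name is illustrative only:

#include <linux/kernel.h>	/* min(), max_t() */

/* Clamp the standard-ring replenish threshold: at most half of the
 * NIC's on-chip BD cache and at most rx_std_max_post on the device
 * side, at most rx_pending/8 (but at least 1) on the host side, and
 * program the smaller of the two. */
static u32 tg3_std_replenish_thresh(u32 bdcache_maxcnt, u32 rx_pending,
				    u32 rx_std_max_post)
{
	u32 nic_rep_thresh = min(bdcache_maxcnt / 2, rx_std_max_post);
	u32 host_rep_thresh = max_t(u32, rx_pending / 8, 1);

	return min(nic_rep_thresh, host_rep_thresh);
}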
@@ -7800,7 +8078,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7800 8078
7801 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 8079 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7802 8080
7803 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) 8081 if (tg3_flag(tp, INIT_COMPLETE))
7804 tg3_abort_hw(tp, 1); 8082 tg3_abort_hw(tp, 1);
7805 8083
7806 /* Enable MAC control of LPI */ 8084 /* Enable MAC control of LPI */
@@ -7820,7 +8098,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7820 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) 8098 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7821 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; 8099 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7822 8100
7823 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8101 if (tg3_flag(tp, ENABLE_APE))
7824 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; 8102 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7825 8103
7826 tw32_f(TG3_CPMU_EEE_MODE, val); 8104 tw32_f(TG3_CPMU_EEE_MODE, val);
@@ -7879,7 +8157,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7879 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); 8157 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7880 } 8158 }
7881 8159
7882 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) { 8160 if (tg3_flag(tp, L1PLLPD_EN)) {
7883 u32 grc_mode = tr32(GRC_MODE); 8161 u32 grc_mode = tr32(GRC_MODE);
7884 8162
7885 /* Access the lower 1K of PL PCIE block registers. */ 8163 /* Access the lower 1K of PL PCIE block registers. */
@@ -7920,20 +8198,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7920 * other revision. But do not set this on PCI Express 8198 * other revision. But do not set this on PCI Express
7921 * chips and don't even touch the clocks if the CPMU is present. 8199 * chips and don't even touch the clocks if the CPMU is present.
7922 */ 8200 */
7923 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { 8201 if (!tg3_flag(tp, CPMU_PRESENT)) {
7924 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 8202 if (!tg3_flag(tp, PCI_EXPRESS))
7925 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; 8203 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7926 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 8204 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7927 } 8205 }
7928 8206
7929 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && 8207 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7930 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { 8208 tg3_flag(tp, PCIX_MODE)) {
7931 val = tr32(TG3PCI_PCISTATE); 8209 val = tr32(TG3PCI_PCISTATE);
7932 val |= PCISTATE_RETRY_SAME_DMA; 8210 val |= PCISTATE_RETRY_SAME_DMA;
7933 tw32(TG3PCI_PCISTATE, val); 8211 tw32(TG3PCI_PCISTATE, val);
7934 } 8212 }
7935 8213
7936 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 8214 if (tg3_flag(tp, ENABLE_APE)) {
7937 /* Allow reads and writes to the 8215 /* Allow reads and writes to the
7938 * APE register and memory space. 8216 * APE register and memory space.
7939 */ 8217 */
@@ -7960,11 +8238,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7960 if (err) 8238 if (err)
7961 return err; 8239 return err;
7962 8240
7963 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 8241 if (tg3_flag(tp, 57765_PLUS)) {
7964 val = tr32(TG3PCI_DMA_RW_CTRL) & 8242 val = tr32(TG3PCI_DMA_RW_CTRL) &
7965 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 8243 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7966 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) 8244 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7967 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; 8245 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8246 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8247 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8248 val |= DMA_RWCTRL_TAGGED_STAT_WA;
7968 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 8249 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7969 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 8250 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7970 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { 8251 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7999,7 +8280,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7999 tw32(GRC_MISC_CFG, val); 8280 tw32(GRC_MISC_CFG, val);
8000 8281
8001 /* Initialize MBUF/DESC pool. */ 8282 /* Initialize MBUF/DESC pool. */
8002 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 8283 if (tg3_flag(tp, 5750_PLUS)) {
8003 /* Do nothing. */ 8284 /* Do nothing. */
8004 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { 8285 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8005 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); 8286 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
@@ -8009,7 +8290,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8009 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 8290 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8010 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 8291 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8011 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 8292 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8012 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 8293 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8013 int fw_len; 8294 int fw_len;
8014 8295
8015 fw_len = tp->fw_len; 8296 fw_len = tp->fw_len;
@@ -8043,6 +8324,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8043 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; 8324 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 8325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8045 val |= BUFMGR_MODE_NO_TX_UNDERRUN; 8326 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8328 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8329 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8330 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8046 tw32(BUFMGR_MODE, val); 8331 tw32(BUFMGR_MODE, val);
8047 for (i = 0; i < 2000; i++) { 8332 for (i = 0; i < 2000; i++) {
8048 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 8333 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
@@ -8054,21 +8339,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8054 return -ENODEV; 8339 return -ENODEV;
8055 } 8340 }
8056 8341
8057 /* Setup replenish threshold. */ 8342 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8058 val = tp->rx_pending / 8; 8343 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8059 if (val == 0)
8060 val = 1;
8061 else if (val > tp->rx_std_max_post)
8062 val = tp->rx_std_max_post;
8063 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8064 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8065 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8066 8344
8067 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) 8345 tg3_setup_rxbd_thresholds(tp);
8068 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
8069 }
8070
8071 tw32(RCVBDI_STD_THRESH, val);
8072 8346
8073 /* Initialize TG3_BDINFO's at: 8347 /* Initialize TG3_BDINFO's at:
8074 * RCVDBDI_STD_BD: standard eth size rx ring 8348 * RCVDBDI_STD_BD: standard eth size rx ring
@@ -8091,13 +8365,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8091 ((u64) tpr->rx_std_mapping >> 32)); 8365 ((u64) tpr->rx_std_mapping >> 32));
8092 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 8366 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8093 ((u64) tpr->rx_std_mapping & 0xffffffff)); 8367 ((u64) tpr->rx_std_mapping & 0xffffffff));
8094 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 8368 if (!tg3_flag(tp, 5717_PLUS))
8095 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
8096 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 8369 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8097 NIC_SRAM_RX_BUFFER_DESC); 8370 NIC_SRAM_RX_BUFFER_DESC);
8098 8371
8099 /* Disable the mini ring */ 8372 /* Disable the mini ring */
8100 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8373 if (!tg3_flag(tp, 5705_PLUS))
8101 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 8374 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8102 BDINFO_FLAGS_DISABLED); 8375 BDINFO_FLAGS_DISABLED);
8103 8376
@@ -8105,20 +8378,18 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8105 * blocks on those devices that have them. 8378 * blocks on those devices that have them.
8106 */ 8379 */
8107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 8380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8108 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 8381 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8109 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8110 /* Setup replenish threshold. */
8111 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8112 8382
8113 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 8383 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8114 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 8384 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8115 ((u64) tpr->rx_jmb_mapping >> 32)); 8385 ((u64) tpr->rx_jmb_mapping >> 32));
8116 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 8386 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8117 ((u64) tpr->rx_jmb_mapping & 0xffffffff)); 8387 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8388 val = TG3_RX_JMB_RING_SIZE(tp) <<
8389 BDINFO_FLAGS_MAXLEN_SHIFT;
8118 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 8390 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8119 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 8391 val | BDINFO_FLAGS_USE_EXT_RECV);
8120 BDINFO_FLAGS_USE_EXT_RECV); 8392 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8121 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 8393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8123 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 8394 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8124 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 8395 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
@@ -8127,32 +8398,27 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8127 BDINFO_FLAGS_DISABLED); 8398 BDINFO_FLAGS_DISABLED);
8128 } 8399 }
8129 8400
8130 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 8401 if (tg3_flag(tp, 57765_PLUS)) {
8131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 8402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8132 val = RX_STD_MAX_SIZE_5705; 8403 val = TG3_RX_STD_MAX_SIZE_5700;
8133 else 8404 else
8134 val = RX_STD_MAX_SIZE_5717; 8405 val = TG3_RX_STD_MAX_SIZE_5717;
8135 val <<= BDINFO_FLAGS_MAXLEN_SHIFT; 8406 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8136 val |= (TG3_RX_STD_DMA_SZ << 2); 8407 val |= (TG3_RX_STD_DMA_SZ << 2);
8137 } else 8408 } else
8138 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 8409 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8139 } else 8410 } else
8140 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; 8411 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8141 8412
8142 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 8413 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8143 8414
8144 tpr->rx_std_prod_idx = tp->rx_pending; 8415 tpr->rx_std_prod_idx = tp->rx_pending;
8145 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); 8416 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8146 8417
8147 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 8418 tpr->rx_jmb_prod_idx =
8148 tp->rx_jumbo_pending : 0; 8419 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8149 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); 8420 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8150 8421
8151 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
8152 tw32(STD_REPLENISH_LWM, 32);
8153 tw32(JMB_REPLENISH_LWM, 16);
8154 }
8155
8156 tg3_rings_reset(tp); 8422 tg3_rings_reset(tp);
8157 8423
8158 /* Initialize MAC address and backoff seed. */ 8424 /* Initialize MAC address and backoff seed. */
@@ -8165,10 +8431,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8165 /* The slot time is changed by tg3_setup_phy if we 8431 /* The slot time is changed by tg3_setup_phy if we
8166 * run at gigabit with half duplex. 8432 * run at gigabit with half duplex.
8167 */ 8433 */
8168 tw32(MAC_TX_LENGTHS, 8434 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8169 (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 8435 (6 << TX_LENGTHS_IPG_SHIFT) |
8170 (6 << TX_LENGTHS_IPG_SHIFT) | 8436 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8171 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 8437
8438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8439 val |= tr32(MAC_TX_LENGTHS) &
8440 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8441 TX_LENGTHS_CNT_DWN_VAL_MSK);
8442
8443 tw32(MAC_TX_LENGTHS, val);
8172 8444
8173 /* Receive rules. */ 8445 /* Receive rules. */
8174 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 8446 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
@@ -8195,33 +8467,39 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8195 8467
8196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 8468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8197 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { 8469 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8198 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && 8470 if (tg3_flag(tp, TSO_CAPABLE) &&
8199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 8471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8200 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 8472 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8201 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 8473 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8202 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { 8474 !tg3_flag(tp, IS_5788)) {
8203 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 8475 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8204 } 8476 }
8205 } 8477 }
8206 8478
8207 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 8479 if (tg3_flag(tp, PCI_EXPRESS))
8208 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 8480 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8209 8481
8210 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 8482 if (tg3_flag(tp, HW_TSO_1) ||
8483 tg3_flag(tp, HW_TSO_2) ||
8484 tg3_flag(tp, HW_TSO_3))
8211 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 8485 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8212 8486
8213 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || 8487 if (tg3_flag(tp, HW_TSO_3) ||
8214 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 8488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 8489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8216 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 8490 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8217 8491
8492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8493 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8494
8218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 8495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 8496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 8497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 8498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8222 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8499 tg3_flag(tp, 57765_PLUS)) {
8223 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8500 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8225 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | 8503 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8226 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | 8504 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8227 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); 8505 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -8233,7 +8511,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8233 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8511 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8234 } 8512 }
8235 8513
8236 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8237 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); 8516 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8238 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | 8517 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8239 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | 8518 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
@@ -8241,12 +8520,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8241 } 8520 }
8242 8521
8243 /* Receive/send statistics. */ 8522 /* Receive/send statistics. */
8244 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 8523 if (tg3_flag(tp, 5750_PLUS)) {
8245 val = tr32(RCVLPC_STATS_ENABLE); 8524 val = tr32(RCVLPC_STATS_ENABLE);
8246 val &= ~RCVLPC_STATSENAB_DACK_FIX; 8525 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8247 tw32(RCVLPC_STATS_ENABLE, val); 8526 tw32(RCVLPC_STATS_ENABLE, val);
8248 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 8527 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8249 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 8528 tg3_flag(tp, TSO_CAPABLE)) {
8250 val = tr32(RCVLPC_STATS_ENABLE); 8529 val = tr32(RCVLPC_STATS_ENABLE);
8251 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 8530 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8252 tw32(RCVLPC_STATS_ENABLE, val); 8531 tw32(RCVLPC_STATS_ENABLE, val);
@@ -8269,7 +8548,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8269 8548
8270 __tg3_set_coalesce(tp, &tp->coal); 8549 __tg3_set_coalesce(tp, &tp->coal);
8271 8550
8272 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 8551 if (!tg3_flag(tp, 5705_PLUS)) {
8273 /* Status/statistics block address. See tg3_timer, 8552 /* Status/statistics block address. See tg3_timer,
8274 * the tg3_periodic_fetch_stats call there, and 8553 * the tg3_periodic_fetch_stats call there, and
8275 * tg3_get_stats to see how this works for 5705/5750 chips. 8554 * tg3_get_stats to see how this works for 5705/5750 chips.
@@ -8295,7 +8574,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8295 8574
8296 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 8575 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8297 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 8576 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8298 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8577 if (!tg3_flag(tp, 5705_PLUS))
8299 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 8578 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8300 8579
8301 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 8580 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
@@ -8305,13 +8584,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8305 udelay(10); 8584 udelay(10);
8306 } 8585 }
8307 8586
8308 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8587 if (tg3_flag(tp, ENABLE_APE))
8309 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 8588 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8310 else 8589 else
8311 tp->mac_mode = 0; 8590 tp->mac_mode = 0;
8312 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 8591 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8313 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 8592 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8314 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 8593 if (!tg3_flag(tp, 5705_PLUS) &&
8315 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 8594 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8316 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) 8595 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8317 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 8596 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -8319,12 +8598,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8319 udelay(40); 8598 udelay(40);
8320 8599
8321 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 8600 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8322 * If TG3_FLG2_IS_NIC is zero, we should read the 8601 * If TG3_FLAG_IS_NIC is zero, we should read the
8323 * register to preserve the GPIO settings for LOMs. The GPIOs, 8602 * register to preserve the GPIO settings for LOMs. The GPIOs,
8324 * whether used as inputs or outputs, are set by boot code after 8603 * whether used as inputs or outputs, are set by boot code after
8325 * reset. 8604 * reset.
8326 */ 8605 */
8327 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { 8606 if (!tg3_flag(tp, IS_NIC)) {
8328 u32 gpio_mask; 8607 u32 gpio_mask;
8329 8608
8330 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | 8609 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
@@ -8342,21 +8621,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8342 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 8621 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8343 8622
8344 /* GPIO1 must be driven high for eeprom write protect */ 8623 /* GPIO1 must be driven high for eeprom write protect */
8345 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) 8624 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8346 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 8625 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8347 GRC_LCLCTRL_GPIO_OUTPUT1); 8626 GRC_LCLCTRL_GPIO_OUTPUT1);
8348 } 8627 }
8349 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8628 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8350 udelay(100); 8629 udelay(100);
8351 8630
8352 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) && 8631 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8353 tp->irq_cnt > 1) {
8354 val = tr32(MSGINT_MODE); 8632 val = tr32(MSGINT_MODE);
8355 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; 8633 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8356 tw32(MSGINT_MODE, val); 8634 tw32(MSGINT_MODE, val);
8357 } 8635 }
8358 8636
8359 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 8637 if (!tg3_flag(tp, 5705_PLUS)) {
8360 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 8638 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8361 udelay(40); 8639 udelay(40);
8362 } 8640 }
@@ -8369,18 +8647,18 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8369 8647
8370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 8648 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8371 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { 8649 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8372 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 8650 if (tg3_flag(tp, TSO_CAPABLE) &&
8373 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || 8651 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8374 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { 8652 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8375 /* nothing */ 8653 /* nothing */
8376 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 8654 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8377 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { 8655 !tg3_flag(tp, IS_5788)) {
8378 val |= WDMAC_MODE_RX_ACCEL; 8656 val |= WDMAC_MODE_RX_ACCEL;
8379 } 8657 }
8380 } 8658 }
8381 8659
8382 /* Enable host coalescing bug fix */ 8660 /* Enable host coalescing bug fix */
8383 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 8661 if (tg3_flag(tp, 5755_PLUS))
8384 val |= WDMAC_MODE_STATUS_TAG_FIX; 8662 val |= WDMAC_MODE_STATUS_TAG_FIX;
8385 8663
8386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 8664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -8389,7 +8667,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8389 tw32_f(WDMAC_MODE, val); 8667 tw32_f(WDMAC_MODE, val);
8390 udelay(40); 8668 udelay(40);
8391 8669
8392 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 8670 if (tg3_flag(tp, PCIX_MODE)) {
8393 u16 pcix_cmd; 8671 u16 pcix_cmd;
8394 8672
8395 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 8673 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
@@ -8409,7 +8687,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8409 udelay(40); 8687 udelay(40);
8410 8688
8411 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 8689 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8412 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 8690 if (!tg3_flag(tp, 5705_PLUS))
8413 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 8691 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8414 8692
8415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) 8693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
@@ -8421,15 +8699,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8421 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 8699 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8422 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 8700 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8423 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; 8701 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8702 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8426 val |= RCVDBDI_MODE_LRG_RING_SZ; 8703 val |= RCVDBDI_MODE_LRG_RING_SZ;
8427 tw32(RCVDBDI_MODE, val); 8704 tw32(RCVDBDI_MODE, val);
8428 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 8705 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8429 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 8706 if (tg3_flag(tp, HW_TSO_1) ||
8707 tg3_flag(tp, HW_TSO_2) ||
8708 tg3_flag(tp, HW_TSO_3))
8430 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 8709 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8431 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; 8710 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8432 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 8711 if (tg3_flag(tp, ENABLE_TSS))
8433 val |= SNDBDI_MODE_MULTI_TXQ_EN; 8712 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8434 tw32(SNDBDI_MODE, val); 8713 tw32(SNDBDI_MODE, val);
8435 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 8714 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
@@ -8440,20 +8719,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8440 return err; 8719 return err;
8441 } 8720 }
8442 8721
8443 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 8722 if (tg3_flag(tp, TSO_CAPABLE)) {
8444 err = tg3_load_tso_firmware(tp); 8723 err = tg3_load_tso_firmware(tp);
8445 if (err) 8724 if (err)
8446 return err; 8725 return err;
8447 } 8726 }
8448 8727
8449 tp->tx_mode = TX_MODE_ENABLE; 8728 tp->tx_mode = TX_MODE_ENABLE;
8450 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 8729
8730 if (tg3_flag(tp, 5755_PLUS) ||
8451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 8731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8452 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; 8732 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8733
8734 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8735 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8736 tp->tx_mode &= ~val;
8737 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8738 }
8739
8453 tw32_f(MAC_TX_MODE, tp->tx_mode); 8740 tw32_f(MAC_TX_MODE, tp->tx_mode);
8454 udelay(100); 8741 udelay(100);
8455 8742
8456 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) { 8743 if (tg3_flag(tp, ENABLE_RSS)) {
8457 u32 reg = MAC_RSS_INDIR_TBL_0; 8744 u32 reg = MAC_RSS_INDIR_TBL_0;
8458 u8 *ent = (u8 *)&val; 8745 u8 *ent = (u8 *)&val;
8459 8746
@@ -8482,10 +8769,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8482 } 8769 }
8483 8770
8484 tp->rx_mode = RX_MODE_ENABLE; 8771 tp->rx_mode = RX_MODE_ENABLE;
8485 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 8772 if (tg3_flag(tp, 5755_PLUS))
8486 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 8773 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8487 8774
8488 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) 8775 if (tg3_flag(tp, ENABLE_RSS))
8489 tp->rx_mode |= RX_MODE_RSS_ENABLE | 8776 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8490 RX_MODE_RSS_ITBL_HASH_BITS_7 | 8777 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8491 RX_MODE_RSS_IPV6_HASH_EN | 8778 RX_MODE_RSS_IPV6_HASH_EN |
@@ -8532,7 +8819,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 8819 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8533 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 8820 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8534 /* Use hardware link auto-negotiation */ 8821 /* Use hardware link auto-negotiation */
8535 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; 8822 tg3_flag_set(tp, HW_AUTONEG);
8536 } 8823 }
8537 8824
8538 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 8825 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
@@ -8546,7 +8833,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8546 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8833 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8547 } 8834 }
8548 8835
8549 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 8836 if (!tg3_flag(tp, USE_PHYLIB)) {
8550 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { 8837 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8551 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; 8838 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8552 tp->link_config.speed = tp->link_config.orig_speed; 8839 tp->link_config.speed = tp->link_config.orig_speed;
@@ -8579,12 +8866,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8579 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 8866 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8580 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 8867 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8581 8868
8582 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 8869 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8583 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8584 limit = 8; 8870 limit = 8;
8585 else 8871 else
8586 limit = 16; 8872 limit = 16;
8587 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) 8873 if (tg3_flag(tp, ENABLE_ASF))
8588 limit -= 4; 8874 limit -= 4;
8589 switch (limit) { 8875 switch (limit) {
8590 case 16: 8876 case 16:
@@ -8622,7 +8908,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8622 break; 8908 break;
8623 } 8909 }
8624 8910
8625 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8911 if (tg3_flag(tp, ENABLE_APE))
8626 /* Write our heartbeat update interval to APE. */ 8912 /* Write our heartbeat update interval to APE. */
8627 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, 8913 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8628 APE_HOST_HEARTBEAT_INT_DISABLE); 8914 APE_HOST_HEARTBEAT_INT_DISABLE);
@@ -8688,7 +8974,19 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
8688 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); 8974 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8689 8975
8690 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); 8976 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8691 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); 8977 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8978 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8979 } else {
8980 u32 val = tr32(HOSTCC_FLOW_ATTN);
8981 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8982 if (val) {
8983 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8984 sp->rx_discards.low += val;
8985 if (sp->rx_discards.low < val)
8986 sp->rx_discards.high += 1;
8987 }
8988 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8989 }
8692 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); 8990 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8693} 8991}
8694 8992
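
The 5717 branch above folds the mbuf-low-watermark event into `rx_discards`, which is kept as a {high, low} pair of 32-bit words; the `if (low < val) high += 1` test detects unsigned wrap-around. A self-contained sketch of that carry idiom:

#include <linux/types.h>

/* A 64-bit statistics counter stored as two 32-bit halves, as many NIC
 * statistics blocks keep them.  Adding a 32-bit delta carries into the
 * high word when the low word wraps. */
struct stat64 {
	u32 high;
	u32 low;
};

static void stat64_add32(struct stat64 *s, u32 delta)
{
	s->low += delta;
	if (s->low < delta)	/* wrapped past 2^32 */
		s->high++;
}
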
@@ -8701,7 +8999,7 @@ static void tg3_timer(unsigned long __opaque)
8701 8999
8702 spin_lock(&tp->lock); 9000 spin_lock(&tp->lock);
8703 9001
8704 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { 9002 if (!tg3_flag(tp, TAGGED_STATUS)) {
8705 /* All of this garbage is because when using non-tagged 9003 /* All of this garbage is because when using non-tagged
8706 * IRQ status the mailbox/status_block protocol the chip 9004 * IRQ status the mailbox/status_block protocol the chip
8707 * uses with the cpu is race prone. 9005 * uses with the cpu is race prone.
@@ -8715,7 +9013,7 @@ static void tg3_timer(unsigned long __opaque)
8715 } 9013 }
8716 9014
8717 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 9015 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8718 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; 9016 tg3_flag_set(tp, RESTART_TIMER);
8719 spin_unlock(&tp->lock); 9017 spin_unlock(&tp->lock);
8720 schedule_work(&tp->reset_task); 9018 schedule_work(&tp->reset_task);
8721 return; 9019 return;
@@ -8724,7 +9022,7 @@ static void tg3_timer(unsigned long __opaque)
8724 9022
8725 /* This part only runs once per second. */ 9023 /* This part only runs once per second. */
8726 if (!--tp->timer_counter) { 9024 if (!--tp->timer_counter) {
8727 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 9025 if (tg3_flag(tp, 5705_PLUS))
8728 tg3_periodic_fetch_stats(tp); 9026 tg3_periodic_fetch_stats(tp);
8729 9027
8730 if (tp->setlpicnt && !--tp->setlpicnt) { 9028 if (tp->setlpicnt && !--tp->setlpicnt) {
@@ -8733,7 +9031,7 @@ static void tg3_timer(unsigned long __opaque)
8733 val | TG3_CPMU_EEEMD_LPI_ENABLE); 9031 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8734 } 9032 }
8735 9033
8736 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 9034 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8737 u32 mac_stat; 9035 u32 mac_stat;
8738 int phy_event; 9036 int phy_event;
8739 9037
@@ -8748,7 +9046,7 @@ static void tg3_timer(unsigned long __opaque)
8748 9046
8749 if (phy_event) 9047 if (phy_event)
8750 tg3_setup_phy(tp, 0); 9048 tg3_setup_phy(tp, 0);
8751 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { 9049 } else if (tg3_flag(tp, POLL_SERDES)) {
8752 u32 mac_stat = tr32(MAC_STATUS); 9050 u32 mac_stat = tr32(MAC_STATUS);
8753 int need_setup = 0; 9051 int need_setup = 0;
8754 9052
@@ -8773,7 +9071,7 @@ static void tg3_timer(unsigned long __opaque)
8773 tg3_setup_phy(tp, 0); 9071 tg3_setup_phy(tp, 0);
8774 } 9072 }
8775 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 9073 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8776 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 9074 tg3_flag(tp, 5780_CLASS)) {
8777 tg3_serdes_parallel_detect(tp); 9075 tg3_serdes_parallel_detect(tp);
8778 } 9076 }
8779 9077
@@ -8798,8 +9096,7 @@ static void tg3_timer(unsigned long __opaque)
8798 * resets. 9096 * resets.
8799 */ 9097 */
8800 if (!--tp->asf_counter) { 9098 if (!--tp->asf_counter) {
8801 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 9099 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8802 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8803 tg3_wait_for_event_ack(tp); 9100 tg3_wait_for_event_ack(tp);
8804 9101
8805 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 9102 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
@@ -8835,16 +9132,16 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
8835 name[IFNAMSIZ-1] = 0; 9132 name[IFNAMSIZ-1] = 0;
8836 } 9133 }
8837 9134
8838 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 9135 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8839 fn = tg3_msi; 9136 fn = tg3_msi;
8840 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) 9137 if (tg3_flag(tp, 1SHOT_MSI))
8841 fn = tg3_msi_1shot; 9138 fn = tg3_msi_1shot;
8842 flags = IRQF_SAMPLE_RANDOM; 9139 flags = 0;
8843 } else { 9140 } else {
8844 fn = tg3_interrupt; 9141 fn = tg3_interrupt;
8845 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 9142 if (tg3_flag(tp, TAGGED_STATUS))
8846 fn = tg3_interrupt_tagged; 9143 fn = tg3_interrupt_tagged;
8847 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; 9144 flags = IRQF_SHARED;
8848 } 9145 }
8849 9146
8850 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); 9147 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
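
The hunk above also drops IRQF_SAMPLE_RANDOM and keeps IRQF_SHARED only for the legacy INTx case, since MSI/MSI-X vectors are never shared. A reduced sketch of the same handler/flag selection, with hypothetical stub handlers in place of the driver's real ISRs:

#include <linux/types.h>
#include <linux/interrupt.h>

/* Stub handlers standing in for the real interrupt service routines. */
static irqreturn_t nic_isr_msi(int irq, void *dev_id)    { return IRQ_HANDLED; }
static irqreturn_t nic_isr_legacy(int irq, void *dev_id) { return IRQ_HANDLED; }

static int nic_request_irq(unsigned int vec, bool using_msi, void *ctx)
{
	/* MSI/MSI-X vectors are exclusive; legacy INTx may be shared. */
	irq_handler_t fn = using_msi ? nic_isr_msi : nic_isr_legacy;
	unsigned long flags = using_msi ? 0 : IRQF_SHARED;

	return request_irq(vec, fn, flags, "nic", ctx);
}
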
@@ -8868,8 +9165,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8868 * Turn off MSI one shot mode. Otherwise this test has no 9165 * Turn off MSI one shot mode. Otherwise this test has no
8869 * observable way to know whether the interrupt was delivered. 9166 * observable way to know whether the interrupt was delivered.
8870 */ 9167 */
8871 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 9168 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8872 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8873 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; 9169 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8874 tw32(MSGINT_MODE, val); 9170 tw32(MSGINT_MODE, val);
8875 } 9171 }
@@ -8911,8 +9207,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
8911 9207
8912 if (intr_ok) { 9208 if (intr_ok) {
8913 /* Reenable MSI one shot mode. */ 9209 /* Reenable MSI one shot mode. */
8914 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 9210 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8915 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8916 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; 9211 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8917 tw32(MSGINT_MODE, val); 9212 tw32(MSGINT_MODE, val);
8918 } 9213 }
@@ -8930,7 +9225,7 @@ static int tg3_test_msi(struct tg3 *tp)
8930 int err; 9225 int err;
8931 u16 pci_cmd; 9226 u16 pci_cmd;
8932 9227
8933 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) 9228 if (!tg3_flag(tp, USING_MSI))
8934 return 0; 9229 return 0;
8935 9230
8936 /* Turn off SERR reporting in case MSI terminates with Master 9231 /* Turn off SERR reporting in case MSI terminates with Master
@@ -8960,7 +9255,7 @@ static int tg3_test_msi(struct tg3 *tp)
8960 9255
8961 pci_disable_msi(tp->pdev); 9256 pci_disable_msi(tp->pdev);
8962 9257
8963 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 9258 tg3_flag_clear(tp, USING_MSI);
8964 tp->napi[0].irq_vec = tp->pdev->irq; 9259 tp->napi[0].irq_vec = tp->pdev->irq;
8965 9260
8966 err = tg3_request_irq(tp, 0); 9261 err = tg3_request_irq(tp, 0);
@@ -9057,9 +9352,11 @@ static bool tg3_enable_msix(struct tg3 *tp)
9057 } 9352 }
9058 9353
9059 if (tp->irq_cnt > 1) { 9354 if (tp->irq_cnt > 1) {
9060 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 9355 tg3_flag_set(tp, ENABLE_RSS);
9061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 9356
9062 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; 9357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9359 tg3_flag_set(tp, ENABLE_TSS);
9063 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); 9360 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9064 } 9361 }
9065 } 9362 }
@@ -9069,8 +9366,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
9069 9366
9070static void tg3_ints_init(struct tg3 *tp) 9367static void tg3_ints_init(struct tg3 *tp)
9071{ 9368{
9072 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && 9369 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9073 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { 9370 !tg3_flag(tp, TAGGED_STATUS)) {
9074 /* All MSI supporting chips should support tagged 9371 /* All MSI supporting chips should support tagged
9075 * status. Assert that this is the case. 9372 * status. Assert that this is the case.
9076 */ 9373 */
@@ -9079,21 +9376,19 @@ static void tg3_ints_init(struct tg3 *tp)
9079 goto defcfg; 9376 goto defcfg;
9080 } 9377 }
9081 9378
9082 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) 9379 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9083 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; 9380 tg3_flag_set(tp, USING_MSIX);
9084 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && 9381 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9085 pci_enable_msi(tp->pdev) == 0) 9382 tg3_flag_set(tp, USING_MSI);
9086 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
9087 9383
9088 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 9384 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9089 u32 msi_mode = tr32(MSGINT_MODE); 9385 u32 msi_mode = tr32(MSGINT_MODE);
9090 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) && 9386 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9091 tp->irq_cnt > 1)
9092 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 9387 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9093 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 9388 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9094 } 9389 }
9095defcfg: 9390defcfg:
9096 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { 9391 if (!tg3_flag(tp, USING_MSIX)) {
9097 tp->irq_cnt = 1; 9392 tp->irq_cnt = 1;
9098 tp->napi[0].irq_vec = tp->pdev->irq; 9393 tp->napi[0].irq_vec = tp->pdev->irq;
9099 netif_set_real_num_tx_queues(tp->dev, 1); 9394 netif_set_real_num_tx_queues(tp->dev, 1);
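
tg3_ints_init()/tg3_ints_fini() above try MSI-X, then MSI, then fall back to the legacy vector in pdev->irq. A stripped-down sketch of the MSI-or-legacy part of that sequence; the MSI-X branch and the tg3-specific bookkeeping are omitted and the config struct is a stand-in:

#include <linux/types.h>
#include <linux/pci.h>

struct nic_irq_cfg {
	bool using_msi;
	unsigned int irq_vec;
};

static void nic_ints_init(struct pci_dev *pdev, struct nic_irq_cfg *cfg)
{
	/* pci_enable_msi() returns 0 on success and updates pdev->irq,
	 * so both the MSI case and the legacy fallback read the vector
	 * from the same place afterwards. */
	cfg->using_msi = (pci_enable_msi(pdev) == 0);
	cfg->irq_vec = pdev->irq;
}

static void nic_ints_fini(struct pci_dev *pdev, struct nic_irq_cfg *cfg)
{
	if (cfg->using_msi)
		pci_disable_msi(pdev);
	cfg->using_msi = false;
}
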
@@ -9103,12 +9398,14 @@ defcfg:
9103 9398
9104static void tg3_ints_fini(struct tg3 *tp) 9399static void tg3_ints_fini(struct tg3 *tp)
9105{ 9400{
9106 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) 9401 if (tg3_flag(tp, USING_MSIX))
9107 pci_disable_msix(tp->pdev); 9402 pci_disable_msix(tp->pdev);
9108 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 9403 else if (tg3_flag(tp, USING_MSI))
9109 pci_disable_msi(tp->pdev); 9404 pci_disable_msi(tp->pdev);
9110 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; 9405 tg3_flag_clear(tp, USING_MSI);
9111 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS); 9406 tg3_flag_clear(tp, USING_MSIX);
9407 tg3_flag_clear(tp, ENABLE_RSS);
9408 tg3_flag_clear(tp, ENABLE_TSS);
9112} 9409}
9113 9410
9114static int tg3_open(struct net_device *dev) 9411static int tg3_open(struct net_device *dev)
@@ -9123,10 +9420,10 @@ static int tg3_open(struct net_device *dev)
9123 return err; 9420 return err;
9124 } else if (err) { 9421 } else if (err) {
9125 netdev_warn(tp->dev, "TSO capability disabled\n"); 9422 netdev_warn(tp->dev, "TSO capability disabled\n");
9126 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 9423 tg3_flag_clear(tp, TSO_CAPABLE);
9127 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 9424 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9128 netdev_notice(tp->dev, "TSO capability restored\n"); 9425 netdev_notice(tp->dev, "TSO capability restored\n");
9129 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 9426 tg3_flag_set(tp, TSO_CAPABLE);
9130 } 9427 }
9131 } 9428 }
9132 9429
@@ -9139,7 +9436,7 @@ static int tg3_open(struct net_device *dev)
9139 tg3_full_lock(tp, 0); 9436 tg3_full_lock(tp, 0);
9140 9437
9141 tg3_disable_ints(tp); 9438 tg3_disable_ints(tp);
9142 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 9439 tg3_flag_clear(tp, INIT_COMPLETE);
9143 9440
9144 tg3_full_unlock(tp); 9441 tg3_full_unlock(tp);
9145 9442
@@ -9180,7 +9477,7 @@ static int tg3_open(struct net_device *dev)
9180 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9477 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9181 tg3_free_rings(tp); 9478 tg3_free_rings(tp);
9182 } else { 9479 } else {
9183 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 9480 if (tg3_flag(tp, TAGGED_STATUS))
9184 tp->timer_offset = HZ; 9481 tp->timer_offset = HZ;
9185 else 9482 else
9186 tp->timer_offset = HZ / 10; 9483 tp->timer_offset = HZ / 10;
@@ -9202,7 +9499,7 @@ static int tg3_open(struct net_device *dev)
9202 if (err) 9499 if (err)
9203 goto err_out3; 9500 goto err_out3;
9204 9501
9205 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 9502 if (tg3_flag(tp, USING_MSI)) {
9206 err = tg3_test_msi(tp); 9503 err = tg3_test_msi(tp);
9207 9504
9208 if (err) { 9505 if (err) {
@@ -9214,8 +9511,7 @@ static int tg3_open(struct net_device *dev)
9214 goto err_out2; 9511 goto err_out2;
9215 } 9512 }
9216 9513
9217 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 9514 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9218 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9219 u32 val = tr32(PCIE_TRANSACTION_CFG); 9515 u32 val = tr32(PCIE_TRANSACTION_CFG);
9220 9516
9221 tw32(PCIE_TRANSACTION_CFG, 9517 tw32(PCIE_TRANSACTION_CFG,
@@ -9228,13 +9524,20 @@ static int tg3_open(struct net_device *dev)
9228 tg3_full_lock(tp, 0); 9524 tg3_full_lock(tp, 0);
9229 9525
9230 add_timer(&tp->timer); 9526 add_timer(&tp->timer);
9231 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 9527 tg3_flag_set(tp, INIT_COMPLETE);
9232 tg3_enable_ints(tp); 9528 tg3_enable_ints(tp);
9233 9529
9234 tg3_full_unlock(tp); 9530 tg3_full_unlock(tp);
9235 9531
9236 netif_tx_start_all_queues(dev); 9532 netif_tx_start_all_queues(dev);
9237 9533
9534 /*
 9535 * Reset the loopback feature if it was turned on while the device was
 9536 * down; make sure it is installed properly now.
9537 */
9538 if (dev->features & NETIF_F_LOOPBACK)
9539 tg3_set_loopback(dev, dev->features);
9540
9238 return 0; 9541 return 0;
9239 9542
9240err_out3: 9543err_out3:
@@ -9277,7 +9580,7 @@ static int tg3_close(struct net_device *dev)
9277 9580
9278 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9581 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9279 tg3_free_rings(tp); 9582 tg3_free_rings(tp);
9280 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 9583 tg3_flag_clear(tp, INIT_COMPLETE);
9281 9584
9282 tg3_full_unlock(tp); 9585 tg3_full_unlock(tp);
9283 9586
@@ -9534,7 +9837,7 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9534 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG 9837 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9535 * flag clear. 9838 * flag clear.
9536 */ 9839 */
9537 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) 9840 if (!tg3_flag(tp, ENABLE_ASF))
9538 rx_mode |= RX_MODE_KEEP_VLAN_TAG; 9841 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9539#endif 9842#endif
9540 9843
@@ -9588,82 +9891,26 @@ static void tg3_set_rx_mode(struct net_device *dev)
9588 tg3_full_unlock(tp); 9891 tg3_full_unlock(tp);
9589} 9892}
9590 9893
9591#define TG3_REGDUMP_LEN (32 * 1024)
9592
9593static int tg3_get_regs_len(struct net_device *dev) 9894static int tg3_get_regs_len(struct net_device *dev)
9594{ 9895{
9595 return TG3_REGDUMP_LEN; 9896 return TG3_REG_BLK_SIZE;
9596} 9897}
9597 9898
9598static void tg3_get_regs(struct net_device *dev, 9899static void tg3_get_regs(struct net_device *dev,
9599 struct ethtool_regs *regs, void *_p) 9900 struct ethtool_regs *regs, void *_p)
9600{ 9901{
9601 u32 *p = _p;
9602 struct tg3 *tp = netdev_priv(dev); 9902 struct tg3 *tp = netdev_priv(dev);
9603 u8 *orig_p = _p;
9604 int i;
9605 9903
9606 regs->version = 0; 9904 regs->version = 0;
9607 9905
9608 memset(p, 0, TG3_REGDUMP_LEN); 9906 memset(_p, 0, TG3_REG_BLK_SIZE);
9609 9907
9610 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 9908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9611 return; 9909 return;
9612 9910
9613 tg3_full_lock(tp, 0); 9911 tg3_full_lock(tp, 0);
9614 9912
9615#define __GET_REG32(reg) (*(p)++ = tr32(reg)) 9913 tg3_dump_legacy_regs(tp, (u32 *)_p);
9616#define GET_REG32_LOOP(base, len) \
9617do { p = (u32 *)(orig_p + (base)); \
9618 for (i = 0; i < len; i += 4) \
9619 __GET_REG32((base) + i); \
9620} while (0)
9621#define GET_REG32_1(reg) \
9622do { p = (u32 *)(orig_p + (reg)); \
9623 __GET_REG32((reg)); \
9624} while (0)
9625
9626 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9627 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9628 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9629 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9630 GET_REG32_1(SNDDATAC_MODE);
9631 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9632 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9633 GET_REG32_1(SNDBDC_MODE);
9634 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9635 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9636 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9637 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9638 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9639 GET_REG32_1(RCVDCC_MODE);
9640 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9641 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9642 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9643 GET_REG32_1(MBFREE_MODE);
9644 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9645 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9646 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9647 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9648 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9649 GET_REG32_1(RX_CPU_MODE);
9650 GET_REG32_1(RX_CPU_STATE);
9651 GET_REG32_1(RX_CPU_PGMCTR);
9652 GET_REG32_1(RX_CPU_HWBKPT);
9653 GET_REG32_1(TX_CPU_MODE);
9654 GET_REG32_1(TX_CPU_STATE);
9655 GET_REG32_1(TX_CPU_PGMCTR);
9656 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9657 GET_REG32_LOOP(FTQ_RESET, 0x120);
9658 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9659 GET_REG32_1(DMAC_MODE);
9660 GET_REG32_LOOP(GRC_MODE, 0x4c);
9661 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9662 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9663
9664#undef __GET_REG32
9665#undef GET_REG32_LOOP
9666#undef GET_REG32_1
9667 9914
9668 tg3_full_unlock(tp); 9915 tg3_full_unlock(tp);
9669} 9916}
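
tg3_get_regs() now delegates to tg3_dump_legacy_regs() instead of the removed GET_REG32_LOOP macro block, but the underlying idea is unchanged: copy 32-bit registers into the ethtool buffer at the same offsets they occupy in the register window. A generic sketch of that offset-preserving loop:

#include <linux/types.h>
#include <linux/io.h>

/* Copy the 32-bit registers in [base, base + len) into @buf at the
 * same offsets, clamped to @limit bytes of output.  @regs is the
 * ioremap()ed register window. */
static void dump_reg_block(void __iomem *regs, u32 *buf,
			   u32 base, u32 len, u32 limit)
{
	u32 off;

	for (off = base; off < base + len && off < limit; off += 4)
		buf[off / 4] = readl(regs + off);
}
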
@@ -9683,7 +9930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
9683 u32 i, offset, len, b_offset, b_count; 9930 u32 i, offset, len, b_offset, b_count;
9684 __be32 val; 9931 __be32 val;
9685 9932
9686 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) 9933 if (tg3_flag(tp, NO_NVRAM))
9687 return -EINVAL; 9934 return -EINVAL;
9688 9935
9689 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 9936 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
@@ -9751,7 +9998,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
9751 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 9998 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9752 return -EAGAIN; 9999 return -EAGAIN;
9753 10000
9754 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 10001 if (tg3_flag(tp, NO_NVRAM) ||
9755 eeprom->magic != TG3_EEPROM_MAGIC) 10002 eeprom->magic != TG3_EEPROM_MAGIC)
9756 return -EINVAL; 10003 return -EINVAL;
9757 10004
@@ -9803,7 +10050,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9803{ 10050{
9804 struct tg3 *tp = netdev_priv(dev); 10051 struct tg3 *tp = netdev_priv(dev);
9805 10052
9806 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10053 if (tg3_flag(tp, USE_PHYLIB)) {
9807 struct phy_device *phydev; 10054 struct phy_device *phydev;
9808 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 10055 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9809 return -EAGAIN; 10056 return -EAGAIN;
@@ -9831,10 +10078,10 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9831 10078
9832 cmd->advertising = tp->link_config.advertising; 10079 cmd->advertising = tp->link_config.advertising;
9833 if (netif_running(dev)) { 10080 if (netif_running(dev)) {
9834 cmd->speed = tp->link_config.active_speed; 10081 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9835 cmd->duplex = tp->link_config.active_duplex; 10082 cmd->duplex = tp->link_config.active_duplex;
9836 } else { 10083 } else {
9837 cmd->speed = SPEED_INVALID; 10084 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9838 cmd->duplex = DUPLEX_INVALID; 10085 cmd->duplex = DUPLEX_INVALID;
9839 } 10086 }
9840 cmd->phy_address = tp->phy_addr; 10087 cmd->phy_address = tp->phy_addr;
@@ -9848,8 +10095,9 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9848static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 10095static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9849{ 10096{
9850 struct tg3 *tp = netdev_priv(dev); 10097 struct tg3 *tp = netdev_priv(dev);
10098 u32 speed = ethtool_cmd_speed(cmd);
9851 10099
9852 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10100 if (tg3_flag(tp, USE_PHYLIB)) {
9853 struct phy_device *phydev; 10101 struct phy_device *phydev;
9854 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 10102 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9855 return -EAGAIN; 10103 return -EAGAIN;
@@ -9897,14 +10145,14 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9897 cmd->advertising &= mask; 10145 cmd->advertising &= mask;
9898 } else { 10146 } else {
9899 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { 10147 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9900 if (cmd->speed != SPEED_1000) 10148 if (speed != SPEED_1000)
9901 return -EINVAL; 10149 return -EINVAL;
9902 10150
9903 if (cmd->duplex != DUPLEX_FULL) 10151 if (cmd->duplex != DUPLEX_FULL)
9904 return -EINVAL; 10152 return -EINVAL;
9905 } else { 10153 } else {
9906 if (cmd->speed != SPEED_100 && 10154 if (speed != SPEED_100 &&
9907 cmd->speed != SPEED_10) 10155 speed != SPEED_10)
9908 return -EINVAL; 10156 return -EINVAL;
9909 } 10157 }
9910 } 10158 }
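
The get/set_settings hunks switch to ethtool_cmd_speed()/ethtool_cmd_speed_set(), which combine the `speed` and `speed_hi` halves of struct ethtool_cmd rather than touching cmd->speed directly. Minimal usage sketch:

#include <linux/types.h>
#include <linux/ethtool.h>

static void report_gigabit(struct ethtool_cmd *cmd)
{
	/* Writes both cmd->speed and cmd->speed_hi. */
	ethtool_cmd_speed_set(cmd, SPEED_1000);
}

static bool is_10_or_100(const struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	return speed == SPEED_10 || speed == SPEED_100;
}
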
@@ -9919,7 +10167,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9919 tp->link_config.duplex = DUPLEX_INVALID; 10167 tp->link_config.duplex = DUPLEX_INVALID;
9920 } else { 10168 } else {
9921 tp->link_config.advertising = 0; 10169 tp->link_config.advertising = 0;
9922 tp->link_config.speed = cmd->speed; 10170 tp->link_config.speed = speed;
9923 tp->link_config.duplex = cmd->duplex; 10171 tp->link_config.duplex = cmd->duplex;
9924 } 10172 }
9925 10173
@@ -9949,14 +10197,12 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9949{ 10197{
9950 struct tg3 *tp = netdev_priv(dev); 10198 struct tg3 *tp = netdev_priv(dev);
9951 10199
9952 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && 10200 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
9953 device_can_wakeup(&tp->pdev->dev))
9954 wol->supported = WAKE_MAGIC; 10201 wol->supported = WAKE_MAGIC;
9955 else 10202 else
9956 wol->supported = 0; 10203 wol->supported = 0;
9957 wol->wolopts = 0; 10204 wol->wolopts = 0;
9958 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && 10205 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
9959 device_can_wakeup(&tp->pdev->dev))
9960 wol->wolopts = WAKE_MAGIC; 10206 wol->wolopts = WAKE_MAGIC;
9961 memset(&wol->sopass, 0, sizeof(wol->sopass)); 10207 memset(&wol->sopass, 0, sizeof(wol->sopass));
9962} 10208}
@@ -9969,19 +10215,18 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9969 if (wol->wolopts & ~WAKE_MAGIC) 10215 if (wol->wolopts & ~WAKE_MAGIC)
9970 return -EINVAL; 10216 return -EINVAL;
9971 if ((wol->wolopts & WAKE_MAGIC) && 10217 if ((wol->wolopts & WAKE_MAGIC) &&
9972 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp))) 10218 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
9973 return -EINVAL; 10219 return -EINVAL;
9974 10220
9975 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); 10221 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
9976 10222
9977 spin_lock_bh(&tp->lock); 10223 spin_lock_bh(&tp->lock);
9978 if (device_may_wakeup(dp)) 10224 if (device_may_wakeup(dp))
9979 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 10225 tg3_flag_set(tp, WOL_ENABLE);
9980 else 10226 else
9981 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; 10227 tg3_flag_clear(tp, WOL_ENABLE);
9982 spin_unlock_bh(&tp->lock); 10228 spin_unlock_bh(&tp->lock);
9983 10229
9984
9985 return 0; 10230 return 0;
9986} 10231}
9987 10232
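
tg3_get_wol()/tg3_set_wol() above gate everything on device_can_wakeup() and record the user's choice with device_set_wakeup_enable(), leaving device_may_wakeup() as the single source of truth for the suspend path. A reduced sketch of the set side, limited to WAKE_MAGIC as in the driver; @dev would be the PCI function's struct device (e.g. &pdev->dev):

#include <linux/errno.h>
#include <linux/device.h>
#include <linux/ethtool.h>

static int nic_set_wol(struct device *dev, struct ethtool_wolinfo *wol)
{
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) && !device_can_wakeup(dev))
		return -EINVAL;

	device_set_wakeup_enable(dev, wol->wolopts & WAKE_MAGIC);
	return 0;
}
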
@@ -9997,33 +10242,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
9997 tp->msg_enable = value; 10242 tp->msg_enable = value;
9998} 10243}
9999 10244
10000static int tg3_set_tso(struct net_device *dev, u32 value)
10001{
10002 struct tg3 *tp = netdev_priv(dev);
10003
10004 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
10005 if (value)
10006 return -EINVAL;
10007 return 0;
10008 }
10009 if ((dev->features & NETIF_F_IPV6_CSUM) &&
10010 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
10011 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
10012 if (value) {
10013 dev->features |= NETIF_F_TSO6;
10014 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
10015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10016 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
10017 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
10018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
10019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10020 dev->features |= NETIF_F_TSO_ECN;
10021 } else
10022 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
10023 }
10024 return ethtool_op_set_tso(dev, value);
10025}
10026
10027static int tg3_nway_reset(struct net_device *dev) 10245static int tg3_nway_reset(struct net_device *dev)
10028{ 10246{
10029 struct tg3 *tp = netdev_priv(dev); 10247 struct tg3 *tp = netdev_priv(dev);
@@ -10035,7 +10253,7 @@ static int tg3_nway_reset(struct net_device *dev)
10035 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 10253 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10036 return -EINVAL; 10254 return -EINVAL;
10037 10255
10038 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10256 if (tg3_flag(tp, USE_PHYLIB)) {
10039 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 10257 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10040 return -EAGAIN; 10258 return -EAGAIN;
10041 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); 10259 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
@@ -10064,7 +10282,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
10064 10282
10065 ering->rx_max_pending = tp->rx_std_ring_mask; 10283 ering->rx_max_pending = tp->rx_std_ring_mask;
10066 ering->rx_mini_max_pending = 0; 10284 ering->rx_mini_max_pending = 0;
10067 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) 10285 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10068 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; 10286 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10069 else 10287 else
10070 ering->rx_jumbo_max_pending = 0; 10288 ering->rx_jumbo_max_pending = 0;
@@ -10073,7 +10291,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
10073 10291
10074 ering->rx_pending = tp->rx_pending; 10292 ering->rx_pending = tp->rx_pending;
10075 ering->rx_mini_pending = 0; 10293 ering->rx_mini_pending = 0;
10076 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) 10294 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10077 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 10295 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10078 else 10296 else
10079 ering->rx_jumbo_pending = 0; 10297 ering->rx_jumbo_pending = 0;
@@ -10090,7 +10308,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
10090 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || 10308 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10091 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 10309 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10092 (ering->tx_pending <= MAX_SKB_FRAGS) || 10310 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10093 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && 10311 (tg3_flag(tp, TSO_BUG) &&
10094 (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) 10312 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10095 return -EINVAL; 10313 return -EINVAL;
10096 10314
@@ -10104,7 +10322,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
10104 10322
10105 tp->rx_pending = ering->rx_pending; 10323 tp->rx_pending = ering->rx_pending;
10106 10324
10107 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && 10325 if (tg3_flag(tp, MAX_RXPEND_64) &&
10108 tp->rx_pending > 63) 10326 tp->rx_pending > 63)
10109 tp->rx_pending = 63; 10327 tp->rx_pending = 63;
10110 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 10328 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
@@ -10131,7 +10349,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
10131{ 10349{
10132 struct tg3 *tp = netdev_priv(dev); 10350 struct tg3 *tp = netdev_priv(dev);
10133 10351
10134 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; 10352 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10135 10353
10136 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) 10354 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10137 epause->rx_pause = 1; 10355 epause->rx_pause = 1;
@@ -10149,7 +10367,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
10149 struct tg3 *tp = netdev_priv(dev); 10367 struct tg3 *tp = netdev_priv(dev);
10150 int err = 0; 10368 int err = 0;
10151 10369
10152 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10370 if (tg3_flag(tp, USE_PHYLIB)) {
10153 u32 newadv; 10371 u32 newadv;
10154 struct phy_device *phydev; 10372 struct phy_device *phydev;
10155 10373
@@ -10177,9 +10395,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
10177 newadv = 0; 10395 newadv = 0;
10178 10396
10179 if (epause->autoneg) 10397 if (epause->autoneg)
10180 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 10398 tg3_flag_set(tp, PAUSE_AUTONEG);
10181 else 10399 else
10182 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; 10400 tg3_flag_clear(tp, PAUSE_AUTONEG);
10183 10401
10184 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { 10402 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10185 u32 oldadv = phydev->advertising & 10403 u32 oldadv = phydev->advertising &
@@ -10221,9 +10439,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
10221 tg3_full_lock(tp, irq_sync); 10439 tg3_full_lock(tp, irq_sync);
10222 10440
10223 if (epause->autoneg) 10441 if (epause->autoneg)
10224 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 10442 tg3_flag_set(tp, PAUSE_AUTONEG);
10225 else 10443 else
10226 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; 10444 tg3_flag_clear(tp, PAUSE_AUTONEG);
10227 if (epause->rx_pause) 10445 if (epause->rx_pause)
10228 tp->link_config.flowctrl |= FLOW_CTRL_RX; 10446 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10229 else 10447 else
@@ -10246,50 +10464,6 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
10246 return err; 10464 return err;
10247} 10465}
10248 10466
10249static u32 tg3_get_rx_csum(struct net_device *dev)
10250{
10251 struct tg3 *tp = netdev_priv(dev);
10252 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10253}
10254
10255static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10256{
10257 struct tg3 *tp = netdev_priv(dev);
10258
10259 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10260 if (data != 0)
10261 return -EINVAL;
10262 return 0;
10263 }
10264
10265 spin_lock_bh(&tp->lock);
10266 if (data)
10267 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10268 else
10269 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10270 spin_unlock_bh(&tp->lock);
10271
10272 return 0;
10273}
10274
10275static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10276{
10277 struct tg3 *tp = netdev_priv(dev);
10278
10279 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10280 if (data != 0)
10281 return -EINVAL;
10282 return 0;
10283 }
10284
10285 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10286 ethtool_op_set_tx_ipv6_csum(dev, data);
10287 else
10288 ethtool_op_set_tx_csum(dev, data);
10289
10290 return 0;
10291}
10292
10293static int tg3_get_sset_count(struct net_device *dev, int sset) 10467static int tg3_get_sset_count(struct net_device *dev, int sset)
10294{ 10468{
10295 switch (sset) { 10469 switch (sset) {
@@ -10317,35 +10491,38 @@ static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10317 } 10491 }
10318} 10492}
10319 10493
10320static int tg3_phys_id(struct net_device *dev, u32 data) 10494static int tg3_set_phys_id(struct net_device *dev,
10495 enum ethtool_phys_id_state state)
10321{ 10496{
10322 struct tg3 *tp = netdev_priv(dev); 10497 struct tg3 *tp = netdev_priv(dev);
10323 int i;
10324 10498
10325 if (!netif_running(tp->dev)) 10499 if (!netif_running(tp->dev))
10326 return -EAGAIN; 10500 return -EAGAIN;
10327 10501
10328 if (data == 0) 10502 switch (state) {
10329 data = UINT_MAX / 2; 10503 case ETHTOOL_ID_ACTIVE:
10330 10504 return 1; /* cycle on/off once per second */
10331 for (i = 0; i < (data * 2); i++) { 10505
10332 if ((i % 2) == 0) 10506 case ETHTOOL_ID_ON:
10333 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 10507 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10334 LED_CTRL_1000MBPS_ON | 10508 LED_CTRL_1000MBPS_ON |
10335 LED_CTRL_100MBPS_ON | 10509 LED_CTRL_100MBPS_ON |
10336 LED_CTRL_10MBPS_ON | 10510 LED_CTRL_10MBPS_ON |
10337 LED_CTRL_TRAFFIC_OVERRIDE | 10511 LED_CTRL_TRAFFIC_OVERRIDE |
10338 LED_CTRL_TRAFFIC_BLINK | 10512 LED_CTRL_TRAFFIC_BLINK |
10339 LED_CTRL_TRAFFIC_LED); 10513 LED_CTRL_TRAFFIC_LED);
10514 break;
10340 10515
10341 else 10516 case ETHTOOL_ID_OFF:
10342 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 10517 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10343 LED_CTRL_TRAFFIC_OVERRIDE); 10518 LED_CTRL_TRAFFIC_OVERRIDE);
10519 break;
10344 10520
10345 if (msleep_interruptible(500)) 10521 case ETHTOOL_ID_INACTIVE:
10346 break; 10522 tw32(MAC_LED_CTRL, tp->led_ctrl);
10523 break;
10347 } 10524 }
10348 tw32(MAC_LED_CTRL, tp->led_ctrl); 10525
10349 return 0; 10526 return 0;
10350} 10527}
10351 10528
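
The old tg3_phys_id() loop, which slept in the driver and toggled the LED itself, becomes a .set_phys_id callback: the ethtool core now owns the timing and calls back with ON/OFF, and the return value of 1 from ETHTOOL_ID_ACTIVE asks for one on/off cycle per second (per the driver's own comment above). A skeleton of that callback shape, with hypothetical LED helpers in place of the MAC_LED_CTRL writes:

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* Hypothetical helpers; a real driver would program its LED register. */
static void nic_led_force(struct net_device *dev, bool on) { }
static void nic_led_restore(struct net_device *dev)        { }

static int nic_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* core cycles on/off once per second */
	case ETHTOOL_ID_ON:
		nic_led_force(dev, true);
		break;
	case ETHTOOL_ID_OFF:
		nic_led_force(dev, false);
		break;
	case ETHTOOL_ID_INACTIVE:
		nic_led_restore(dev);
		break;
	}
	return 0;
}

Pushing the msleep-based blink loop into the core removes the driver-side msleep_interruptible() handling seen on the left-hand side of the hunk.
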
@@ -10356,6 +10533,80 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
10356 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); 10533 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10357} 10534}
10358 10535
 10536 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10537{
10538 int i;
10539 __be32 *buf;
10540 u32 offset = 0, len = 0;
10541 u32 magic, val;
10542
10543 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10544 return NULL;
10545
10546 if (magic == TG3_EEPROM_MAGIC) {
10547 for (offset = TG3_NVM_DIR_START;
10548 offset < TG3_NVM_DIR_END;
10549 offset += TG3_NVM_DIRENT_SIZE) {
10550 if (tg3_nvram_read(tp, offset, &val))
10551 return NULL;
10552
10553 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10554 TG3_NVM_DIRTYPE_EXTVPD)
10555 break;
10556 }
10557
10558 if (offset != TG3_NVM_DIR_END) {
10559 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10560 if (tg3_nvram_read(tp, offset + 4, &offset))
10561 return NULL;
10562
10563 offset = tg3_nvram_logical_addr(tp, offset);
10564 }
10565 }
10566
10567 if (!offset || !len) {
10568 offset = TG3_NVM_VPD_OFF;
10569 len = TG3_NVM_VPD_LEN;
10570 }
10571
10572 buf = kmalloc(len, GFP_KERNEL);
10573 if (buf == NULL)
10574 return NULL;
10575
10576 if (magic == TG3_EEPROM_MAGIC) {
10577 for (i = 0; i < len; i += 4) {
10578 /* The data is in little-endian format in NVRAM.
10579 * Use the big-endian read routines to preserve
10580 * the byte order as it exists in NVRAM.
10581 */
10582 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10583 goto error;
10584 }
10585 } else {
10586 u8 *ptr;
10587 ssize_t cnt;
10588 unsigned int pos = 0;
10589
10590 ptr = (u8 *)&buf[0];
10591 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10592 cnt = pci_read_vpd(tp->pdev, pos,
10593 len - pos, ptr);
10594 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10595 cnt = 0;
10596 else if (cnt < 0)
10597 goto error;
10598 }
10599 if (pos != len)
10600 goto error;
10601 }
10602
10603 return buf;
10604
10605error:
10606 kfree(buf);
10607 return NULL;
10608}
10609
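
tg3_vpd_readblock() above falls back to pci_read_vpd() when the VPD does not live in NVRAM, retrying a bounded number of times on transient -ETIMEDOUT/-EINTR returns. A self-contained sketch of that retry loop (function name and the three-try limit mirror the pattern, not an exact copy of the driver code):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Read @len bytes of a device's VPD, retrying on transient errors.
 * Returns a kmalloc()ed buffer, or NULL on failure. */
static void *nic_vpd_read(struct pci_dev *pdev, size_t len)
{
	u8 *buf = kmalloc(len, GFP_KERNEL);
	size_t pos = 0;
	int tries;

	if (!buf)
		return NULL;

	for (tries = 0; pos < len && tries < 3; tries++) {
		ssize_t cnt = pci_read_vpd(pdev, pos, len - pos, buf + pos);

		if (cnt == -ETIMEDOUT || cnt == -EINTR)
			continue;	/* transient: try again */
		if (cnt < 0)
			goto err;
		pos += cnt;
	}
	if (pos == len)
		return buf;
err:
	kfree(buf);
	return NULL;
}
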
10359#define NVRAM_TEST_SIZE 0x100 10610#define NVRAM_TEST_SIZE 0x100
10360#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 10611#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10361#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 10612#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
@@ -10369,7 +10620,7 @@ static int tg3_test_nvram(struct tg3 *tp)
10369 __be32 *buf; 10620 __be32 *buf;
10370 int i, j, k, err = 0, size; 10621 int i, j, k, err = 0, size;
10371 10622
10372 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) 10623 if (tg3_flag(tp, NO_NVRAM))
10373 return 0; 10624 return 0;
10374 10625
10375 if (tg3_nvram_read(tp, 0, &magic) != 0) 10626 if (tg3_nvram_read(tp, 0, &magic) != 0)
@@ -10495,14 +10746,11 @@ static int tg3_test_nvram(struct tg3 *tp)
10495 if (csum != le32_to_cpu(buf[0xfc/4])) 10746 if (csum != le32_to_cpu(buf[0xfc/4]))
10496 goto out; 10747 goto out;
10497 10748
10498 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { 10749 kfree(buf);
10499 /* The data is in little-endian format in NVRAM. 10750
10500 * Use the big-endian read routines to preserve 10751 buf = tg3_vpd_readblock(tp);
10501 * the byte order as it exists in NVRAM. 10752 if (!buf)
10502 */ 10753 return -ENOMEM;
10503 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
10504 goto out;
10505 }
10506 10754
10507 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN, 10755 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10508 PCI_VPD_LRDT_RO_DATA); 10756 PCI_VPD_LRDT_RO_DATA);
@@ -10714,9 +10962,9 @@ static int tg3_test_registers(struct tg3 *tp)
10714 }; 10962 };
10715 10963
10716 is_5705 = is_5750 = 0; 10964 is_5705 = is_5750 = 0;
10717 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 10965 if (tg3_flag(tp, 5705_PLUS)) {
10718 is_5705 = 1; 10966 is_5705 = 1;
10719 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 10967 if (tg3_flag(tp, 5750_PLUS))
10720 is_5750 = 1; 10968 is_5750 = 1;
10721 } 10969 }
10722 10970
@@ -10727,7 +10975,7 @@ static int tg3_test_registers(struct tg3 *tp)
10727 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 10975 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10728 continue; 10976 continue;
10729 10977
10730 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && 10978 if (tg3_flag(tp, IS_5788) &&
10731 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 10979 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10732 continue; 10980 continue;
10733 10981
@@ -10850,16 +11098,15 @@ static int tg3_test_memory(struct tg3 *tp)
10850 int err = 0; 11098 int err = 0;
10851 int i; 11099 int i;
10852 11100
10853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 11101 if (tg3_flag(tp, 5717_PLUS))
10854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10855 mem_tbl = mem_tbl_5717; 11102 mem_tbl = mem_tbl_5717;
10856 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 11103 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10857 mem_tbl = mem_tbl_57765; 11104 mem_tbl = mem_tbl_57765;
10858 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 11105 else if (tg3_flag(tp, 5755_PLUS))
10859 mem_tbl = mem_tbl_5755; 11106 mem_tbl = mem_tbl_5755;
10860 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 11107 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10861 mem_tbl = mem_tbl_5906; 11108 mem_tbl = mem_tbl_5906;
10862 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 11109 else if (tg3_flag(tp, 5705_PLUS))
10863 mem_tbl = mem_tbl_5705; 11110 mem_tbl = mem_tbl_5705;
10864 else 11111 else
10865 mem_tbl = mem_tbl_570x; 11112 mem_tbl = mem_tbl_570x;
@@ -10875,11 +11122,35 @@ static int tg3_test_memory(struct tg3 *tp)
10875 11122
10876#define TG3_MAC_LOOPBACK 0 11123#define TG3_MAC_LOOPBACK 0
10877#define TG3_PHY_LOOPBACK 1 11124#define TG3_PHY_LOOPBACK 1
11125#define TG3_TSO_LOOPBACK 2
11126
11127#define TG3_TSO_MSS 500
11128
11129#define TG3_TSO_IP_HDR_LEN 20
11130#define TG3_TSO_TCP_HDR_LEN 20
11131#define TG3_TSO_TCP_OPT_LEN 12
11132
 11133 static const u8 tg3_tso_header[] = {
 11134 	0x08, 0x00,
 11135 	0x45, 0x00, 0x00, 0x00,
 11136 	0x00, 0x00, 0x40, 0x00,
 11137 	0x40, 0x06, 0x00, 0x00,
 11138 	0x0a, 0x00, 0x00, 0x01,
 11139 	0x0a, 0x00, 0x00, 0x02,
 11140 	0x0d, 0x00, 0xe0, 0x00,
 11141 	0x00, 0x00, 0x01, 0x00,
 11142 	0x00, 0x00, 0x02, 0x00,
 11143 	0x80, 0x10, 0x10, 0x00,
 11144 	0x14, 0x09, 0x00, 0x00,
 11145 	0x01, 0x01, 0x08, 0x0a,
 11146 	0x11, 0x11, 0x11, 0x11,
 11147 	0x11, 0x11, 0x11, 0x11,
 11148 };
10878 11149
10879static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) 11150static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10880{ 11151{
10881 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; 11152 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10882 u32 desc_idx, coal_now; 11153 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10883 struct sk_buff *skb, *rx_skb; 11154 struct sk_buff *skb, *rx_skb;
10884 u8 *tx_data; 11155 u8 *tx_data;
10885 dma_addr_t map; 11156 dma_addr_t map;
@@ -10891,9 +11162,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10891 tnapi = &tp->napi[0]; 11162 tnapi = &tp->napi[0];
10892 rnapi = &tp->napi[0]; 11163 rnapi = &tp->napi[0];
10893 if (tp->irq_cnt > 1) { 11164 if (tp->irq_cnt > 1) {
10894 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) 11165 if (tg3_flag(tp, ENABLE_RSS))
10895 rnapi = &tp->napi[1]; 11166 rnapi = &tp->napi[1];
10896 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 11167 if (tg3_flag(tp, ENABLE_TSS))
10897 tnapi = &tp->napi[1]; 11168 tnapi = &tp->napi[1];
10898 } 11169 }
10899 coal_now = tnapi->coal_now | rnapi->coal_now; 11170 coal_now = tnapi->coal_now | rnapi->coal_now;
@@ -10905,22 +11176,20 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10905 * all newer ASIC revisions. 11176 * all newer ASIC revisions.
10906 */ 11177 */
10907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || 11178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10908 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) 11179 tg3_flag(tp, CPMU_PRESENT))
10909 return 0; 11180 return 0;
10910 11181
10911 mac_mode = tp->mac_mode & 11182 mac_mode = tp->mac_mode &
10912 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); 11183 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10913 mac_mode |= MAC_MODE_PORT_INT_LPBACK; 11184 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10914 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 11185 if (!tg3_flag(tp, 5705_PLUS))
10915 mac_mode |= MAC_MODE_LINK_POLARITY; 11186 mac_mode |= MAC_MODE_LINK_POLARITY;
10916 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 11187 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10917 mac_mode |= MAC_MODE_PORT_MODE_MII; 11188 mac_mode |= MAC_MODE_PORT_MODE_MII;
10918 else 11189 else
10919 mac_mode |= MAC_MODE_PORT_MODE_GMII; 11190 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10920 tw32(MAC_MODE, mac_mode); 11191 tw32(MAC_MODE, mac_mode);
10921 } else if (loopback_mode == TG3_PHY_LOOPBACK) { 11192 } else {
10922 u32 val;
10923
10924 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 11193 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10925 tg3_phy_fet_toggle_apd(tp, false); 11194 tg3_phy_fet_toggle_apd(tp, false);
10926 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; 11195 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
@@ -10968,13 +11237,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10968 break; 11237 break;
10969 mdelay(1); 11238 mdelay(1);
10970 } 11239 }
10971 } else {
10972 return -EINVAL;
10973 } 11240 }
10974 11241
10975 err = -EIO; 11242 err = -EIO;
10976 11243
10977 tx_len = 1514; 11244 tx_len = pktsz;
10978 skb = netdev_alloc_skb(tp->dev, tx_len); 11245 skb = netdev_alloc_skb(tp->dev, tx_len);
10979 if (!skb) 11246 if (!skb)
10980 return -ENOMEM; 11247 return -ENOMEM;
@@ -10983,9 +11250,58 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10983 memcpy(tx_data, tp->dev->dev_addr, 6); 11250 memcpy(tx_data, tp->dev->dev_addr, 6);
10984 memset(tx_data + 6, 0x0, 8); 11251 memset(tx_data + 6, 0x0, 8);
10985 11252
10986 tw32(MAC_RX_MTU_SIZE, tx_len + 4); 11253 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11254
11255 if (loopback_mode == TG3_TSO_LOOPBACK) {
11256 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11257
11258 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11259 TG3_TSO_TCP_OPT_LEN;
11260
11261 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11262 sizeof(tg3_tso_header));
11263 mss = TG3_TSO_MSS;
11264
11265 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11266 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11267
11268 /* Set the total length field in the IP header */
11269 iph->tot_len = htons((u16)(mss + hdr_len));
11270
11271 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11272 TXD_FLAG_CPU_POST_DMA);
11273
11274 if (tg3_flag(tp, HW_TSO_1) ||
11275 tg3_flag(tp, HW_TSO_2) ||
11276 tg3_flag(tp, HW_TSO_3)) {
11277 struct tcphdr *th;
11278 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11279 th = (struct tcphdr *)&tx_data[val];
11280 th->check = 0;
11281 } else
11282 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11283
11284 if (tg3_flag(tp, HW_TSO_3)) {
11285 mss |= (hdr_len & 0xc) << 12;
11286 if (hdr_len & 0x10)
11287 base_flags |= 0x00000010;
11288 base_flags |= (hdr_len & 0x3e0) << 5;
11289 } else if (tg3_flag(tp, HW_TSO_2))
11290 mss |= hdr_len << 9;
11291 else if (tg3_flag(tp, HW_TSO_1) ||
11292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11293 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11294 } else {
11295 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11296 }
11297
11298 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11299 } else {
11300 num_pkts = 1;
11301 data_off = ETH_HLEN;
11302 }
10987 11303
10988 for (i = 14; i < tx_len; i++) 11304 for (i = data_off; i < tx_len; i++)
10989 tx_data[i] = (u8) (i & 0xff); 11305 tx_data[i] = (u8) (i & 0xff);
10990 11306
10991 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 11307 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
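
The descriptor fields computed in the TSO branch above pack the header length into different places depending on the TSO engine generation, and tg3_set_txd() later receives (mss << 1) | 1. A condensed restatement of that encoding, kept only as a reading aid (the helper name is hypothetical; hdr_len is the 52-byte L3+L4 header length of the template):

/* Condensed restatement of the encoding in the hunk above (sketch only). */
static u32 tg3_tso_mss_field_sketch(struct tg3 *tp, u32 mss, u32 hdr_len,
				    u32 *base_flags)
{
	if (tg3_flag(tp, HW_TSO_3)) {
		mss |= (hdr_len & 0xc) << 12;		/* bits 2-3 of hdr_len */
		if (hdr_len & 0x10)
			*base_flags |= 0x00000010;	/* bit 4 */
		*base_flags |= (hdr_len & 0x3e0) << 5;	/* bits 5-9 */
	} else if (tg3_flag(tp, HW_TSO_2)) {
		mss |= hdr_len << 9;
	} else if (tg3_flag(tp, HW_TSO_1) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		mss |= TG3_TSO_TCP_OPT_LEN << 9;	/* option length only */
	} else {
		*base_flags |= TG3_TSO_TCP_OPT_LEN << 10;
	}
	return mss;	/* tg3_set_txd() is then called with (mss << 1) | 1 */
}
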
@@ -11001,12 +11317,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
11001 11317
11002 rx_start_idx = rnapi->hw_status->idx[0].rx_producer; 11318 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11003 11319
11004 num_pkts = 0; 11320 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11005 11321 base_flags, (mss << 1) | 1);
11006 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
11007 11322
11008 tnapi->tx_prod++; 11323 tnapi->tx_prod++;
11009 num_pkts++;
11010 11324
11011 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); 11325 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11012 tr32_mailbox(tnapi->prodmbox); 11326 tr32_mailbox(tnapi->prodmbox);
@@ -11036,29 +11350,56 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
11036 if (rx_idx != rx_start_idx + num_pkts) 11350 if (rx_idx != rx_start_idx + num_pkts)
11037 goto out; 11351 goto out;
11038 11352
11039 desc = &rnapi->rx_rcb[rx_start_idx]; 11353 val = data_off;
11040 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 11354 while (rx_idx != rx_start_idx) {
11041 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 11355 desc = &rnapi->rx_rcb[rx_start_idx++];
11042 if (opaque_key != RXD_OPAQUE_RING_STD) 11356 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11043 goto out; 11357 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11044 11358
11045 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 11359 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11046 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) 11360 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11047 goto out; 11361 goto out;
11048 11362
11049 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; 11363 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11050 if (rx_len != tx_len) 11364 - ETH_FCS_LEN;
11051 goto out;
11052 11365
11053 rx_skb = tpr->rx_std_buffers[desc_idx].skb; 11366 if (loopback_mode != TG3_TSO_LOOPBACK) {
11367 if (rx_len != tx_len)
11368 goto out;
11054 11369
11055 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); 11370 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11056 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); 11371 if (opaque_key != RXD_OPAQUE_RING_STD)
11372 goto out;
11373 } else {
11374 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11375 goto out;
11376 }
11377 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11378 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11379 >> RXD_TCPCSUM_SHIFT == 0xffff) {
11380 goto out;
11381 }
11057 11382
11058 for (i = 14; i < tx_len; i++) { 11383 if (opaque_key == RXD_OPAQUE_RING_STD) {
11059 if (*(rx_skb->data + i) != (u8) (i & 0xff)) 11384 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11385 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11386 mapping);
11387 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11388 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11389 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11390 mapping);
11391 } else
11060 goto out; 11392 goto out;
11393
11394 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11395 PCI_DMA_FROMDEVICE);
11396
11397 for (i = data_off; i < rx_len; i++, val++) {
11398 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11399 goto out;
11400 }
11061 } 11401 }
11402
11062 err = 0; 11403 err = 0;
11063 11404
11064 /* tg3_free_rings will unmap and free the rx_skb */ 11405 /* tg3_free_rings will unmap and free the rx_skb */
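
The receive-side check above walks every completed descriptor (TSO loopback returns several), picks the buffer from the standard or jumbo ring according to the descriptor's opaque key, and verifies that the payload continues the single counting pattern written on the transmit side; val is deliberately not reset between packets. Reduced to its essence (sketch only):

/* Sketch: one running counter is enough to check the payload across all
 * returned segments, because the transmit side filled one contiguous
 * counting pattern that the hardware merely sliced into MSS-sized pieces.
 * (In the driver the per-segment headers are skipped first: i starts at
 * data_off for each packet while val keeps counting.)
 */
static int tg3_check_pattern_sketch(const u8 *buf, int len, u32 *val)
{
	int i;

	for (i = 0; i < len; i++, (*val)++) {
		if (buf[i] != (u8)(*val & 0xff))
			return -1;	/* payload corrupted */
	}
	return 0;
}
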
@@ -11066,10 +11407,13 @@ out:
11066 return err; 11407 return err;
11067} 11408}
11068 11409
11069 #define TG3_MAC_LOOPBACK_FAILED 1 11410 #define TG3_STD_LOOPBACK_FAILED 1
11070 #define TG3_PHY_LOOPBACK_FAILED 2 11411 #define TG3_JMB_LOOPBACK_FAILED 2
11071 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ 11412 #define TG3_TSO_LOOPBACK_FAILED 4
11072 TG3_PHY_LOOPBACK_FAILED) 11413
11414 #define TG3_MAC_LOOPBACK_SHIFT 0
11415 #define TG3_PHY_LOOPBACK_SHIFT 4
11416 #define TG3_LOOPBACK_FAILED 0x00000077
11073 11417
11074 static int tg3_test_loopback(struct tg3 *tp) 11418 static int tg3_test_loopback(struct tg3 *tp)
11075 { 11419 {
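
The loopback result is now a small bitmap rather than two booleans: bits 0-2 carry the standard, jumbo and TSO sub-test results for MAC loopback, bits 4-6 the same three for PHY loopback, which is why TG3_LOOPBACK_FAILED is 0x77. A worked example of the encoding (the helper is hypothetical, added only to show the layout):

/* Example: a jumbo-frame failure under PHY loopback is reported as
 * TG3_JMB_LOOPBACK_FAILED (2) << TG3_PHY_LOOPBACK_SHIFT (4) == 0x20.
 * MAC-loopback results occupy bits 0-2, PHY-loopback results bits 4-6,
 * so "did anything fail" reduces to a single mask test.
 */
static inline int tg3_loopback_failed_sketch(u32 err)
{
	return (err & TG3_LOOPBACK_FAILED) != 0;	/* mask is 0x77 */
}
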
@@ -11088,11 +11432,20 @@ static int tg3_test_loopback(struct tg3 *tp)
11088 goto done; 11432 goto done;
11089 } 11433 }
11090 11434
11435 if (tg3_flag(tp, ENABLE_RSS)) {
11436 int i;
11437
11438 /* Reroute all rx packets to the 1st queue */
11439 for (i = MAC_RSS_INDIR_TBL_0;
11440 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11441 tw32(i, 0x0);
11442 }
11443
11091 /* Turn off gphy autopowerdown. */ 11444 /* Turn off gphy autopowerdown. */
11092 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11445 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11093 tg3_phy_toggle_apd(tp, false); 11446 tg3_phy_toggle_apd(tp, false);
11094 11447
11095 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { 11448 if (tg3_flag(tp, CPMU_PRESENT)) {
11096 int i; 11449 int i;
11097 u32 status; 11450 u32 status;
11098 11451
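
When RSS is active the looped-back frame could hash to any return ring, while the test only watches one; zeroing every entry of the RSS indirection table pins all traffic to the first queue for the duration of the test. The added loop, factored into a standalone form (helper name hypothetical):

/* Same operation as the loop added above: the indirection table is a block
 * of 32-bit registers starting at MAC_RSS_INDIR_TBL_0, and writing zero to
 * each entry steers every hash bucket to the first RX return ring.
 */
static void tg3_rss_reroute_to_first_queue(struct tg3 *tp)
{
	u32 reg;

	for (reg = MAC_RSS_INDIR_TBL_0;
	     reg < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; reg += 4)
		tw32(reg, 0x0);
}
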
@@ -11118,10 +11471,14 @@ static int tg3_test_loopback(struct tg3 *tp)
11118 CPMU_CTRL_LINK_AWARE_MODE)); 11471 CPMU_CTRL_LINK_AWARE_MODE));
11119 } 11472 }
11120 11473
11121 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 11474 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11122 err |= TG3_MAC_LOOPBACK_FAILED; 11475 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11123 11476
11124 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { 11477 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11478 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11479 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11480
11481 if (tg3_flag(tp, CPMU_PRESENT)) {
11125 tw32(TG3_CPMU_CTRL, cpmuctrl); 11482 tw32(TG3_CPMU_CTRL, cpmuctrl);
11126 11483
11127 /* Release the mutex */ 11484 /* Release the mutex */
@@ -11129,9 +11486,18 @@ static int tg3_test_loopback(struct tg3 *tp)
11129 } 11486 }
11130 11487
11131 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && 11488 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11132 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 11489 !tg3_flag(tp, USE_PHYLIB)) {
11133 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) 11490 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11134 err |= TG3_PHY_LOOPBACK_FAILED; 11491 err |= TG3_STD_LOOPBACK_FAILED <<
11492 TG3_PHY_LOOPBACK_SHIFT;
11493 if (tg3_flag(tp, TSO_CAPABLE) &&
11494 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11495 err |= TG3_TSO_LOOPBACK_FAILED <<
11496 TG3_PHY_LOOPBACK_SHIFT;
11497 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11498 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11499 err |= TG3_JMB_LOOPBACK_FAILED <<
11500 TG3_PHY_LOOPBACK_SHIFT;
11135 } 11501 }
11136 11502
11137 /* Re-enable gphy autopowerdown. */ 11503 /* Re-enable gphy autopowerdown. */
@@ -11176,7 +11542,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11176 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 11542 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11177 err = tg3_nvram_lock(tp); 11543 err = tg3_nvram_lock(tp);
11178 tg3_halt_cpu(tp, RX_CPU_BASE); 11544 tg3_halt_cpu(tp, RX_CPU_BASE);
11179 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 11545 if (!tg3_flag(tp, 5705_PLUS))
11180 tg3_halt_cpu(tp, TX_CPU_BASE); 11546 tg3_halt_cpu(tp, TX_CPU_BASE);
11181 if (!err) 11547 if (!err)
11182 tg3_nvram_unlock(tp); 11548 tg3_nvram_unlock(tp);
@@ -11206,7 +11572,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11206 11572
11207 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 11573 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11208 if (netif_running(dev)) { 11574 if (netif_running(dev)) {
11209 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11575 tg3_flag_set(tp, INIT_COMPLETE);
11210 err2 = tg3_restart_hw(tp, 1); 11576 err2 = tg3_restart_hw(tp, 1);
11211 if (!err2) 11577 if (!err2)
11212 tg3_netif_start(tp); 11578 tg3_netif_start(tp);
@@ -11228,7 +11594,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11228 struct tg3 *tp = netdev_priv(dev); 11594 struct tg3 *tp = netdev_priv(dev);
11229 int err; 11595 int err;
11230 11596
11231 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 11597 if (tg3_flag(tp, USE_PHYLIB)) {
11232 struct phy_device *phydev; 11598 struct phy_device *phydev;
11233 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) 11599 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11234 return -EAGAIN; 11600 return -EAGAIN;
@@ -11247,9 +11613,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11247 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11613 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11248 break; /* We have no PHY */ 11614 break; /* We have no PHY */
11249 11615
11250 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || 11616 if (!netif_running(dev))
11251 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11252 !netif_running(dev)))
11253 return -EAGAIN; 11617 return -EAGAIN;
11254 11618
11255 spin_lock_bh(&tp->lock); 11619 spin_lock_bh(&tp->lock);
@@ -11265,9 +11629,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11265 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11629 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11266 break; /* We have no PHY */ 11630 break; /* We have no PHY */
11267 11631
11268 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || 11632 if (!netif_running(dev))
11269 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11270 !netif_running(dev)))
11271 return -EAGAIN; 11633 return -EAGAIN;
11272 11634
11273 spin_lock_bh(&tp->lock); 11635 spin_lock_bh(&tp->lock);
@@ -11297,7 +11659,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11297 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; 11659 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11298 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; 11660 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11299 11661
11300 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 11662 if (!tg3_flag(tp, 5705_PLUS)) {
11301 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; 11663 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11302 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; 11664 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11303 max_stat_coal_ticks = MAX_STAT_COAL_TICKS; 11665 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
@@ -11364,14 +11726,9 @@ static const struct ethtool_ops tg3_ethtool_ops = {
11364 .set_ringparam = tg3_set_ringparam, 11726 .set_ringparam = tg3_set_ringparam,
11365 .get_pauseparam = tg3_get_pauseparam, 11727 .get_pauseparam = tg3_get_pauseparam,
11366 .set_pauseparam = tg3_set_pauseparam, 11728 .set_pauseparam = tg3_set_pauseparam,
11367 .get_rx_csum = tg3_get_rx_csum,
11368 .set_rx_csum = tg3_set_rx_csum,
11369 .set_tx_csum = tg3_set_tx_csum,
11370 .set_sg = ethtool_op_set_sg,
11371 .set_tso = tg3_set_tso,
11372 .self_test = tg3_self_test, 11729 .self_test = tg3_self_test,
11373 .get_strings = tg3_get_strings, 11730 .get_strings = tg3_get_strings,
11374 .phys_id = tg3_phys_id, 11731 .set_phys_id = tg3_set_phys_id,
11375 .get_ethtool_stats = tg3_get_ethtool_stats, 11732 .get_ethtool_stats = tg3_get_ethtool_stats,
11376 .get_coalesce = tg3_get_coalesce, 11733 .get_coalesce = tg3_get_coalesce,
11377 .set_coalesce = tg3_set_coalesce, 11734 .set_coalesce = tg3_set_coalesce,
@@ -11416,8 +11773,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11416{ 11773{
11417 u32 val; 11774 u32 val;
11418 11775
11419 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 11776 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11420 tg3_nvram_read(tp, 0, &val) != 0)
11421 return; 11777 return;
11422 11778
11423 /* Selfboot format */ 11779 /* Selfboot format */
@@ -11452,19 +11808,19 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11452 11808
11453 nvcfg1 = tr32(NVRAM_CFG1); 11809 nvcfg1 = tr32(NVRAM_CFG1);
11454 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { 11810 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11455 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11811 tg3_flag_set(tp, FLASH);
11456 } else { 11812 } else {
11457 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 11813 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11458 tw32(NVRAM_CFG1, nvcfg1); 11814 tw32(NVRAM_CFG1, nvcfg1);
11459 } 11815 }
11460 11816
11461 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || 11817 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11462 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 11818 tg3_flag(tp, 5780_CLASS)) {
11463 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 11819 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11464 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 11820 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11465 tp->nvram_jedecnum = JEDEC_ATMEL; 11821 tp->nvram_jedecnum = JEDEC_ATMEL;
11466 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 11822 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11467 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11823 tg3_flag_set(tp, NVRAM_BUFFERED);
11468 break; 11824 break;
11469 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: 11825 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11470 tp->nvram_jedecnum = JEDEC_ATMEL; 11826 tp->nvram_jedecnum = JEDEC_ATMEL;
@@ -11473,12 +11829,12 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11473 case FLASH_VENDOR_ATMEL_EEPROM: 11829 case FLASH_VENDOR_ATMEL_EEPROM:
11474 tp->nvram_jedecnum = JEDEC_ATMEL; 11830 tp->nvram_jedecnum = JEDEC_ATMEL;
11475 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11831 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11476 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11832 tg3_flag_set(tp, NVRAM_BUFFERED);
11477 break; 11833 break;
11478 case FLASH_VENDOR_ST: 11834 case FLASH_VENDOR_ST:
11479 tp->nvram_jedecnum = JEDEC_ST; 11835 tp->nvram_jedecnum = JEDEC_ST;
11480 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; 11836 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11481 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11837 tg3_flag_set(tp, NVRAM_BUFFERED);
11482 break; 11838 break;
11483 case FLASH_VENDOR_SAIFUN: 11839 case FLASH_VENDOR_SAIFUN:
11484 tp->nvram_jedecnum = JEDEC_SAIFUN; 11840 tp->nvram_jedecnum = JEDEC_SAIFUN;
@@ -11493,7 +11849,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11493 } else { 11849 } else {
11494 tp->nvram_jedecnum = JEDEC_ATMEL; 11850 tp->nvram_jedecnum = JEDEC_ATMEL;
11495 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; 11851 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11496 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11852 tg3_flag_set(tp, NVRAM_BUFFERED);
11497 } 11853 }
11498} 11854}
11499 11855
@@ -11532,29 +11888,29 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11532 11888
11533 /* NVRAM protection for TPM */ 11889 /* NVRAM protection for TPM */
11534 if (nvcfg1 & (1 << 27)) 11890 if (nvcfg1 & (1 << 27))
11535 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; 11891 tg3_flag_set(tp, PROTECTED_NVRAM);
11536 11892
11537 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11893 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11538 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 11894 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11539 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: 11895 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11540 tp->nvram_jedecnum = JEDEC_ATMEL; 11896 tp->nvram_jedecnum = JEDEC_ATMEL;
11541 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11897 tg3_flag_set(tp, NVRAM_BUFFERED);
11542 break; 11898 break;
11543 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 11899 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11544 tp->nvram_jedecnum = JEDEC_ATMEL; 11900 tp->nvram_jedecnum = JEDEC_ATMEL;
11545 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11901 tg3_flag_set(tp, NVRAM_BUFFERED);
11546 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11902 tg3_flag_set(tp, FLASH);
11547 break; 11903 break;
11548 case FLASH_5752VENDOR_ST_M45PE10: 11904 case FLASH_5752VENDOR_ST_M45PE10:
11549 case FLASH_5752VENDOR_ST_M45PE20: 11905 case FLASH_5752VENDOR_ST_M45PE20:
11550 case FLASH_5752VENDOR_ST_M45PE40: 11906 case FLASH_5752VENDOR_ST_M45PE40:
11551 tp->nvram_jedecnum = JEDEC_ST; 11907 tp->nvram_jedecnum = JEDEC_ST;
11552 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11908 tg3_flag_set(tp, NVRAM_BUFFERED);
11553 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11909 tg3_flag_set(tp, FLASH);
11554 break; 11910 break;
11555 } 11911 }
11556 11912
11557 if (tp->tg3_flags2 & TG3_FLG2_FLASH) { 11913 if (tg3_flag(tp, FLASH)) {
11558 tg3_nvram_get_pagesize(tp, nvcfg1); 11914 tg3_nvram_get_pagesize(tp, nvcfg1);
11559 } else { 11915 } else {
11560 /* For eeprom, set pagesize to maximum eeprom size */ 11916 /* For eeprom, set pagesize to maximum eeprom size */
@@ -11573,7 +11929,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11573 11929
11574 /* NVRAM protection for TPM */ 11930 /* NVRAM protection for TPM */
11575 if (nvcfg1 & (1 << 27)) { 11931 if (nvcfg1 & (1 << 27)) {
11576 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; 11932 tg3_flag_set(tp, PROTECTED_NVRAM);
11577 protect = 1; 11933 protect = 1;
11578 } 11934 }
11579 11935
@@ -11584,8 +11940,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11584 case FLASH_5755VENDOR_ATMEL_FLASH_3: 11940 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11585 case FLASH_5755VENDOR_ATMEL_FLASH_5: 11941 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11586 tp->nvram_jedecnum = JEDEC_ATMEL; 11942 tp->nvram_jedecnum = JEDEC_ATMEL;
11587 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11943 tg3_flag_set(tp, NVRAM_BUFFERED);
11588 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11944 tg3_flag_set(tp, FLASH);
11589 tp->nvram_pagesize = 264; 11945 tp->nvram_pagesize = 264;
11590 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || 11946 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11591 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) 11947 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
@@ -11602,8 +11958,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11602 case FLASH_5752VENDOR_ST_M45PE20: 11958 case FLASH_5752VENDOR_ST_M45PE20:
11603 case FLASH_5752VENDOR_ST_M45PE40: 11959 case FLASH_5752VENDOR_ST_M45PE40:
11604 tp->nvram_jedecnum = JEDEC_ST; 11960 tp->nvram_jedecnum = JEDEC_ST;
11605 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11961 tg3_flag_set(tp, NVRAM_BUFFERED);
11606 tp->tg3_flags2 |= TG3_FLG2_FLASH; 11962 tg3_flag_set(tp, FLASH);
11607 tp->nvram_pagesize = 256; 11963 tp->nvram_pagesize = 256;
11608 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) 11964 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11609 tp->nvram_size = (protect ? 11965 tp->nvram_size = (protect ?
@@ -11633,7 +11989,7 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11633 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: 11989 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11634 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 11990 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11635 tp->nvram_jedecnum = JEDEC_ATMEL; 11991 tp->nvram_jedecnum = JEDEC_ATMEL;
11636 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 11992 tg3_flag_set(tp, NVRAM_BUFFERED);
11637 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 11993 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11638 11994
11639 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 11995 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
@@ -11644,16 +12000,16 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11644 case FLASH_5755VENDOR_ATMEL_FLASH_2: 12000 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11645 case FLASH_5755VENDOR_ATMEL_FLASH_3: 12001 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11646 tp->nvram_jedecnum = JEDEC_ATMEL; 12002 tp->nvram_jedecnum = JEDEC_ATMEL;
11647 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12003 tg3_flag_set(tp, NVRAM_BUFFERED);
11648 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12004 tg3_flag_set(tp, FLASH);
11649 tp->nvram_pagesize = 264; 12005 tp->nvram_pagesize = 264;
11650 break; 12006 break;
11651 case FLASH_5752VENDOR_ST_M45PE10: 12007 case FLASH_5752VENDOR_ST_M45PE10:
11652 case FLASH_5752VENDOR_ST_M45PE20: 12008 case FLASH_5752VENDOR_ST_M45PE20:
11653 case FLASH_5752VENDOR_ST_M45PE40: 12009 case FLASH_5752VENDOR_ST_M45PE40:
11654 tp->nvram_jedecnum = JEDEC_ST; 12010 tp->nvram_jedecnum = JEDEC_ST;
11655 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12011 tg3_flag_set(tp, NVRAM_BUFFERED);
11656 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12012 tg3_flag_set(tp, FLASH);
11657 tp->nvram_pagesize = 256; 12013 tp->nvram_pagesize = 256;
11658 break; 12014 break;
11659 } 12015 }
@@ -11667,7 +12023,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11667 12023
11668 /* NVRAM protection for TPM */ 12024 /* NVRAM protection for TPM */
11669 if (nvcfg1 & (1 << 27)) { 12025 if (nvcfg1 & (1 << 27)) {
11670 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; 12026 tg3_flag_set(tp, PROTECTED_NVRAM);
11671 protect = 1; 12027 protect = 1;
11672 } 12028 }
11673 12029
@@ -11682,9 +12038,9 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11682 case FLASH_5761VENDOR_ATMEL_MDB081D: 12038 case FLASH_5761VENDOR_ATMEL_MDB081D:
11683 case FLASH_5761VENDOR_ATMEL_MDB161D: 12039 case FLASH_5761VENDOR_ATMEL_MDB161D:
11684 tp->nvram_jedecnum = JEDEC_ATMEL; 12040 tp->nvram_jedecnum = JEDEC_ATMEL;
11685 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12041 tg3_flag_set(tp, NVRAM_BUFFERED);
11686 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12042 tg3_flag_set(tp, FLASH);
11687 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 12043 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11688 tp->nvram_pagesize = 256; 12044 tp->nvram_pagesize = 256;
11689 break; 12045 break;
11690 case FLASH_5761VENDOR_ST_A_M45PE20: 12046 case FLASH_5761VENDOR_ST_A_M45PE20:
@@ -11696,8 +12052,8 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11696 case FLASH_5761VENDOR_ST_M_M45PE80: 12052 case FLASH_5761VENDOR_ST_M_M45PE80:
11697 case FLASH_5761VENDOR_ST_M_M45PE16: 12053 case FLASH_5761VENDOR_ST_M_M45PE16:
11698 tp->nvram_jedecnum = JEDEC_ST; 12054 tp->nvram_jedecnum = JEDEC_ST;
11699 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12055 tg3_flag_set(tp, NVRAM_BUFFERED);
11700 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12056 tg3_flag_set(tp, FLASH);
11701 tp->nvram_pagesize = 256; 12057 tp->nvram_pagesize = 256;
11702 break; 12058 break;
11703 } 12059 }
@@ -11737,7 +12093,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11737static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) 12093static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11738{ 12094{
11739 tp->nvram_jedecnum = JEDEC_ATMEL; 12095 tp->nvram_jedecnum = JEDEC_ATMEL;
11740 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12096 tg3_flag_set(tp, NVRAM_BUFFERED);
11741 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 12097 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11742} 12098}
11743 12099
@@ -11751,7 +12107,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11751 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: 12107 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11752 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: 12108 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11753 tp->nvram_jedecnum = JEDEC_ATMEL; 12109 tp->nvram_jedecnum = JEDEC_ATMEL;
11754 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12110 tg3_flag_set(tp, NVRAM_BUFFERED);
11755 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 12111 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11756 12112
11757 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 12113 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
@@ -11765,8 +12121,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11765 case FLASH_57780VENDOR_ATMEL_AT45DB041D: 12121 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11766 case FLASH_57780VENDOR_ATMEL_AT45DB041B: 12122 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11767 tp->nvram_jedecnum = JEDEC_ATMEL; 12123 tp->nvram_jedecnum = JEDEC_ATMEL;
11768 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12124 tg3_flag_set(tp, NVRAM_BUFFERED);
11769 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12125 tg3_flag_set(tp, FLASH);
11770 12126
11771 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 12127 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11772 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: 12128 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
@@ -11788,8 +12144,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11788 case FLASH_5752VENDOR_ST_M45PE20: 12144 case FLASH_5752VENDOR_ST_M45PE20:
11789 case FLASH_5752VENDOR_ST_M45PE40: 12145 case FLASH_5752VENDOR_ST_M45PE40:
11790 tp->nvram_jedecnum = JEDEC_ST; 12146 tp->nvram_jedecnum = JEDEC_ST;
11791 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12147 tg3_flag_set(tp, NVRAM_BUFFERED);
11792 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12148 tg3_flag_set(tp, FLASH);
11793 12149
11794 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 12150 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11795 case FLASH_5752VENDOR_ST_M45PE10: 12151 case FLASH_5752VENDOR_ST_M45PE10:
@@ -11804,13 +12160,13 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11804 } 12160 }
11805 break; 12161 break;
11806 default: 12162 default:
11807 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; 12163 tg3_flag_set(tp, NO_NVRAM);
11808 return; 12164 return;
11809 } 12165 }
11810 12166
11811 tg3_nvram_get_pagesize(tp, nvcfg1); 12167 tg3_nvram_get_pagesize(tp, nvcfg1);
11812 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 12168 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11813 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 12169 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11814} 12170}
11815 12171
11816 12172
@@ -11824,7 +12180,7 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11824 case FLASH_5717VENDOR_ATMEL_EEPROM: 12180 case FLASH_5717VENDOR_ATMEL_EEPROM:
11825 case FLASH_5717VENDOR_MICRO_EEPROM: 12181 case FLASH_5717VENDOR_MICRO_EEPROM:
11826 tp->nvram_jedecnum = JEDEC_ATMEL; 12182 tp->nvram_jedecnum = JEDEC_ATMEL;
11827 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12183 tg3_flag_set(tp, NVRAM_BUFFERED);
11828 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 12184 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11829 12185
11830 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 12186 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
@@ -11838,11 +12194,13 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11838 case FLASH_5717VENDOR_ATMEL_ADB021D: 12194 case FLASH_5717VENDOR_ATMEL_ADB021D:
11839 case FLASH_5717VENDOR_ATMEL_45USPT: 12195 case FLASH_5717VENDOR_ATMEL_45USPT:
11840 tp->nvram_jedecnum = JEDEC_ATMEL; 12196 tp->nvram_jedecnum = JEDEC_ATMEL;
11841 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12197 tg3_flag_set(tp, NVRAM_BUFFERED);
11842 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12198 tg3_flag_set(tp, FLASH);
11843 12199
11844 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 12200 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11845 case FLASH_5717VENDOR_ATMEL_MDB021D: 12201 case FLASH_5717VENDOR_ATMEL_MDB021D:
12202 /* Detect size with tg3_nvram_get_size() */
12203 break;
11846 case FLASH_5717VENDOR_ATMEL_ADB021B: 12204 case FLASH_5717VENDOR_ATMEL_ADB021B:
11847 case FLASH_5717VENDOR_ATMEL_ADB021D: 12205 case FLASH_5717VENDOR_ATMEL_ADB021D:
11848 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 12206 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
@@ -11863,13 +12221,15 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11863 case FLASH_5717VENDOR_ST_25USPT: 12221 case FLASH_5717VENDOR_ST_25USPT:
11864 case FLASH_5717VENDOR_ST_45USPT: 12222 case FLASH_5717VENDOR_ST_45USPT:
11865 tp->nvram_jedecnum = JEDEC_ST; 12223 tp->nvram_jedecnum = JEDEC_ST;
11866 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; 12224 tg3_flag_set(tp, NVRAM_BUFFERED);
11867 tp->tg3_flags2 |= TG3_FLG2_FLASH; 12225 tg3_flag_set(tp, FLASH);
11868 12226
11869 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 12227 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11870 case FLASH_5717VENDOR_ST_M_M25PE20: 12228 case FLASH_5717VENDOR_ST_M_M25PE20:
11871 case FLASH_5717VENDOR_ST_A_M25PE20:
11872 case FLASH_5717VENDOR_ST_M_M45PE20: 12229 case FLASH_5717VENDOR_ST_M_M45PE20:
12230 /* Detect size with tg3_nvram_get_size() */
12231 break;
12232 case FLASH_5717VENDOR_ST_A_M25PE20:
11873 case FLASH_5717VENDOR_ST_A_M45PE20: 12233 case FLASH_5717VENDOR_ST_A_M45PE20:
11874 tp->nvram_size = TG3_NVRAM_SIZE_256KB; 12234 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11875 break; 12235 break;
@@ -11879,13 +12239,125 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11879 } 12239 }
11880 break; 12240 break;
11881 default: 12241 default:
11882 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; 12242 tg3_flag_set(tp, NO_NVRAM);
12243 return;
12244 }
12245
12246 tg3_nvram_get_pagesize(tp, nvcfg1);
12247 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12248 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12249}
12250
12251 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12252 {
12253 u32 nvcfg1, nvmpinstrp;
12254
12255 nvcfg1 = tr32(NVRAM_CFG1);
12256 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12257
12258 switch (nvmpinstrp) {
12259 case FLASH_5720_EEPROM_HD:
12260 case FLASH_5720_EEPROM_LD:
12261 tp->nvram_jedecnum = JEDEC_ATMEL;
12262 tg3_flag_set(tp, NVRAM_BUFFERED);
12263
12264 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12265 tw32(NVRAM_CFG1, nvcfg1);
12266 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12267 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12268 else
12269 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12270 return;
12271 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12272 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12273 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12274 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12275 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12276 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12277 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12278 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12279 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12280 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12281 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12282 case FLASH_5720VENDOR_ATMEL_45USPT:
12283 tp->nvram_jedecnum = JEDEC_ATMEL;
12284 tg3_flag_set(tp, NVRAM_BUFFERED);
12285 tg3_flag_set(tp, FLASH);
12286
12287 switch (nvmpinstrp) {
12288 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12289 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12290 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12291 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12292 break;
12293 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12294 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12295 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12296 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12297 break;
12298 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12299 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12300 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12301 break;
12302 default:
12303 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12304 break;
12305 }
12306 break;
12307 case FLASH_5720VENDOR_M_ST_M25PE10:
12308 case FLASH_5720VENDOR_M_ST_M45PE10:
12309 case FLASH_5720VENDOR_A_ST_M25PE10:
12310 case FLASH_5720VENDOR_A_ST_M45PE10:
12311 case FLASH_5720VENDOR_M_ST_M25PE20:
12312 case FLASH_5720VENDOR_M_ST_M45PE20:
12313 case FLASH_5720VENDOR_A_ST_M25PE20:
12314 case FLASH_5720VENDOR_A_ST_M45PE20:
12315 case FLASH_5720VENDOR_M_ST_M25PE40:
12316 case FLASH_5720VENDOR_M_ST_M45PE40:
12317 case FLASH_5720VENDOR_A_ST_M25PE40:
12318 case FLASH_5720VENDOR_A_ST_M45PE40:
12319 case FLASH_5720VENDOR_M_ST_M25PE80:
12320 case FLASH_5720VENDOR_M_ST_M45PE80:
12321 case FLASH_5720VENDOR_A_ST_M25PE80:
12322 case FLASH_5720VENDOR_A_ST_M45PE80:
12323 case FLASH_5720VENDOR_ST_25USPT:
12324 case FLASH_5720VENDOR_ST_45USPT:
12325 tp->nvram_jedecnum = JEDEC_ST;
12326 tg3_flag_set(tp, NVRAM_BUFFERED);
12327 tg3_flag_set(tp, FLASH);
12328
12329 switch (nvmpinstrp) {
12330 case FLASH_5720VENDOR_M_ST_M25PE20:
12331 case FLASH_5720VENDOR_M_ST_M45PE20:
12332 case FLASH_5720VENDOR_A_ST_M25PE20:
12333 case FLASH_5720VENDOR_A_ST_M45PE20:
12334 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12335 break;
12336 case FLASH_5720VENDOR_M_ST_M25PE40:
12337 case FLASH_5720VENDOR_M_ST_M45PE40:
12338 case FLASH_5720VENDOR_A_ST_M25PE40:
12339 case FLASH_5720VENDOR_A_ST_M45PE40:
12340 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12341 break;
12342 case FLASH_5720VENDOR_M_ST_M25PE80:
12343 case FLASH_5720VENDOR_M_ST_M45PE80:
12344 case FLASH_5720VENDOR_A_ST_M25PE80:
12345 case FLASH_5720VENDOR_A_ST_M45PE80:
12346 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12347 break;
12348 default:
12349 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12350 break;
12351 }
12352 break;
12353 default:
12354 tg3_flag_set(tp, NO_NVRAM);
11883 return; 12355 return;
11884 } 12356 }
11885 12357
11886 tg3_nvram_get_pagesize(tp, nvcfg1); 12358 tg3_nvram_get_pagesize(tp, nvcfg1);
11887 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) 12359 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11888 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; 12360 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11889} 12361}
11890 12362
11891/* Chips other than 5700/5701 use the NVRAM for fetching info. */ 12363/* Chips other than 5700/5701 use the NVRAM for fetching info. */
@@ -11905,7 +12377,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11905 12377
11906 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 12378 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11907 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 12379 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11908 tp->tg3_flags |= TG3_FLAG_NVRAM; 12380 tg3_flag_set(tp, NVRAM);
11909 12381
11910 if (tg3_nvram_lock(tp)) { 12382 if (tg3_nvram_lock(tp)) {
11911 netdev_warn(tp->dev, 12383 netdev_warn(tp->dev,
@@ -11935,6 +12407,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11935 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 12407 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 12408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11937 tg3_get_5717_nvram_info(tp); 12409 tg3_get_5717_nvram_info(tp);
12410 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12411 tg3_get_5720_nvram_info(tp);
11938 else 12412 else
11939 tg3_get_nvram_info(tp); 12413 tg3_get_nvram_info(tp);
11940 12414
@@ -11945,7 +12419,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11945 tg3_nvram_unlock(tp); 12419 tg3_nvram_unlock(tp);
11946 12420
11947 } else { 12421 } else {
11948 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); 12422 tg3_flag_clear(tp, NVRAM);
12423 tg3_flag_clear(tp, NVRAM_BUFFERED);
11949 12424
11950 tg3_get_eeprom_size(tp); 12425 tg3_get_eeprom_size(tp);
11951 } 12426 }
@@ -12128,7 +12603,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12128 nvram_cmd |= NVRAM_CMD_LAST; 12603 nvram_cmd |= NVRAM_CMD_LAST;
12129 12604
12130 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && 12605 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12131 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 12606 !tg3_flag(tp, 5755_PLUS) &&
12132 (tp->nvram_jedecnum == JEDEC_ST) && 12607 (tp->nvram_jedecnum == JEDEC_ST) &&
12133 (nvram_cmd & NVRAM_CMD_FIRST)) { 12608 (nvram_cmd & NVRAM_CMD_FIRST)) {
12134 12609
@@ -12138,7 +12613,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12138 12613
12139 break; 12614 break;
12140 } 12615 }
12141 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { 12616 if (!tg3_flag(tp, FLASH)) {
12142 /* We always do complete word writes to eeprom. */ 12617 /* We always do complete word writes to eeprom. */
12143 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); 12618 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12144 } 12619 }
@@ -12154,13 +12629,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12154{ 12629{
12155 int ret; 12630 int ret;
12156 12631
12157 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { 12632 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12158 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & 12633 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12159 ~GRC_LCLCTRL_GPIO_OUTPUT1); 12634 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12160 udelay(40); 12635 udelay(40);
12161 } 12636 }
12162 12637
12163 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { 12638 if (!tg3_flag(tp, NVRAM)) {
12164 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 12639 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12165 } else { 12640 } else {
12166 u32 grc_mode; 12641 u32 grc_mode;
@@ -12170,16 +12645,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12170 return ret; 12645 return ret;
12171 12646
12172 tg3_enable_nvram_access(tp); 12647 tg3_enable_nvram_access(tp);
12173 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 12648 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12174 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12175 tw32(NVRAM_WRITE1, 0x406); 12649 tw32(NVRAM_WRITE1, 0x406);
12176 12650
12177 grc_mode = tr32(GRC_MODE); 12651 grc_mode = tr32(GRC_MODE);
12178 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); 12652 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12179 12653
12180 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || 12654 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12181 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12182
12183 ret = tg3_nvram_write_block_buffered(tp, offset, len, 12655 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12184 buf); 12656 buf);
12185 } else { 12657 } else {
@@ -12194,7 +12666,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12194 tg3_nvram_unlock(tp); 12666 tg3_nvram_unlock(tp);
12195 } 12667 }
12196 12668
12197 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { 12669 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12198 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 12670 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12199 udelay(40); 12671 udelay(40);
12200 } 12672 }
@@ -12316,19 +12788,20 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12316 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 12788 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12317 12789
12318 /* Assume an onboard device and WOL capable by default. */ 12790 /* Assume an onboard device and WOL capable by default. */
12319 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; 12791 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12792 tg3_flag_set(tp, WOL_CAP);
12320 12793
12321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12322 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { 12795 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12323 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; 12796 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12324 tp->tg3_flags2 |= TG3_FLG2_IS_NIC; 12797 tg3_flag_set(tp, IS_NIC);
12325 } 12798 }
12326 val = tr32(VCPU_CFGSHDW); 12799 val = tr32(VCPU_CFGSHDW);
12327 if (val & VCPU_CFGSHDW_ASPM_DBNC) 12800 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12328 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 12801 tg3_flag_set(tp, ASPM_WORKAROUND);
12329 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 12802 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12330 (val & VCPU_CFGSHDW_WOL_MAGPKT)) { 12803 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12331 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 12804 tg3_flag_set(tp, WOL_ENABLE);
12332 device_set_wakeup_enable(&tp->pdev->dev, true); 12805 device_set_wakeup_enable(&tp->pdev->dev, true);
12333 } 12806 }
12334 goto done; 12807 goto done;
@@ -12371,13 +12844,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12371 12844
12372 tp->phy_id = eeprom_phy_id; 12845 tp->phy_id = eeprom_phy_id;
12373 if (eeprom_phy_serdes) { 12846 if (eeprom_phy_serdes) {
12374 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 12847 if (!tg3_flag(tp, 5705_PLUS))
12375 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; 12848 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12376 else 12849 else
12377 tp->phy_flags |= TG3_PHYFLG_MII_SERDES; 12850 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12378 } 12851 }
12379 12852
12380 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 12853 if (tg3_flag(tp, 5750_PLUS))
12381 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | 12854 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12382 SHASTA_EXT_LED_MODE_MASK); 12855 SHASTA_EXT_LED_MODE_MASK);
12383 else 12856 else
@@ -12437,34 +12910,34 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12437 tp->led_ctrl = LED_CTRL_MODE_PHY_1; 12910 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12438 12911
12439 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { 12912 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12440 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; 12913 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12441 if ((tp->pdev->subsystem_vendor == 12914 if ((tp->pdev->subsystem_vendor ==
12442 PCI_VENDOR_ID_ARIMA) && 12915 PCI_VENDOR_ID_ARIMA) &&
12443 (tp->pdev->subsystem_device == 0x205a || 12916 (tp->pdev->subsystem_device == 0x205a ||
12444 tp->pdev->subsystem_device == 0x2063)) 12917 tp->pdev->subsystem_device == 0x2063))
12445 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; 12918 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12446 } else { 12919 } else {
12447 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; 12920 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12448 tp->tg3_flags2 |= TG3_FLG2_IS_NIC; 12921 tg3_flag_set(tp, IS_NIC);
12449 } 12922 }
12450 12923
12451 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 12924 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12452 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 12925 tg3_flag_set(tp, ENABLE_ASF);
12453 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 12926 if (tg3_flag(tp, 5750_PLUS))
12454 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 12927 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12455 } 12928 }
12456 12929
12457 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && 12930 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12458 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 12931 tg3_flag(tp, 5750_PLUS))
12459 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; 12932 tg3_flag_set(tp, ENABLE_APE);
12460 12933
12461 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && 12934 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12462 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 12935 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12463 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; 12936 tg3_flag_clear(tp, WOL_CAP);
12464 12937
12465 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && 12938 if (tg3_flag(tp, WOL_CAP) &&
12466 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { 12939 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12467 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 12940 tg3_flag_set(tp, WOL_ENABLE);
12468 device_set_wakeup_enable(&tp->pdev->dev, true); 12941 device_set_wakeup_enable(&tp->pdev->dev, true);
12469 } 12942 }
12470 12943
@@ -12476,33 +12949,33 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12476 if (cfg2 & (1 << 18)) 12949 if (cfg2 & (1 << 18))
12477 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 12950 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12478 12951
12479 if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) || 12952 if ((tg3_flag(tp, 57765_PLUS) ||
12480 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12953 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12481 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) && 12954 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12482 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12955 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12483 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 12956 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12484 12957
12485 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 12958 if (tg3_flag(tp, PCI_EXPRESS) &&
12486 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 12959 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12487 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 12960 !tg3_flag(tp, 57765_PLUS)) {
12488 u32 cfg3; 12961 u32 cfg3;
12489 12962
12490 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); 12963 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12491 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) 12964 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12492 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 12965 tg3_flag_set(tp, ASPM_WORKAROUND);
12493 } 12966 }
12494 12967
12495 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) 12968 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12496 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE; 12969 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12497 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) 12970 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12498 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; 12971 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12499 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 12972 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12500 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; 12973 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12501 } 12974 }
12502done: 12975done:
12503 if (tp->tg3_flags & TG3_FLAG_WOL_CAP) 12976 if (tg3_flag(tp, WOL_CAP))
12504 device_set_wakeup_enable(&tp->pdev->dev, 12977 device_set_wakeup_enable(&tp->pdev->dev,
12505 tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 12978 tg3_flag(tp, WOL_ENABLE));
12506 else 12979 else
12507 device_set_wakeup_capable(&tp->pdev->dev, false); 12980 device_set_wakeup_capable(&tp->pdev->dev, false);
12508} 12981}
@@ -12592,18 +13065,17 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12592 int err; 13065 int err;
12593 13066
12594 /* flow control autonegotiation is default behavior */ 13067 /* flow control autonegotiation is default behavior */
12595 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 13068 tg3_flag_set(tp, PAUSE_AUTONEG);
12596 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 13069 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12597 13070
12598 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 13071 if (tg3_flag(tp, USE_PHYLIB))
12599 return tg3_phy_init(tp); 13072 return tg3_phy_init(tp);
12600 13073
12601 /* Reading the PHY ID register can conflict with ASF 13074 /* Reading the PHY ID register can conflict with ASF
12602 * firmware access to the PHY hardware. 13075 * firmware access to the PHY hardware.
12603 */ 13076 */
12604 err = 0; 13077 err = 0;
12605 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 13078 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12606 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12607 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; 13079 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12608 } else { 13080 } else {
12609 /* Now read the physical PHY_ID from the chip and verify 13081 /* Now read the physical PHY_ID from the chip and verify
@@ -12659,8 +13131,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12659 tg3_phy_init_link_config(tp); 13131 tg3_phy_init_link_config(tp);
12660 13132
12661 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 13133 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12662 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && 13134 !tg3_flag(tp, ENABLE_APE) &&
12663 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 13135 !tg3_flag(tp, ENABLE_ASF)) {
12664 u32 bmsr, adv_reg, tg3_ctrl, mask; 13136 u32 bmsr, adv_reg, tg3_ctrl, mask;
12665 13137
12666 tg3_readphy(tp, MII_BMSR, &bmsr); 13138 tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -12721,46 +13193,11 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
12721 u8 *vpd_data; 13193 u8 *vpd_data;
12722 unsigned int block_end, rosize, len; 13194 unsigned int block_end, rosize, len;
12723 int j, i = 0; 13195 int j, i = 0;
12724 u32 magic;
12725 13196
12726 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 13197 vpd_data = (u8 *)tg3_vpd_readblock(tp);
12727 tg3_nvram_read(tp, 0x0, &magic))
12728 goto out_no_vpd;
12729
12730 vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12731 if (!vpd_data) 13198 if (!vpd_data)
12732 goto out_no_vpd; 13199 goto out_no_vpd;
12733 13200
12734 if (magic == TG3_EEPROM_MAGIC) {
12735 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12736 u32 tmp;
12737
12738 /* The data is in little-endian format in NVRAM.
12739 * Use the big-endian read routines to preserve
12740 * the byte order as it exists in NVRAM.
12741 */
12742 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12743 goto out_not_found;
12744
12745 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12746 }
12747 } else {
12748 ssize_t cnt;
12749 unsigned int pos = 0;
12750
12751 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12752 cnt = pci_read_vpd(tp->pdev, pos,
12753 TG3_NVM_VPD_LEN - pos,
12754 &vpd_data[pos]);
12755 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12756 cnt = 0;
12757 else if (cnt < 0)
12758 goto out_not_found;
12759 }
12760 if (pos != TG3_NVM_VPD_LEN)
12761 goto out_not_found;
12762 }
12763
12764 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN, 13201 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12765 PCI_VPD_LRDT_RO_DATA); 13202 PCI_VPD_LRDT_RO_DATA);
12766 if (i < 0) 13203 if (i < 0)
@@ -13014,7 +13451,7 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13014 if (offset == TG3_NVM_DIR_END) 13451 if (offset == TG3_NVM_DIR_END)
13015 return; 13452 return;
13016 13453
13017 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 13454 if (!tg3_flag(tp, 5705_PLUS))
13018 start = 0x08000000; 13455 start = 0x08000000;
13019 else if (tg3_nvram_read(tp, offset - 4, &start)) 13456 else if (tg3_nvram_read(tp, offset - 4, &start))
13020 return; 13457 return;
@@ -13054,8 +13491,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13054 u32 apedata; 13491 u32 apedata;
13055 char *fwtype; 13492 char *fwtype;
13056 13493
13057 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || 13494 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13058 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
13059 return; 13495 return;
13060 13496
13061 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 13497 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
@@ -13069,7 +13505,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13069 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 13505 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13070 13506
13071 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { 13507 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13072 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI; 13508 tg3_flag_set(tp, APE_HAS_NCSI);
13073 fwtype = "NCSI"; 13509 fwtype = "NCSI";
13074 } else { 13510 } else {
13075 fwtype = "DASH"; 13511 fwtype = "DASH";
@@ -13093,7 +13529,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13093 if (tp->fw_ver[0] != 0) 13529 if (tp->fw_ver[0] != 0)
13094 vpd_vers = true; 13530 vpd_vers = true;
13095 13531
13096 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { 13532 if (tg3_flag(tp, NO_NVRAM)) {
13097 strcat(tp->fw_ver, "sb"); 13533 strcat(tp->fw_ver, "sb");
13098 return; 13534 return;
13099 } 13535 }
@@ -13110,8 +13546,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13110 else 13546 else
13111 return; 13547 return;
13112 13548
13113 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 13549 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13114 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
13115 goto done; 13550 goto done;
13116 13551
13117 tg3_read_mgmtfw_ver(tp); 13552 tg3_read_mgmtfw_ver(tp);
@@ -13122,21 +13557,14 @@ done:
13122 13557
13123static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); 13558static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13124 13559
13125 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
13126 {
13127 dev->vlan_features |= flags;
13128 }
13129
13130 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) 13560 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13131 { 13561 {
13132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13562 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 13563 return TG3_RX_RET_MAX_SIZE_5717;
13134 return 4096; 13564 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13135 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 13565 return TG3_RX_RET_MAX_SIZE_5700;
13136 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13137 return 1024;
13138 else 13566 else
13139 return 512; 13567 return TG3_RX_RET_MAX_SIZE_5705;
13140 } 13568 }
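
tg3_rx_ret_ring_size() now returns named constants instead of bare literals. Judging from the values removed on the left-hand side, the constants presumably map as below; the real definitions live outside this hunk.

/* Assumed values, inferred from the literals this hunk removes. */
#define TG3_RX_RET_MAX_SIZE_5705	512
#define TG3_RX_RET_MAX_SIZE_5700	1024
#define TG3_RX_RET_MAX_SIZE_5717	4096
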
13141 13569
13142static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { 13570static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
@@ -13181,7 +13609,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13181 13609
13182 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 13610 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13183 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 13611 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13184 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719) 13612 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13613 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13185 pci_read_config_dword(tp->pdev, 13614 pci_read_config_dword(tp->pdev,
13186 TG3PCI_GEN2_PRODID_ASICREV, 13615 TG3PCI_GEN2_PRODID_ASICREV,
13187 &prod_id_asic_rev); 13616 &prod_id_asic_rev);
@@ -13258,8 +13687,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13258 if (bridge->subordinate && 13687 if (bridge->subordinate &&
13259 (bridge->subordinate->number == 13688 (bridge->subordinate->number ==
13260 tp->pdev->bus->number)) { 13689 tp->pdev->bus->number)) {
13261 13690 tg3_flag_set(tp, ICH_WORKAROUND);
13262 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13263 pci_dev_put(bridge); 13691 pci_dev_put(bridge);
13264 break; 13692 break;
13265 } 13693 }
@@ -13291,7 +13719,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13291 tp->pdev->bus->number) && 13719 tp->pdev->bus->number) &&
13292 (bridge->subordinate->subordinate >= 13720 (bridge->subordinate->subordinate >=
13293 tp->pdev->bus->number)) { 13721 tp->pdev->bus->number)) {
13294 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG; 13722 tg3_flag_set(tp, 5701_DMA_BUG);
13295 pci_dev_put(bridge); 13723 pci_dev_put(bridge);
13296 break; 13724 break;
13297 } 13725 }
@@ -13306,8 +13734,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13306 */ 13734 */
13307 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || 13735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { 13736 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13309 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; 13737 tg3_flag_set(tp, 5780_CLASS);
13310 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 13738 tg3_flag_set(tp, 40BIT_DMA_BUG);
13311 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 13739 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13312 } else { 13740 } else {
13313 struct pci_dev *bridge = NULL; 13741 struct pci_dev *bridge = NULL;
@@ -13321,7 +13749,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13321 tp->pdev->bus->number) && 13749 tp->pdev->bus->number) &&
13322 (bridge->subordinate->subordinate >= 13750 (bridge->subordinate->subordinate >=
13323 tp->pdev->bus->number)) { 13751 tp->pdev->bus->number)) {
13324 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 13752 tg3_flag_set(tp, 40BIT_DMA_BUG);
13325 pci_dev_put(bridge); 13753 pci_dev_put(bridge);
13326 break; 13754 break;
13327 } 13755 }
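Several of the hunks above set workaround flags (ICH_WORKAROUND, 5701_DMA_BUG, 40BIT_DMA_BUG) by checking whether the NIC sits behind a particular host bridge: a bridge is considered to contain the device when its secondary/subordinate bus number range covers the NIC's bus. A hedged sketch of that scan pattern is shown below; the helper name and the vendor/device IDs are placeholders.

static bool example_behind_bridge(struct pci_dev *pdev, u16 vendor, u16 device)
{
        struct pci_dev *bridge = NULL;

        while ((bridge = pci_get_device(vendor, device, bridge)) != NULL) {
                if (bridge->subordinate &&
                    bridge->subordinate->number <= pdev->bus->number &&
                    bridge->subordinate->subordinate >= pdev->bus->number) {
                        pci_dev_put(bridge);  /* drop the reference taken by pci_get_device() */
                        return true;
                }
        }
        return false;
}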
@@ -13336,13 +13764,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13336 13764
13337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 13765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || 13766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) 13767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13768 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13340 tp->pdev_peer = tg3_find_peer(tp); 13769 tp->pdev_peer = tg3_find_peer(tp);
13341 13770
13342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 13772 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 13773 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13345 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS; 13774 tg3_flag_set(tp, 5717_PLUS);
13775
13776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13777 tg3_flag(tp, 5717_PLUS))
13778 tg3_flag_set(tp, 57765_PLUS);
13346 13779
13347 /* Intentionally exclude ASIC_REV_5906 */ 13780 /* Intentionally exclude ASIC_REV_5906 */
13348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 13781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -13351,52 +13784,51 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 13785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13353 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 13786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13354 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) 13787 tg3_flag(tp, 57765_PLUS))
13355 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; 13788 tg3_flag_set(tp, 5755_PLUS);
13356 13789
13357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 13790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 13791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || 13792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13360 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13793 tg3_flag(tp, 5755_PLUS) ||
13361 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 13794 tg3_flag(tp, 5780_CLASS))
13362 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 13795 tg3_flag_set(tp, 5750_PLUS);
13363 13796
13364 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || 13797 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13365 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 13798 tg3_flag(tp, 5750_PLUS))
13366 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; 13799 tg3_flag_set(tp, 5705_PLUS);
13367 13800
13368 /* 5700 B0 chips do not support checksumming correctly due 13801 /* 5700 B0 chips do not support checksumming correctly due
13369 * to hardware bugs. 13802 * to hardware bugs.
13370 */ 13803 */
13371 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) 13804 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13372 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; 13805 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13373 else {
13374 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13375 13806
13376 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 13807 if (tg3_flag(tp, 5755_PLUS))
13377 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13378 features |= NETIF_F_IPV6_CSUM; 13808 features |= NETIF_F_IPV6_CSUM;
13379 tp->dev->features |= features; 13809 tp->dev->features |= features;
13380 vlan_features_add(tp->dev, features); 13810 tp->dev->hw_features |= features;
13811 tp->dev->vlan_features |= features;
13381 } 13812 }
13382 13813
13383 /* Determine TSO capabilities */ 13814 /* Determine TSO capabilities */
13384 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 13815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13385 ; /* Do nothing. HW bug. */ 13816 ; /* Do nothing. HW bug. */
13386 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13817 else if (tg3_flag(tp, 57765_PLUS))
13387 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13818 tg3_flag_set(tp, HW_TSO_3);
13388 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13819 else if (tg3_flag(tp, 5755_PLUS) ||
13389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13820 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13390 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; 13821 tg3_flag_set(tp, HW_TSO_2);
13391 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 13822 else if (tg3_flag(tp, 5750_PLUS)) {
13392 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; 13823 tg3_flag_set(tp, HW_TSO_1);
13824 tg3_flag_set(tp, TSO_BUG);
13393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && 13825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13394 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) 13826 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13395 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; 13827 tg3_flag_clear(tp, TSO_BUG);
13396 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13828 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13397 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && 13829 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13398 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { 13830 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13399 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; 13831 tg3_flag_set(tp, TSO_BUG);
13400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) 13832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13401 tp->fw_needed = FIRMWARE_TG3TSO5; 13833 tp->fw_needed = FIRMWARE_TG3TSO5;
13402 else 13834 else
@@ -13405,22 +13837,22 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13405 13837
13406 tp->irq_max = 1; 13838 tp->irq_max = 1;
13407 13839
13408 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 13840 if (tg3_flag(tp, 5750_PLUS)) {
13409 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 13841 tg3_flag_set(tp, SUPPORT_MSI);
13410 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 13842 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13411 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || 13843 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13412 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && 13844 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13413 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && 13845 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13414 tp->pdev_peer == tp->pdev)) 13846 tp->pdev_peer == tp->pdev))
13415 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; 13847 tg3_flag_clear(tp, SUPPORT_MSI);
13416 13848
13417 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13849 if (tg3_flag(tp, 5755_PLUS) ||
13418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 13850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13419 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 13851 tg3_flag_set(tp, 1SHOT_MSI);
13420 } 13852 }
13421 13853
13422 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 13854 if (tg3_flag(tp, 57765_PLUS)) {
13423 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 13855 tg3_flag_set(tp, SUPPORT_MSIX);
13424 tp->irq_max = TG3_IRQ_MAX_VECS; 13856 tp->irq_max = TG3_IRQ_MAX_VECS;
13425 } 13857 }
13426 } 13858 }
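The hunk above only records capability: SUPPORT_MSI / SUPPORT_MSIX plus irq_max describe what the chip can do, and the interrupt setup path elsewhere in the driver acts on them. As a rough illustration of how that capability is consumed with this era's PCI API (not the driver's actual code; the function name is invented):

static int example_enable_msix(struct tg3 *tp)
{
        struct msix_entry entries[TG3_IRQ_MAX_VECS];
        int i, rc;

        if (!tg3_flag(tp, SUPPORT_MSIX))
                return -EOPNOTSUPP;

        for (i = 0; i < tp->irq_max; i++)
                entries[i].entry = i;

        /* 0 = success; >0 = only that many vectors available; <0 = error */
        rc = pci_enable_msix(tp->pdev, entries, tp->irq_max);
        return rc;
}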
@@ -13428,20 +13860,23 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 13860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 13861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13430 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13431 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; 13863 tg3_flag_set(tp, SHORT_DMA_BUG);
13432 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { 13864 else if (!tg3_flag(tp, 5755_PLUS)) {
13433 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; 13865 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13434 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13866 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13435 } 13867 }
13436 13868
13437 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && 13869 if (tg3_flag(tp, 5717_PLUS))
13870 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13871
13872 if (tg3_flag(tp, 57765_PLUS) &&
13438 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) 13873 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13439 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13874 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13440 13875
13441 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13876 if (!tg3_flag(tp, 5705_PLUS) ||
13442 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 13877 tg3_flag(tp, 5780_CLASS) ||
13443 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) 13878 tg3_flag(tp, USE_JUMBO_BDFLAG))
13444 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; 13879 tg3_flag_set(tp, JUMBO_CAPABLE);
13445 13880
13446 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 13881 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13447 &pci_state_reg); 13882 &pci_state_reg);
@@ -13450,10 +13885,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13450 if (tp->pcie_cap != 0) { 13885 if (tp->pcie_cap != 0) {
13451 u16 lnkctl; 13886 u16 lnkctl;
13452 13887
13453 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13888 tg3_flag_set(tp, PCI_EXPRESS);
13454 13889
13455 tp->pcie_readrq = 4096; 13890 tp->pcie_readrq = 4096;
13456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 13891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13457 tp->pcie_readrq = 2048; 13893 tp->pcie_readrq = 2048;
13458 13894
13459 pcie_set_readrq(tp->pdev, tp->pcie_readrq); 13895 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
@@ -13463,19 +13899,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13463 &lnkctl); 13899 &lnkctl);
13464 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { 13900 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13466 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; 13902 tg3_flag_clear(tp, HW_TSO_2);
13467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 13903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13468 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13469 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || 13905 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13470 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) 13906 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13471 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; 13907 tg3_flag_set(tp, CLKREQ_BUG);
13472 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { 13908 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13473 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN; 13909 tg3_flag_set(tp, L1PLLPD_EN);
13474 } 13910 }
13475 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 13911 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13476 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13912 tg3_flag_set(tp, PCI_EXPRESS);
13477 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13913 } else if (!tg3_flag(tp, 5705_PLUS) ||
13478 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 13914 tg3_flag(tp, 5780_CLASS)) {
13479 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 13915 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13480 if (!tp->pcix_cap) { 13916 if (!tp->pcix_cap) {
13481 dev_err(&tp->pdev->dev, 13917 dev_err(&tp->pdev->dev,
@@ -13484,7 +13920,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13484 } 13920 }
13485 13921
13486 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) 13922 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13487 tp->tg3_flags |= TG3_FLAG_PCIX_MODE; 13923 tg3_flag_set(tp, PCIX_MODE);
13488 } 13924 }
13489 13925
13490 /* If we have an AMD 762 or VIA K8T800 chipset, write 13926 /* If we have an AMD 762 or VIA K8T800 chipset, write
@@ -13494,8 +13930,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13494 * posted to the chip in order. 13930 * posted to the chip in order.
13495 */ 13931 */
13496 if (pci_dev_present(tg3_write_reorder_chipsets) && 13932 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13497 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 13933 !tg3_flag(tp, PCI_EXPRESS))
13498 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 13934 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13499 13935
13500 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 13936 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13501 &tp->pci_cacheline_sz); 13937 &tp->pci_cacheline_sz);
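MBOX_WRITE_REORDER, set in the hunk above for AMD 762 / VIA K8T800 host bridges, later steers mailbox writes through the flushing accessor (tg3_write_flush_reg32, visible further down in this diff). The essence of such a flushing write is a read-back of the same register so the posted write cannot be reordered past it. A minimal sketch, not the driver's exact code; tp->regs is the driver's mapped register base.

static void example_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);  /* read-back flushes the posted write */
}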
@@ -13512,17 +13948,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13512 /* 5700 BX chips need to have their TX producer index 13948 /* 5700 BX chips need to have their TX producer index
13513 * mailboxes written twice to work around a bug. 13949 * mailboxes written twice to work around a bug.
13514 */ 13950 */
13515 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; 13951 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13516 13952
13517 /* If we are in PCI-X mode, enable register write workaround. 13953 /* If we are in PCI-X mode, enable register write workaround.
13518 * 13954 *
13519 * The workaround is to use indirect register accesses 13955 * The workaround is to use indirect register accesses
13520 * for all chip writes not to mailbox registers. 13956 * for all chip writes not to mailbox registers.
13521 */ 13957 */
13522 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 13958 if (tg3_flag(tp, PCIX_MODE)) {
13523 u32 pm_reg; 13959 u32 pm_reg;
13524 13960
13525 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 13961 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13526 13962
13527 /* The chip can have its power management PCI config 13963 /* The chip can have its power management PCI config
13528 * space registers clobbered due to this bug. 13964 * space registers clobbered due to this bug.
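The comment above is truncated by the hunk boundary; the workaround it describes amounts to forcing the function back into D0 when its power-management registers may have been clobbered. As a hedged illustration of that kind of PCI PM fix-up (the helper name is invented; pm_cap stands for the offset returned by pci_find_capability(pdev, PCI_CAP_ID_PM)):

static void example_force_d0(struct pci_dev *pdev, int pm_cap)
{
        u16 pm_ctrl;

        pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pm_ctrl);
        pm_ctrl &= ~PCI_PM_CTRL_STATE_MASK;     /* D0 */
        pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pm_ctrl);
}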
@@ -13545,9 +13981,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13545 } 13981 }
13546 13982
13547 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 13983 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13548 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; 13984 tg3_flag_set(tp, PCI_HIGH_SPEED);
13549 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 13985 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13550 tp->tg3_flags |= TG3_FLAG_PCI_32BIT; 13986 tg3_flag_set(tp, PCI_32BIT);
13551 13987
13552 /* Chip-specific fixup from Broadcom driver */ 13988 /* Chip-specific fixup from Broadcom driver */
13553 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && 13989 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
@@ -13565,10 +14001,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13565 tp->write32_rx_mbox = tg3_write32; 14001 tp->write32_rx_mbox = tg3_write32;
13566 14002
13567 /* Various workaround register access methods */ 14003 /* Various workaround register access methods */
13568 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) 14004 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13569 tp->write32 = tg3_write_indirect_reg32; 14005 tp->write32 = tg3_write_indirect_reg32;
13570 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 14006 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13571 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 14007 (tg3_flag(tp, PCI_EXPRESS) &&
13572 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { 14008 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13573 /* 14009 /*
13574 * Back to back register writes can cause problems on these 14010 * Back to back register writes can cause problems on these
@@ -13580,14 +14016,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13580 tp->write32 = tg3_write_flush_reg32; 14016 tp->write32 = tg3_write_flush_reg32;
13581 } 14017 }
13582 14018
13583 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || 14019 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13584 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13585 tp->write32_tx_mbox = tg3_write32_tx_mbox; 14020 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13586 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) 14021 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13587 tp->write32_rx_mbox = tg3_write_flush_reg32; 14022 tp->write32_rx_mbox = tg3_write_flush_reg32;
13588 } 14023 }
13589 14024
13590 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { 14025 if (tg3_flag(tp, ICH_WORKAROUND)) {
13591 tp->read32 = tg3_read_indirect_reg32; 14026 tp->read32 = tg3_read_indirect_reg32;
13592 tp->write32 = tg3_write_indirect_reg32; 14027 tp->write32 = tg3_write_indirect_reg32;
13593 tp->read32_mbox = tg3_read_indirect_mbox; 14028 tp->read32_mbox = tg3_read_indirect_mbox;
@@ -13610,13 +14045,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13610 } 14045 }
13611 14046
13612 if (tp->write32 == tg3_write_indirect_reg32 || 14047 if (tp->write32 == tg3_write_indirect_reg32 ||
13613 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 14048 (tg3_flag(tp, PCIX_MODE) &&
13614 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 14049 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) 14050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13616 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; 14051 tg3_flag_set(tp, SRAM_USE_CONFIG);
13617 14052
13618 /* Get eeprom hw config before calling tg3_set_power_state(). 14053 /* Get eeprom hw config before calling tg3_set_power_state().
13619 * In particular, the TG3_FLG2_IS_NIC flag must be 14054 * In particular, the TG3_FLAG_IS_NIC flag must be
13620 * determined before calling tg3_set_power_state() so that 14055 * determined before calling tg3_set_power_state() so that
13621 * we know whether or not to switch out of Vaux power. 14056 * we know whether or not to switch out of Vaux power.
13622 * When the flag is set, it means that GPIO1 is used for eeprom 14057 * When the flag is set, it means that GPIO1 is used for eeprom
@@ -13625,7 +14060,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13625 */ 14060 */
13626 tg3_get_eeprom_hw_cfg(tp); 14061 tg3_get_eeprom_hw_cfg(tp);
13627 14062
13628 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 14063 if (tg3_flag(tp, ENABLE_APE)) {
13629 /* Allow reads and writes to the 14064 /* Allow reads and writes to the
13630 * APE register and memory space. 14065 * APE register and memory space.
13631 */ 14066 */
@@ -13640,16 +14075,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13640 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14075 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14076 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 14077 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13643 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) 14078 tg3_flag(tp, 57765_PLUS))
13644 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 14079 tg3_flag_set(tp, CPMU_PRESENT);
13645 14080
13646 /* Set up tp->grc_local_ctrl before calling tg_power_up(). 14081 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13647 * GPIO1 driven high will bring 5700's external PHY out of reset. 14082 * GPIO1 driven high will bring 5700's external PHY out of reset.
13648 * It is also used as eeprom write protect on LOMs. 14083 * It is also used as eeprom write protect on LOMs.
13649 */ 14084 */
13650 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 14085 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13651 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || 14086 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13652 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) 14087 tg3_flag(tp, EEPROM_WRITE_PROT))
13653 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 14088 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13654 GRC_LCLCTRL_GPIO_OUTPUT1); 14089 GRC_LCLCTRL_GPIO_OUTPUT1);
13655 /* Unused GPIO3 must be driven as output on 5752 because there 14090 /* Unused GPIO3 must be driven as output on 5752 because there
@@ -13667,7 +14102,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13667 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { 14102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13668 /* Turn off the debug UART. */ 14103 /* Turn off the debug UART. */
13669 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 14104 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13670 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) 14105 if (tg3_flag(tp, IS_NIC))
13671 /* Keep VMain power. */ 14106 /* Keep VMain power. */
13672 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | 14107 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13673 GRC_LCLCTRL_GPIO_OUTPUT0; 14108 GRC_LCLCTRL_GPIO_OUTPUT0;
@@ -13683,18 +14118,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13683 /* Derive initial jumbo mode from MTU assigned in 14118 /* Derive initial jumbo mode from MTU assigned in
13684 * ether_setup() via the alloc_etherdev() call 14119 * ether_setup() via the alloc_etherdev() call
13685 */ 14120 */
13686 if (tp->dev->mtu > ETH_DATA_LEN && 14121 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13687 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 14122 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13688 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13689 14123
13690 /* Determine WakeOnLan speed to use. */ 14124 /* Determine WakeOnLan speed to use. */
13691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 14125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13692 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 14126 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13693 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || 14127 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13694 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { 14128 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13695 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); 14129 tg3_flag_clear(tp, WOL_SPEED_100MB);
13696 } else { 14130 } else {
13697 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; 14131 tg3_flag_set(tp, WOL_SPEED_100MB);
13698 } 14132 }
13699 14133
13700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 14134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13715,11 +14149,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13715 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 14149 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13716 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; 14150 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13717 14151
13718 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 14152 if (tg3_flag(tp, 5705_PLUS) &&
13719 !(tp->phy_flags & TG3_PHYFLG_IS_FET) && 14153 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13720 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 14154 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13721 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && 14155 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13722 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 14156 !tg3_flag(tp, 57765_PLUS)) {
13723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 14157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 14158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13725 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 14159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -13740,7 +14174,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13740 tp->phy_otp = TG3_OTP_DEFAULT; 14174 tp->phy_otp = TG3_OTP_DEFAULT;
13741 } 14175 }
13742 14176
13743 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) 14177 if (tg3_flag(tp, CPMU_PRESENT))
13744 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; 14178 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13745 else 14179 else
13746 tp->mi_mode = MAC_MI_MODE_BASE; 14180 tp->mi_mode = MAC_MI_MODE_BASE;
@@ -13750,9 +14184,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13750 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 14184 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13751 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 14185 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13752 14186
14187 /* Set these bits to enable statistics workaround. */
14188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14189 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14190 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14191 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14192 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14193 }
14194
13753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 14196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13755 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 14197 tg3_flag_set(tp, USE_PHYLIB);
13756 14198
13757 err = tg3_mdio_init(tp); 14199 err = tg3_mdio_init(tp);
13758 if (err) 14200 if (err)
@@ -13760,7 +14202,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13760 14202
13761 /* Initialize data/descriptor byte/word swapping. */ 14203 /* Initialize data/descriptor byte/word swapping. */
13762 val = tr32(GRC_MODE); 14204 val = tr32(GRC_MODE);
13763 val &= GRC_MODE_HOST_STACKUP; 14205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14206 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14207 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14208 GRC_MODE_B2HRX_ENABLE |
14209 GRC_MODE_HTX2B_ENABLE |
14210 GRC_MODE_HOST_STACKUP);
14211 else
14212 val &= GRC_MODE_HOST_STACKUP;
14213
13764 tw32(GRC_MODE, val | tp->grc_mode); 14214 tw32(GRC_MODE, val | tp->grc_mode);
13765 14215
13766 tg3_switch_clocks(tp); 14216 tg3_switch_clocks(tp);
@@ -13771,7 +14221,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13771 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 14221 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13772 &pci_state_reg); 14222 &pci_state_reg);
13773 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 14223 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13774 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { 14224 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
13775 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); 14225 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13776 14226
13777 if (chiprevid == CHIPREV_ID_5701_A0 || 14227 if (chiprevid == CHIPREV_ID_5701_A0 ||
@@ -13790,7 +14240,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13790 writel(0x00000000, sram_base + 4); 14240 writel(0x00000000, sram_base + 4);
13791 writel(0xffffffff, sram_base + 4); 14241 writel(0xffffffff, sram_base + 4);
13792 if (readl(sram_base) != 0x00000000) 14242 if (readl(sram_base) != 0x00000000)
13793 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 14243 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13794 } 14244 }
13795 } 14245 }
13796 14246
@@ -13803,12 +14253,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 14253 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13804 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 14254 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13805 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 14255 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13806 tp->tg3_flags2 |= TG3_FLG2_IS_5788; 14256 tg3_flag_set(tp, IS_5788);
13807 14257
13808 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && 14258 if (!tg3_flag(tp, IS_5788) &&
13809 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) 14259 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13810 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; 14260 tg3_flag_set(tp, TAGGED_STATUS);
13811 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { 14261 if (tg3_flag(tp, TAGGED_STATUS)) {
13812 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 14262 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13813 HOSTCC_MODE_CLRTICK_TXBD); 14263 HOSTCC_MODE_CLRTICK_TXBD);
13814 14264
@@ -13818,7 +14268,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13818 } 14268 }
13819 14269
13820 /* Preserve the APE MAC_MODE bits */ 14270 /* Preserve the APE MAC_MODE bits */
13821 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 14271 if (tg3_flag(tp, ENABLE_APE))
13822 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 14272 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13823 else 14273 else
13824 tp->mac_mode = TG3_DEF_MAC_MODE; 14274 tp->mac_mode = TG3_DEF_MAC_MODE;
@@ -13865,9 +14315,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13865 * status register in those cases. 14315 * status register in those cases.
13866 */ 14316 */
13867 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) 14317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13868 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; 14318 tg3_flag_set(tp, USE_LINKCHG_REG);
13869 else 14319 else
13870 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; 14320 tg3_flag_clear(tp, USE_LINKCHG_REG);
13871 14321
13872 /* The led_ctrl is set during tg3_phy_probe, here we might 14322 /* The led_ctrl is set during tg3_phy_probe, here we might
13873 * have to force the link status polling mechanism based 14323 * have to force the link status polling mechanism based
@@ -13877,19 +14327,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 14327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13878 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { 14328 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
13879 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; 14329 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13880 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; 14330 tg3_flag_set(tp, USE_LINKCHG_REG);
13881 } 14331 }
13882 14332
13883 /* For all SERDES we poll the MAC status register. */ 14333 /* For all SERDES we poll the MAC status register. */
13884 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 14334 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13885 tp->tg3_flags |= TG3_FLAG_POLL_SERDES; 14335 tg3_flag_set(tp, POLL_SERDES);
13886 else 14336 else
13887 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 14337 tg3_flag_clear(tp, POLL_SERDES);
13888 14338
13889 tp->rx_offset = NET_IP_ALIGN; 14339 tp->rx_offset = NET_IP_ALIGN;
13890 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; 14340 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 14341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13892 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 14342 tg3_flag(tp, PCIX_MODE)) {
13893 tp->rx_offset = 0; 14343 tp->rx_offset = 0;
13894#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 14344#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13895 tp->rx_copy_thresh = ~(u16)0; 14345 tp->rx_copy_thresh = ~(u16)0;
@@ -13910,7 +14360,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 14360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13911 tp->rx_std_max_post = 8; 14361 tp->rx_std_max_post = 8;
13912 14362
13913 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) 14363 if (tg3_flag(tp, ASPM_WORKAROUND))
13914 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & 14364 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13915 PCIE_PWR_MGMT_L1_THRESH_MSK; 14365 PCIE_PWR_MGMT_L1_THRESH_MSK;
13916 14366
@@ -13958,15 +14408,14 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
13958 14408
13959 mac_offset = 0x7c; 14409 mac_offset = 0x7c;
13960 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || 14410 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13961 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 14411 tg3_flag(tp, 5780_CLASS)) {
13962 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 14412 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13963 mac_offset = 0xcc; 14413 mac_offset = 0xcc;
13964 if (tg3_nvram_lock(tp)) 14414 if (tg3_nvram_lock(tp))
13965 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 14415 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13966 else 14416 else
13967 tg3_nvram_unlock(tp); 14417 tg3_nvram_unlock(tp);
13968 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 14418 } else if (tg3_flag(tp, 5717_PLUS)) {
13969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13970 if (PCI_FUNC(tp->pdev->devfn) & 1) 14419 if (PCI_FUNC(tp->pdev->devfn) & 1)
13971 mac_offset = 0xcc; 14420 mac_offset = 0xcc;
13972 if (PCI_FUNC(tp->pdev->devfn) > 1) 14421 if (PCI_FUNC(tp->pdev->devfn) > 1)
@@ -13991,7 +14440,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
13991 } 14440 }
13992 if (!addr_ok) { 14441 if (!addr_ok) {
13993 /* Next, try NVRAM. */ 14442 /* Next, try NVRAM. */
13994 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) && 14443 if (!tg3_flag(tp, NO_NVRAM) &&
13995 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 14444 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13996 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 14445 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13997 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 14446 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
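The NVRAM path above reads two big-endian 32-bit words and unpacks the 6-byte MAC address from them: the low two bytes of the first word followed by all four bytes of the second. Spelled out as a standalone helper (the name is illustrative; the byte layout matches the code shown):

static void example_mac_from_nvram(u8 *addr, __be32 hi, __be32 lo)
{
        memcpy(&addr[0], ((char *)&hi) + 2, 2);  /* bytes 0-1 from the hi word */
        memcpy(&addr[2], (char *)&lo, 4);        /* bytes 2-5 from the lo word */
}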
@@ -14042,7 +14491,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14042 */ 14491 */
14043 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 14492 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14044 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && 14493 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14045 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 14494 !tg3_flag(tp, PCI_EXPRESS))
14046 goto out; 14495 goto out;
14047 14496
14048#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 14497#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
@@ -14055,7 +14504,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14055#endif 14504#endif
14056#endif 14505#endif
14057 14506
14058 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 14507 if (tg3_flag(tp, 57765_PLUS)) {
14059 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 14508 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14060 goto out; 14509 goto out;
14061 } 14510 }
@@ -14074,8 +14523,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14074 * other than 5700 and 5701 which do not implement the 14523 * other than 5700 and 5701 which do not implement the
14075 * boundary bits. 14524 * boundary bits.
14076 */ 14525 */
14077 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 14526 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14078 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
14079 switch (cacheline_size) { 14527 switch (cacheline_size) {
14080 case 16: 14528 case 16:
14081 case 32: 14529 case 32:
@@ -14100,7 +14548,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14100 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 14548 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14101 break; 14549 break;
14102 } 14550 }
14103 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 14551 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14104 switch (cacheline_size) { 14552 switch (cacheline_size) {
14105 case 16: 14553 case 16:
14106 case 32: 14554 case 32:
@@ -14272,13 +14720,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14272 14720
14273 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 14721 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14274 14722
14275 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 14723 if (tg3_flag(tp, 57765_PLUS))
14276 goto out; 14724 goto out;
14277 14725
14278 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 14726 if (tg3_flag(tp, PCI_EXPRESS)) {
14279 /* DMA read watermark not used on PCIE */ 14727 /* DMA read watermark not used on PCIE */
14280 tp->dma_rwctrl |= 0x00180000; 14728 tp->dma_rwctrl |= 0x00180000;
14281 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { 14729 } else if (!tg3_flag(tp, PCIX_MODE)) {
14282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || 14730 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) 14731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14284 tp->dma_rwctrl |= 0x003f0000; 14732 tp->dma_rwctrl |= 0x003f0000;
@@ -14294,7 +14742,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14294 * do the less restrictive ONE_DMA workaround for 14742 * do the less restrictive ONE_DMA workaround for
14295 * better performance. 14743 * better performance.
14296 */ 14744 */
14297 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && 14745 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14298 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) 14746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14299 tp->dma_rwctrl |= 0x8000; 14747 tp->dma_rwctrl |= 0x8000;
14300 else if (ccval == 0x6 || ccval == 0x7) 14748 else if (ccval == 0x6 || ccval == 0x7)
@@ -14423,7 +14871,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14423 } 14871 }
14424 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 14872 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14425 DMA_RWCTRL_WRITE_BNDRY_16) { 14873 DMA_RWCTRL_WRITE_BNDRY_16) {
14426
14427 /* DMA test passed without adjusting DMA boundary, 14874 /* DMA test passed without adjusting DMA boundary,
14428 * now look for chipsets that are known to expose the 14875 * now look for chipsets that are known to expose the
14429 * DMA bug without failing the test. 14876 * DMA bug without failing the test.
@@ -14447,7 +14894,7 @@ out_nofree:
14447 14894
14448static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 14895static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14449{ 14896{
14450 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { 14897 if (tg3_flag(tp, 57765_PLUS)) {
14451 tp->bufmgr_config.mbuf_read_dma_low_water = 14898 tp->bufmgr_config.mbuf_read_dma_low_water =
14452 DEFAULT_MB_RDMA_LOW_WATER_5705; 14899 DEFAULT_MB_RDMA_LOW_WATER_5705;
14453 tp->bufmgr_config.mbuf_mac_rx_low_water = 14900 tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14461,7 +14908,7 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14461 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; 14908 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14462 tp->bufmgr_config.mbuf_high_water_jumbo = 14909 tp->bufmgr_config.mbuf_high_water_jumbo =
14463 DEFAULT_MB_HIGH_WATER_JUMBO_57765; 14910 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14464 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 14911 } else if (tg3_flag(tp, 5705_PLUS)) {
14465 tp->bufmgr_config.mbuf_read_dma_low_water = 14912 tp->bufmgr_config.mbuf_read_dma_low_water =
14466 DEFAULT_MB_RDMA_LOW_WATER_5705; 14913 DEFAULT_MB_RDMA_LOW_WATER_5705;
14467 tp->bufmgr_config.mbuf_mac_rx_low_water = 14914 tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14525,6 +14972,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
14525 case TG3_PHY_ID_BCM5718S: return "5718S"; 14972 case TG3_PHY_ID_BCM5718S: return "5718S";
14526 case TG3_PHY_ID_BCM57765: return "57765"; 14973 case TG3_PHY_ID_BCM57765: return "57765";
14527 case TG3_PHY_ID_BCM5719C: return "5719C"; 14974 case TG3_PHY_ID_BCM5719C: return "5719C";
14975 case TG3_PHY_ID_BCM5720C: return "5720C";
14528 case TG3_PHY_ID_BCM8002: return "8002/serdes"; 14976 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14529 case 0: return "serdes"; 14977 case 0: return "serdes";
14530 default: return "unknown"; 14978 default: return "unknown";
@@ -14533,10 +14981,10 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
14533 14981
14534static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) 14982static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14535{ 14983{
14536 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 14984 if (tg3_flag(tp, PCI_EXPRESS)) {
14537 strcpy(str, "PCI Express"); 14985 strcpy(str, "PCI Express");
14538 return str; 14986 return str;
14539 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 14987 } else if (tg3_flag(tp, PCIX_MODE)) {
14540 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 14988 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14541 14989
14542 strcpy(str, "PCIX:"); 14990 strcpy(str, "PCIX:");
@@ -14555,12 +15003,12 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14555 strcat(str, "100MHz"); 15003 strcat(str, "100MHz");
14556 } else { 15004 } else {
14557 strcpy(str, "PCI:"); 15005 strcpy(str, "PCI:");
14558 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) 15006 if (tg3_flag(tp, PCI_HIGH_SPEED))
14559 strcat(str, "66MHz"); 15007 strcat(str, "66MHz");
14560 else 15008 else
14561 strcat(str, "33MHz"); 15009 strcat(str, "33MHz");
14562 } 15010 }
14563 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) 15011 if (tg3_flag(tp, PCI_32BIT))
14564 strcat(str, ":32-bit"); 15012 strcat(str, ":32-bit");
14565 else 15013 else
14566 strcat(str, ":64-bit"); 15014 strcat(str, ":64-bit");
@@ -14619,7 +15067,7 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
14619 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 15067 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14620 } 15068 }
14621 15069
14622 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 15070 if (tg3_flag(tp, 5705_PLUS)) {
14623 ec->rx_coalesce_usecs_irq = 0; 15071 ec->rx_coalesce_usecs_irq = 0;
14624 ec->tx_coalesce_usecs_irq = 0; 15072 ec->tx_coalesce_usecs_irq = 0;
14625 ec->stats_block_coalesce_usecs = 0; 15073 ec->stats_block_coalesce_usecs = 0;
@@ -14637,6 +15085,8 @@ static const struct net_device_ops tg3_netdev_ops = {
14637 .ndo_do_ioctl = tg3_ioctl, 15085 .ndo_do_ioctl = tg3_ioctl,
14638 .ndo_tx_timeout = tg3_tx_timeout, 15086 .ndo_tx_timeout = tg3_tx_timeout,
14639 .ndo_change_mtu = tg3_change_mtu, 15087 .ndo_change_mtu = tg3_change_mtu,
15088 .ndo_fix_features = tg3_fix_features,
15089 .ndo_set_features = tg3_set_features,
14640#ifdef CONFIG_NET_POLL_CONTROLLER 15090#ifdef CONFIG_NET_POLL_CONTROLLER
14641 .ndo_poll_controller = tg3_poll_controller, 15091 .ndo_poll_controller = tg3_poll_controller,
14642#endif 15092#endif
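The new .ndo_fix_features / .ndo_set_features hooks added above plug the driver into the kernel's runtime feature toggling (ethtool -K): fix_features vetoes combinations the hardware cannot support, and set_features reprograms the hardware when the effective feature set changes. A generic sketch of the pattern with this era's u32-based signatures follows; it is not tg3's actual implementation, and the function names are invented.

static u32 example_fix_features(struct net_device *dev, u32 features)
{
        /* TSO depends on TX checksumming; drop it if checksums go away. */
        if (!(features & NETIF_F_IP_CSUM))
                features &= ~NETIF_F_ALL_TSO;
        return features;
}

static int example_set_features(struct net_device *dev, u32 features)
{
        u32 changed = dev->features ^ features;

        if (changed & NETIF_F_LOOPBACK) {
                /* reprogram the MAC internal-loopback mode here */
        }
        return 0;
}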
@@ -14653,6 +15103,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14653 .ndo_do_ioctl = tg3_ioctl, 15103 .ndo_do_ioctl = tg3_ioctl,
14654 .ndo_tx_timeout = tg3_tx_timeout, 15104 .ndo_tx_timeout = tg3_tx_timeout,
14655 .ndo_change_mtu = tg3_change_mtu, 15105 .ndo_change_mtu = tg3_change_mtu,
15106 .ndo_set_features = tg3_set_features,
14656#ifdef CONFIG_NET_POLL_CONTROLLER 15107#ifdef CONFIG_NET_POLL_CONTROLLER
14657 .ndo_poll_controller = tg3_poll_controller, 15108 .ndo_poll_controller = tg3_poll_controller,
14658#endif 15109#endif
@@ -14667,6 +15118,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14667 u32 sndmbx, rcvmbx, intmbx; 15118 u32 sndmbx, rcvmbx, intmbx;
14668 char str[40]; 15119 char str[40];
14669 u64 dma_mask, persist_dma_mask; 15120 u64 dma_mask, persist_dma_mask;
15121 u32 hw_features = 0;
14670 15122
14671 printk_once(KERN_INFO "%s\n", version); 15123 printk_once(KERN_INFO "%s\n", version);
14672 15124
@@ -14762,9 +15214,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14762 goto err_out_iounmap; 15214 goto err_out_iounmap;
14763 } 15215 }
14764 15216
14765 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 15217 if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
14766 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
14767 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14768 dev->netdev_ops = &tg3_netdev_ops; 15218 dev->netdev_ops = &tg3_netdev_ops;
14769 else 15219 else
14770 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 15220 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14776,9 +15226,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14776 * On 64-bit systems without IOMMU, use 64-bit dma_mask and 15226 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14777 * do DMA address check in tg3_start_xmit(). 15227 * do DMA address check in tg3_start_xmit().
14778 */ 15228 */
14779 if (tp->tg3_flags2 & TG3_FLG2_IS_5788) 15229 if (tg3_flag(tp, IS_5788))
14780 persist_dma_mask = dma_mask = DMA_BIT_MASK(32); 15230 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14781 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { 15231 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
14782 persist_dma_mask = dma_mask = DMA_BIT_MASK(40); 15232 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14783#ifdef CONFIG_HIGHMEM 15233#ifdef CONFIG_HIGHMEM
14784 dma_mask = DMA_BIT_MASK(64); 15234 dma_mask = DMA_BIT_MASK(64);
@@ -14812,11 +15262,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14812 tg3_init_bufmgr_config(tp); 15262 tg3_init_bufmgr_config(tp);
14813 15263
14814 /* Selectively allow TSO based on operating conditions */ 15264 /* Selectively allow TSO based on operating conditions */
14815 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || 15265 if ((tg3_flag(tp, HW_TSO_1) ||
14816 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) 15266 tg3_flag(tp, HW_TSO_2) ||
14817 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 15267 tg3_flag(tp, HW_TSO_3)) ||
15268 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
15269 tg3_flag_set(tp, TSO_CAPABLE);
14818 else { 15270 else {
14819 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); 15271 tg3_flag_clear(tp, TSO_CAPABLE);
15272 tg3_flag_clear(tp, TSO_BUG);
14820 tp->fw_needed = NULL; 15273 tp->fw_needed = NULL;
14821 } 15274 }
14822 15275
@@ -14827,32 +15280,41 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14827 * Firmware TSO on older chips gives lower performance, so it 15280 * Firmware TSO on older chips gives lower performance, so it
14828 * is off by default, but can be enabled using ethtool. 15281 * is off by default, but can be enabled using ethtool.
14829 */ 15282 */
14830 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && 15283 if ((tg3_flag(tp, HW_TSO_1) ||
14831 (dev->features & NETIF_F_IP_CSUM)) { 15284 tg3_flag(tp, HW_TSO_2) ||
14832 dev->features |= NETIF_F_TSO; 15285 tg3_flag(tp, HW_TSO_3)) &&
14833 vlan_features_add(dev, NETIF_F_TSO); 15286 (dev->features & NETIF_F_IP_CSUM))
14834 } 15287 hw_features |= NETIF_F_TSO;
14835 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || 15288 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
14836 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { 15289 if (dev->features & NETIF_F_IPV6_CSUM)
14837 if (dev->features & NETIF_F_IPV6_CSUM) { 15290 hw_features |= NETIF_F_TSO6;
14838 dev->features |= NETIF_F_TSO6; 15291 if (tg3_flag(tp, HW_TSO_3) ||
14839 vlan_features_add(dev, NETIF_F_TSO6);
14840 }
14841 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 15292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14843 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 15293 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14844 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 15294 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 15295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { 15296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14847 dev->features |= NETIF_F_TSO_ECN; 15297 hw_features |= NETIF_F_TSO_ECN;
14848 vlan_features_add(dev, NETIF_F_TSO_ECN);
14849 }
14850 } 15298 }
14851 15299
15300 dev->hw_features |= hw_features;
15301 dev->features |= hw_features;
15302 dev->vlan_features |= hw_features;
15303
15304 /*
15305 * Add loopback capability only for a subset of devices that support
 15306 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15307 * loopback for the remaining devices.
15308 */
15309 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15310 !tg3_flag(tp, CPMU_PRESENT))
15311 /* Add the loopback capability */
15312 dev->hw_features |= NETIF_F_LOOPBACK;
15313
14852 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 15314 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14853 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 15315 !tg3_flag(tp, TSO_CAPABLE) &&
14854 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 15316 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14855 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; 15317 tg3_flag_set(tp, MAX_RXPEND_64);
14856 tp->rx_pending = 63; 15318 tp->rx_pending = 63;
14857 } 15319 }
14858 15320
@@ -14863,7 +15325,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14863 goto err_out_iounmap; 15325 goto err_out_iounmap;
14864 } 15326 }
14865 15327
14866 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 15328 if (tg3_flag(tp, ENABLE_APE)) {
14867 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 15329 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14868 if (!tp->aperegs) { 15330 if (!tp->aperegs) {
14869 dev_err(&pdev->dev, 15331 dev_err(&pdev->dev,
@@ -14874,7 +15336,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14874 15336
14875 tg3_ape_lock_init(tp); 15337 tg3_ape_lock_init(tp);
14876 15338
14877 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) 15339 if (tg3_flag(tp, ENABLE_ASF))
14878 tg3_read_dash_ver(tp); 15340 tg3_read_dash_ver(tp);
14879 } 15341 }
14880 15342
@@ -14918,7 +15380,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14918 else 15380 else
14919 tnapi->coal_now = HOSTCC_MODE_NOW; 15381 tnapi->coal_now = HOSTCC_MODE_NOW;
14920 15382
14921 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) 15383 if (!tg3_flag(tp, SUPPORT_MSIX))
14922 break; 15384 break;
14923 15385
14924 /* 15386 /*
@@ -14972,21 +15434,25 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14972 ethtype = "10/100/1000Base-T"; 15434 ethtype = "10/100/1000Base-T";
14973 15435
14974 netdev_info(dev, "attached PHY is %s (%s Ethernet) " 15436 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14975 "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype, 15437 "(WireSpeed[%d], EEE[%d])\n",
14976 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0); 15438 tg3_phy_string(tp), ethtype,
15439 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15440 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
14977 } 15441 }
14978 15442
14979 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", 15443 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14980 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, 15444 (dev->features & NETIF_F_RXCSUM) != 0,
14981 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, 15445 tg3_flag(tp, USE_LINKCHG_REG) != 0,
14982 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, 15446 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
14983 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, 15447 tg3_flag(tp, ENABLE_ASF) != 0,
14984 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 15448 tg3_flag(tp, TSO_CAPABLE) != 0);
14985 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", 15449 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14986 tp->dma_rwctrl, 15450 tp->dma_rwctrl,
14987 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : 15451 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14988 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64); 15452 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14989 15453
15454 pci_save_state(pdev);
15455
14990 return 0; 15456 return 0;
14991 15457
14992err_out_apeunmap: 15458err_out_apeunmap:
@@ -15025,7 +15491,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
15025 15491
15026 cancel_work_sync(&tp->reset_task); 15492 cancel_work_sync(&tp->reset_task);
15027 15493
15028 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 15494 if (!tg3_flag(tp, USE_PHYLIB)) {
15029 tg3_phy_fini(tp); 15495 tg3_phy_fini(tp);
15030 tg3_mdio_fini(tp); 15496 tg3_mdio_fini(tp);
15031 } 15497 }
@@ -15071,7 +15537,7 @@ static int tg3_suspend(struct device *device)
15071 15537
15072 tg3_full_lock(tp, 0); 15538 tg3_full_lock(tp, 0);
15073 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 15539 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15074 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 15540 tg3_flag_clear(tp, INIT_COMPLETE);
15075 tg3_full_unlock(tp); 15541 tg3_full_unlock(tp);
15076 15542
15077 err = tg3_power_down_prepare(tp); 15543 err = tg3_power_down_prepare(tp);
@@ -15080,7 +15546,7 @@ static int tg3_suspend(struct device *device)
15080 15546
15081 tg3_full_lock(tp, 0); 15547 tg3_full_lock(tp, 0);
15082 15548
15083 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 15549 tg3_flag_set(tp, INIT_COMPLETE);
15084 err2 = tg3_restart_hw(tp, 1); 15550 err2 = tg3_restart_hw(tp, 1);
15085 if (err2) 15551 if (err2)
15086 goto out; 15552 goto out;
@@ -15115,7 +15581,7 @@ static int tg3_resume(struct device *device)
15115 15581
15116 tg3_full_lock(tp, 0); 15582 tg3_full_lock(tp, 0);
15117 15583
15118 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 15584 tg3_flag_set(tp, INIT_COMPLETE);
15119 err = tg3_restart_hw(tp, 1); 15585 err = tg3_restart_hw(tp, 1);
15120 if (err) 15586 if (err)
15121 goto out; 15587 goto out;
@@ -15143,11 +15609,156 @@ static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15143 15609
15144#endif /* CONFIG_PM_SLEEP */ 15610#endif /* CONFIG_PM_SLEEP */
15145 15611
15612/**
15613 * tg3_io_error_detected - called when PCI error is detected
15614 * @pdev: Pointer to PCI device
15615 * @state: The current pci connection state
15616 *
15617 * This function is called after a PCI bus error affecting
15618 * this device has been detected.
15619 */
15620static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15621 pci_channel_state_t state)
15622{
15623 struct net_device *netdev = pci_get_drvdata(pdev);
15624 struct tg3 *tp = netdev_priv(netdev);
15625 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15626
15627 netdev_info(netdev, "PCI I/O error detected\n");
15628
15629 rtnl_lock();
15630
15631 if (!netif_running(netdev))
15632 goto done;
15633
15634 tg3_phy_stop(tp);
15635
15636 tg3_netif_stop(tp);
15637
15638 del_timer_sync(&tp->timer);
15639 tg3_flag_clear(tp, RESTART_TIMER);
15640
15641 /* Want to make sure that the reset task doesn't run */
15642 cancel_work_sync(&tp->reset_task);
15643 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15644 tg3_flag_clear(tp, RESTART_TIMER);
15645
15646 netif_device_detach(netdev);
15647
15648 /* Clean up software state, even if MMIO is blocked */
15649 tg3_full_lock(tp, 0);
15650 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15651 tg3_full_unlock(tp);
15652
15653done:
15654 if (state == pci_channel_io_perm_failure)
15655 err = PCI_ERS_RESULT_DISCONNECT;
15656 else
15657 pci_disable_device(pdev);
15658
15659 rtnl_unlock();
15660
15661 return err;
15662}
15663
15664/**
15665 * tg3_io_slot_reset - called after the pci bus has been reset.
15666 * @pdev: Pointer to PCI device
15667 *
15668 * Restart the card from scratch, as if from a cold-boot.
 15669 * At this point, the card has experienced a hard reset,
15670 * followed by fixups by BIOS, and has its config space
15671 * set up identically to what it was at cold boot.
15672 */
15673static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15674{
15675 struct net_device *netdev = pci_get_drvdata(pdev);
15676 struct tg3 *tp = netdev_priv(netdev);
15677 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15678 int err;
15679
15680 rtnl_lock();
15681
15682 if (pci_enable_device(pdev)) {
15683 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15684 goto done;
15685 }
15686
15687 pci_set_master(pdev);
15688 pci_restore_state(pdev);
15689 pci_save_state(pdev);
15690
15691 if (!netif_running(netdev)) {
15692 rc = PCI_ERS_RESULT_RECOVERED;
15693 goto done;
15694 }
15695
15696 err = tg3_power_up(tp);
15697 if (err) {
15698 netdev_err(netdev, "Failed to restore register access.\n");
15699 goto done;
15700 }
15701
15702 rc = PCI_ERS_RESULT_RECOVERED;
15703
15704done:
15705 rtnl_unlock();
15706
15707 return rc;
15708}
15709
15710/**
15711 * tg3_io_resume - called when traffic can start flowing again.
15712 * @pdev: Pointer to PCI device
15713 *
15714 * This callback is called when the error recovery driver tells
15715 * us that it's OK to resume normal operation.
15716 */
15717static void tg3_io_resume(struct pci_dev *pdev)
15718{
15719 struct net_device *netdev = pci_get_drvdata(pdev);
15720 struct tg3 *tp = netdev_priv(netdev);
15721 int err;
15722
15723 rtnl_lock();
15724
15725 if (!netif_running(netdev))
15726 goto done;
15727
15728 tg3_full_lock(tp, 0);
15729 tg3_flag_set(tp, INIT_COMPLETE);
15730 err = tg3_restart_hw(tp, 1);
15731 tg3_full_unlock(tp);
15732 if (err) {
15733 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15734 goto done;
15735 }
15736
15737 netif_device_attach(netdev);
15738
15739 tp->timer.expires = jiffies + tp->timer_offset;
15740 add_timer(&tp->timer);
15741
15742 tg3_netif_start(tp);
15743
15744 tg3_phy_start(tp);
15745
15746done:
15747 rtnl_unlock();
15748}
15749
15750static struct pci_error_handlers tg3_err_handler = {
15751 .error_detected = tg3_io_error_detected,
15752 .slot_reset = tg3_io_slot_reset,
15753 .resume = tg3_io_resume
15754};
15755
15146static struct pci_driver tg3_driver = { 15756static struct pci_driver tg3_driver = {
15147 .name = DRV_MODULE_NAME, 15757 .name = DRV_MODULE_NAME,
15148 .id_table = tg3_pci_tbl, 15758 .id_table = tg3_pci_tbl,
15149 .probe = tg3_init_one, 15759 .probe = tg3_init_one,
15150 .remove = __devexit_p(tg3_remove_one), 15760 .remove = __devexit_p(tg3_remove_one),
15761 .err_handler = &tg3_err_handler,
15151 .driver.pm = TG3_PM_OPS, 15762 .driver.pm = TG3_PM_OPS,
15152}; 15763};
15153 15764
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 5e96706ad108..ce010cd33895 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,13 @@
23#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ 23#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
24#define TG3_BDINFO_SIZE 0x10UL 24#define TG3_BDINFO_SIZE 0x10UL
25 25
26#define TG3_RX_INTERNAL_RING_SZ_5906 32 26#define TG3_RX_STD_MAX_SIZE_5700 512
27 27#define TG3_RX_STD_MAX_SIZE_5717 2048
28#define RX_STD_MAX_SIZE_5705 512 28#define TG3_RX_JMB_MAX_SIZE_5700 256
29#define RX_STD_MAX_SIZE_5717 2048 29#define TG3_RX_JMB_MAX_SIZE_5717 1024
30#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ 30#define TG3_RX_RET_MAX_SIZE_5700 1024
31#define TG3_RX_RET_MAX_SIZE_5705 512
32#define TG3_RX_RET_MAX_SIZE_5717 4096
31 33
32/* First 256 bytes are a mirror of PCI config space. */ 34/* First 256 bytes are a mirror of PCI config space. */
33#define TG3PCI_VENDOR 0x00000000 35#define TG3PCI_VENDOR 0x00000000
@@ -54,6 +56,7 @@
54#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 56#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
55#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 57#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
56#define TG3PCI_DEVICE_TIGON3_5719 0x1657 58#define TG3PCI_DEVICE_TIGON3_5719 0x1657
59#define TG3PCI_DEVICE_TIGON3_5720 0x165f
57/* 0x04 --> 0x2c unused */ 60/* 0x04 --> 0x2c unused */
58#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM 61#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
59#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 62#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -142,6 +145,7 @@
142#define CHIPREV_ID_5717_A0 0x05717000 145#define CHIPREV_ID_5717_A0 0x05717000
143#define CHIPREV_ID_57765_A0 0x57785000 146#define CHIPREV_ID_57765_A0 0x57785000
144#define CHIPREV_ID_5719_A0 0x05719000 147#define CHIPREV_ID_5719_A0 0x05719000
148#define CHIPREV_ID_5720_A0 0x05720000
145#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 149#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
146#define ASIC_REV_5700 0x07 150#define ASIC_REV_5700 0x07
147#define ASIC_REV_5701 0x00 151#define ASIC_REV_5701 0x00
@@ -163,6 +167,7 @@
163#define ASIC_REV_5717 0x5717 167#define ASIC_REV_5717 0x5717
164#define ASIC_REV_57765 0x57785 168#define ASIC_REV_57765 0x57785
165#define ASIC_REV_5719 0x5719 169#define ASIC_REV_5719 0x5719
170#define ASIC_REV_5720 0x5720
166#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 171#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
167#define CHIPREV_5700_AX 0x70 172#define CHIPREV_5700_AX 0x70
168#define CHIPREV_5700_BX 0x71 173#define CHIPREV_5700_BX 0x71
@@ -183,6 +188,7 @@
183#define METAL_REV_B2 0x02 188#define METAL_REV_B2 0x02
184#define TG3PCI_DMA_RW_CTRL 0x0000006c 189#define TG3PCI_DMA_RW_CTRL 0x0000006c
185#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 190#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
191#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080
186#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 192#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
187#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 193#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
188#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 194#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
@@ -473,6 +479,8 @@
473#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 479#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
474#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 480#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
475#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 481#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100
482#define TX_MODE_JMB_FRM_LEN 0x00400000
483#define TX_MODE_CNT_DN_MODE 0x00800000
476#define MAC_TX_STATUS 0x00000460 484#define MAC_TX_STATUS 0x00000460
477#define TX_STATUS_XOFFED 0x00000001 485#define TX_STATUS_XOFFED 0x00000001
478#define TX_STATUS_SENT_XOFF 0x00000002 486#define TX_STATUS_SENT_XOFF 0x00000002
@@ -487,6 +495,8 @@
487#define TX_LENGTHS_IPG_SHIFT 8 495#define TX_LENGTHS_IPG_SHIFT 8
488#define TX_LENGTHS_IPG_CRS_MASK 0x00003000 496#define TX_LENGTHS_IPG_CRS_MASK 0x00003000
489#define TX_LENGTHS_IPG_CRS_SHIFT 12 497#define TX_LENGTHS_IPG_CRS_SHIFT 12
498#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000
499#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000
490#define MAC_RX_MODE 0x00000468 500#define MAC_RX_MODE 0x00000468
491#define RX_MODE_RESET 0x00000001 501#define RX_MODE_RESET 0x00000001
492#define RX_MODE_ENABLE 0x00000002 502#define RX_MODE_ENABLE 0x00000002
@@ -1079,6 +1089,9 @@
1079#define CPMU_HST_ACC_MACCLK_6_25 0x00130000 1089#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
1080/* 0x3620 --> 0x3630 unused */ 1090/* 0x3620 --> 0x3630 unused */
1081 1091
1092#define TG3_CPMU_CLCK_ORIDE 0x00003624
1093#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
1094
1082#define TG3_CPMU_CLCK_STAT 0x00003630 1095#define TG3_CPMU_CLCK_STAT 0x00003630
1083#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 1096#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
1084#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 1097#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -1188,6 +1201,7 @@
1188#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40 1201#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40
1189#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44 1202#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44
1190#define HOSTCC_FLOW_ATTN 0x00003c48 1203#define HOSTCC_FLOW_ATTN 0x00003c48
1204#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040
1191/* 0x3c4c --> 0x3c50 unused */ 1205/* 0x3c4c --> 0x3c50 unused */
1192#define HOSTCC_JUMBO_CON_IDX 0x00003c50 1206#define HOSTCC_JUMBO_CON_IDX 0x00003c50
1193#define HOSTCC_STD_CON_IDX 0x00003c54 1207#define HOSTCC_STD_CON_IDX 0x00003c54
@@ -1321,6 +1335,7 @@
1321#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 1335#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
1322#define RDMAC_MODE_IPV4_LSO_EN 0x08000000 1336#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
1323#define RDMAC_MODE_IPV6_LSO_EN 0x10000000 1337#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
1338#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000
1324#define RDMAC_STATUS 0x00004804 1339#define RDMAC_STATUS 0x00004804
1325#define RDMAC_STATUS_TGTABORT 0x00000004 1340#define RDMAC_STATUS_TGTABORT 0x00000004
1326#define RDMAC_STATUS_MSTABORT 0x00000008 1341#define RDMAC_STATUS_MSTABORT 0x00000008
@@ -1597,6 +1612,7 @@
1597#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020 1612#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020
1598#define MSGINT_MODE_MULTIVEC_EN 0x00000080 1613#define MSGINT_MODE_MULTIVEC_EN 0x00000080
1599#define MSGINT_STATUS 0x00006004 1614#define MSGINT_STATUS 0x00006004
1615#define MSGINT_STATUS_MSI_REQ 0x00000001
1600#define MSGINT_FIFO 0x00006008 1616#define MSGINT_FIFO 0x00006008
1601/* 0x600c --> 0x6400 unused */ 1617/* 0x600c --> 0x6400 unused */
1602 1618
@@ -1613,6 +1629,8 @@
1613#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004 1629#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004
1614#define GRC_MODE_BSWAP_DATA 0x00000010 1630#define GRC_MODE_BSWAP_DATA 0x00000010
1615#define GRC_MODE_WSWAP_DATA 0x00000020 1631#define GRC_MODE_WSWAP_DATA 0x00000020
1632#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040
1633#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080
1616#define GRC_MODE_SPLITHDR 0x00000100 1634#define GRC_MODE_SPLITHDR 0x00000100
1617#define GRC_MODE_NOFRM_CRACKING 0x00000200 1635#define GRC_MODE_NOFRM_CRACKING 0x00000200
1618#define GRC_MODE_INCL_CRC 0x00000400 1636#define GRC_MODE_INCL_CRC 0x00000400
@@ -1620,8 +1638,10 @@
1620#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000 1638#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000
1621#define GRC_MODE_NOIRQ_ON_RCV 0x00004000 1639#define GRC_MODE_NOIRQ_ON_RCV 0x00004000
1622#define GRC_MODE_FORCE_PCI32BIT 0x00008000 1640#define GRC_MODE_FORCE_PCI32BIT 0x00008000
1641#define GRC_MODE_B2HRX_ENABLE 0x00008000
1623#define GRC_MODE_HOST_STACKUP 0x00010000 1642#define GRC_MODE_HOST_STACKUP 0x00010000
1624#define GRC_MODE_HOST_SENDBDS 0x00020000 1643#define GRC_MODE_HOST_SENDBDS 0x00020000
1644#define GRC_MODE_HTX2B_ENABLE 0x00040000
1625#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 1645#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
1626#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 1646#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
1627#define GRC_MODE_PCIE_TL_SEL 0x00000000 1647#define GRC_MODE_PCIE_TL_SEL 0x00000000
@@ -1818,6 +1838,38 @@
1818#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000 1838#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000
1819#define FLASH_5717VENDOR_ST_25USPT 0x03400002 1839#define FLASH_5717VENDOR_ST_25USPT 0x03400002
1820#define FLASH_5717VENDOR_ST_45USPT 0x03400001 1840#define FLASH_5717VENDOR_ST_45USPT 0x03400001
1841#define FLASH_5720_EEPROM_HD 0x00000001
1842#define FLASH_5720_EEPROM_LD 0x00000003
1843#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
1844#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
1845#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
1846#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
1847#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000
1848#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002
1849#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001
1850#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003
1851#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000
1852#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002
1853#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001
1854#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003
1855#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
1856#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
1857#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
1858#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
1859#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
1860#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
1861#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
1862#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000
1863#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002
1864#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001
1865#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003
1866#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000
1867#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002
1868#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001
1869#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003
1870#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000
1871#define FLASH_5720VENDOR_ST_25USPT 0x03c00002
1872#define FLASH_5720VENDOR_ST_45USPT 0x03c00001
1821#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 1873#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
1822#define FLASH_5752PAGE_SIZE_256 0x00000000 1874#define FLASH_5752PAGE_SIZE_256 0x00000000
1823#define FLASH_5752PAGE_SIZE_512 0x10000000 1875#define FLASH_5752PAGE_SIZE_512 0x10000000
@@ -1904,6 +1956,8 @@
1904#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014 1956#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
1905#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000 1957#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
1906 1958
1959#define TG3_REG_BLK_SIZE 0x00008000
1960
1907/* OTP bit definitions */ 1961/* OTP bit definitions */
1908#define TG3_OTP_AGCTGT_MASK 0x000000e0 1962#define TG3_OTP_AGCTGT_MASK 0x000000e0
1909#define TG3_OTP_AGCTGT_SHIFT 1 1963#define TG3_OTP_AGCTGT_SHIFT 1
@@ -1955,7 +2009,9 @@
1955#define TG3_NVM_DIR_END 0x78 2009#define TG3_NVM_DIR_END 0x78
1956#define TG3_NVM_DIRENT_SIZE 0xc 2010#define TG3_NVM_DIRENT_SIZE 0xc
1957#define TG3_NVM_DIRTYPE_SHIFT 24 2011#define TG3_NVM_DIRTYPE_SHIFT 24
2012#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff
1958#define TG3_NVM_DIRTYPE_ASFINI 1 2013#define TG3_NVM_DIRTYPE_ASFINI 1
2014#define TG3_NVM_DIRTYPE_EXTVPD 20
1959#define TG3_NVM_PTREV_BCVER 0x94 2015#define TG3_NVM_PTREV_BCVER 0x94
1960#define TG3_NVM_BCVER_MAJMSK 0x0000ff00 2016#define TG3_NVM_BCVER_MAJMSK 0x0000ff00
1961#define TG3_NVM_BCVER_MAJSFT 8 2017#define TG3_NVM_BCVER_MAJSFT 8
@@ -2079,6 +2135,13 @@
2079#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 2135#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
2080#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 2136#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
2081 2137
2138#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
2139#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
2140#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
2141
2142#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64
2143#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16
2144
2082 2145
2083/* Currently this is fixed. */ 2146/* Currently this is fixed. */
2084#define TG3_PHY_MII_ADDR 0x01 2147#define TG3_PHY_MII_ADDR 0x01
@@ -2132,19 +2195,26 @@
2132 2195
2133#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */ 2196#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
2134 2197
2198#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
2199#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
2200#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
2201#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000
2202
2203#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
2204#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008
2135#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010 2205#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010
2136#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020 2206#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020
2207#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040
2137#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180 2208#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180
2138#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
2139 2209
2140#define MII_TG3_AUXCTL_MISC_WREN 0x8000 2210#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004
2141#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200 2211
2142#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
2143#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007 2212#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
2213#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010
2214#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
2215#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12
2216#define MII_TG3_AUXCTL_MISC_WREN 0x8000
2144 2217
2145#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
2146#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
2147#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
2148 2218
2149#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */ 2219#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
2150#define MII_TG3_AUX_STAT_LPASS 0x0004 2220#define MII_TG3_AUX_STAT_LPASS 0x0004
@@ -2546,6 +2616,7 @@ struct tg3_hw_stats {
2546 tg3_stat64_t dma_write_prioq_full; 2616 tg3_stat64_t dma_write_prioq_full;
2547 tg3_stat64_t rxbds_empty; 2617 tg3_stat64_t rxbds_empty;
2548 tg3_stat64_t rx_discards; 2618 tg3_stat64_t rx_discards;
2619 tg3_stat64_t mbuf_lwm_thresh_hit;
2549 tg3_stat64_t rx_errors; 2620 tg3_stat64_t rx_errors;
2550 tg3_stat64_t rx_threshold_hit; 2621 tg3_stat64_t rx_threshold_hit;
2551 2622
@@ -2745,6 +2816,86 @@ struct tg3_napi {
2745 unsigned int irq_vec; 2816 unsigned int irq_vec;
2746}; 2817};
2747 2818
2819enum TG3_FLAGS {
2820 TG3_FLAG_TAGGED_STATUS = 0,
2821 TG3_FLAG_TXD_MBOX_HWBUG,
2822 TG3_FLAG_USE_LINKCHG_REG,
2823 TG3_FLAG_ERROR_PROCESSED,
2824 TG3_FLAG_ENABLE_ASF,
2825 TG3_FLAG_ASPM_WORKAROUND,
2826 TG3_FLAG_POLL_SERDES,
2827 TG3_FLAG_MBOX_WRITE_REORDER,
2828 TG3_FLAG_PCIX_TARGET_HWBUG,
2829 TG3_FLAG_WOL_SPEED_100MB,
2830 TG3_FLAG_WOL_ENABLE,
2831 TG3_FLAG_EEPROM_WRITE_PROT,
2832 TG3_FLAG_NVRAM,
2833 TG3_FLAG_NVRAM_BUFFERED,
2834 TG3_FLAG_SUPPORT_MSI,
2835 TG3_FLAG_SUPPORT_MSIX,
2836 TG3_FLAG_PCIX_MODE,
2837 TG3_FLAG_PCI_HIGH_SPEED,
2838 TG3_FLAG_PCI_32BIT,
2839 TG3_FLAG_SRAM_USE_CONFIG,
2840 TG3_FLAG_TX_RECOVERY_PENDING,
2841 TG3_FLAG_WOL_CAP,
2842 TG3_FLAG_JUMBO_RING_ENABLE,
2843 TG3_FLAG_PAUSE_AUTONEG,
2844 TG3_FLAG_CPMU_PRESENT,
2845 TG3_FLAG_40BIT_DMA_BUG,
2846 TG3_FLAG_BROKEN_CHECKSUMS,
2847 TG3_FLAG_JUMBO_CAPABLE,
2848 TG3_FLAG_CHIP_RESETTING,
2849 TG3_FLAG_INIT_COMPLETE,
2850 TG3_FLAG_RESTART_TIMER,
2851 TG3_FLAG_TSO_BUG,
2852 TG3_FLAG_IS_5788,
2853 TG3_FLAG_MAX_RXPEND_64,
2854 TG3_FLAG_TSO_CAPABLE,
2855 TG3_FLAG_PCI_EXPRESS,
2856 TG3_FLAG_ASF_NEW_HANDSHAKE,
2857 TG3_FLAG_HW_AUTONEG,
2858 TG3_FLAG_IS_NIC,
2859 TG3_FLAG_FLASH,
2860 TG3_FLAG_HW_TSO_1,
2861 TG3_FLAG_5705_PLUS,
2862 TG3_FLAG_5750_PLUS,
2863 TG3_FLAG_HW_TSO_3,
2864 TG3_FLAG_USING_MSI,
2865 TG3_FLAG_USING_MSIX,
2866 TG3_FLAG_ICH_WORKAROUND,
2867 TG3_FLAG_5780_CLASS,
2868 TG3_FLAG_HW_TSO_2,
2869 TG3_FLAG_1SHOT_MSI,
2870 TG3_FLAG_NO_FWARE_REPORTED,
2871 TG3_FLAG_NO_NVRAM_ADDR_TRANS,
2872 TG3_FLAG_ENABLE_APE,
2873 TG3_FLAG_PROTECTED_NVRAM,
2874 TG3_FLAG_5701_DMA_BUG,
2875 TG3_FLAG_USE_PHYLIB,
2876 TG3_FLAG_MDIOBUS_INITED,
2877 TG3_FLAG_LRG_PROD_RING_CAP,
2878 TG3_FLAG_RGMII_INBAND_DISABLE,
2879 TG3_FLAG_RGMII_EXT_IBND_RX_EN,
2880 TG3_FLAG_RGMII_EXT_IBND_TX_EN,
2881 TG3_FLAG_CLKREQ_BUG,
2882 TG3_FLAG_5755_PLUS,
2883 TG3_FLAG_NO_NVRAM,
2884 TG3_FLAG_ENABLE_RSS,
2885 TG3_FLAG_ENABLE_TSS,
2886 TG3_FLAG_4G_DMA_BNDRY_BUG,
2887 TG3_FLAG_40BIT_DMA_LIMIT_BUG,
2888 TG3_FLAG_SHORT_DMA_BUG,
2889 TG3_FLAG_USE_JUMBO_BDFLAG,
2890 TG3_FLAG_L1PLLPD_EN,
2891 TG3_FLAG_57765_PLUS,
2892 TG3_FLAG_APE_HAS_NCSI,
2893 TG3_FLAG_5717_PLUS,
2894
2895 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
2896 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
2897};
2898
2748struct tg3 { 2899struct tg3 {
2749 /* begin "general, frequently-used members" cacheline section */ 2900 /* begin "general, frequently-used members" cacheline section */
2750 2901
@@ -2768,7 +2919,7 @@ struct tg3 {
2768 /* SMP locking strategy: 2919 /* SMP locking strategy:
2769 * 2920 *
2770 * lock: Held during reset, PHY access, timer, and when 2921 * lock: Held during reset, PHY access, timer, and when
2771 * updating tg3_flags and tg3_flags2. 2922 * updating tg3_flags.
2772 * 2923 *
2773 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds 2924 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
2774 * netif_tx_lock when it needs to call 2925 * netif_tx_lock when it needs to call
@@ -2825,94 +2976,13 @@ struct tg3 {
2825 struct tg3_ethtool_stats estats; 2976 struct tg3_ethtool_stats estats;
2826 struct tg3_ethtool_stats estats_prev; 2977 struct tg3_ethtool_stats estats_prev;
2827 2978
2979 DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
2980
2828 union { 2981 union {
2829 unsigned long phy_crc_errors; 2982 unsigned long phy_crc_errors;
2830 unsigned long last_event_jiffies; 2983 unsigned long last_event_jiffies;
2831 }; 2984 };
2832 2985
2833 u32 tg3_flags;
2834#define TG3_FLAG_TAGGED_STATUS 0x00000001
2835#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
2836#define TG3_FLAG_RX_CHECKSUMS 0x00000004
2837#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
2838#define TG3_FLAG_ENABLE_ASF 0x00000020
2839#define TG3_FLAG_ASPM_WORKAROUND 0x00000040
2840#define TG3_FLAG_POLL_SERDES 0x00000080
2841#define TG3_FLAG_MBOX_WRITE_REORDER 0x00000100
2842#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200
2843#define TG3_FLAG_WOL_SPEED_100MB 0x00000400
2844#define TG3_FLAG_WOL_ENABLE 0x00000800
2845#define TG3_FLAG_EEPROM_WRITE_PROT 0x00001000
2846#define TG3_FLAG_NVRAM 0x00002000
2847#define TG3_FLAG_NVRAM_BUFFERED 0x00004000
2848#define TG3_FLAG_SUPPORT_MSI 0x00008000
2849#define TG3_FLAG_SUPPORT_MSIX 0x00010000
2850#define TG3_FLAG_SUPPORT_MSI_OR_MSIX (TG3_FLAG_SUPPORT_MSI | \
2851 TG3_FLAG_SUPPORT_MSIX)
2852#define TG3_FLAG_PCIX_MODE 0x00020000
2853#define TG3_FLAG_PCI_HIGH_SPEED 0x00040000
2854#define TG3_FLAG_PCI_32BIT 0x00080000
2855#define TG3_FLAG_SRAM_USE_CONFIG 0x00100000
2856#define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000
2857#define TG3_FLAG_WOL_CAP 0x00400000
2858#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2859#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2860#define TG3_FLAG_CPMU_PRESENT 0x04000000
2861#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
2862#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2863#define TG3_FLAG_JUMBO_CAPABLE 0x20000000
2864#define TG3_FLAG_CHIP_RESETTING 0x40000000
2865#define TG3_FLAG_INIT_COMPLETE 0x80000000
2866 u32 tg3_flags2;
2867#define TG3_FLG2_RESTART_TIMER 0x00000001
2868#define TG3_FLG2_TSO_BUG 0x00000002
2869#define TG3_FLG2_IS_5788 0x00000008
2870#define TG3_FLG2_MAX_RXPEND_64 0x00000010
2871#define TG3_FLG2_TSO_CAPABLE 0x00000020
2872#define TG3_FLG2_PCI_EXPRESS 0x00000200
2873#define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400
2874#define TG3_FLG2_HW_AUTONEG 0x00000800
2875#define TG3_FLG2_IS_NIC 0x00001000
2876#define TG3_FLG2_FLASH 0x00008000
2877#define TG3_FLG2_HW_TSO_1 0x00010000
2878#define TG3_FLG2_5705_PLUS 0x00040000
2879#define TG3_FLG2_5750_PLUS 0x00080000
2880#define TG3_FLG2_HW_TSO_3 0x00100000
2881#define TG3_FLG2_USING_MSI 0x00200000
2882#define TG3_FLG2_USING_MSIX 0x00400000
2883#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
2884 TG3_FLG2_USING_MSIX)
2885#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2886#define TG3_FLG2_5780_CLASS 0x04000000
2887#define TG3_FLG2_HW_TSO_2 0x08000000
2888#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \
2889 TG3_FLG2_HW_TSO_2 | \
2890 TG3_FLG2_HW_TSO_3)
2891#define TG3_FLG2_1SHOT_MSI 0x10000000
2892#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
2893 u32 tg3_flags3;
2894#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
2895#define TG3_FLG3_ENABLE_APE 0x00000002
2896#define TG3_FLG3_PROTECTED_NVRAM 0x00000004
2897#define TG3_FLG3_5701_DMA_BUG 0x00000008
2898#define TG3_FLG3_USE_PHYLIB 0x00000010
2899#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2900#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
2901#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2902#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
2903#define TG3_FLG3_CLKREQ_BUG 0x00000800
2904#define TG3_FLG3_5755_PLUS 0x00002000
2905#define TG3_FLG3_NO_NVRAM 0x00004000
2906#define TG3_FLG3_ENABLE_RSS 0x00020000
2907#define TG3_FLG3_ENABLE_TSS 0x00040000
2908#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
2909#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
2910#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2911#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2912#define TG3_FLG3_L1PLLPD_EN 0x00800000
2913#define TG3_FLG3_5717_PLUS 0x01000000
2914#define TG3_FLG3_APE_HAS_NCSI 0x02000000
2915
2916 struct timer_list timer; 2986 struct timer_list timer;
2917 u16 timer_counter; 2987 u16 timer_counter;
2918 u16 timer_multiplier; 2988 u16 timer_multiplier;
@@ -2983,6 +3053,7 @@ struct tg3 {
2983#define TG3_PHY_ID_BCM5718S 0xbc050ff0 3053#define TG3_PHY_ID_BCM5718S 0xbc050ff0
2984#define TG3_PHY_ID_BCM57765 0x5c0d8a40 3054#define TG3_PHY_ID_BCM57765 0x5c0d8a40
2985#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 3055#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
3056#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
2986#define TG3_PHY_ID_BCM5906 0xdc00ac40 3057#define TG3_PHY_ID_BCM5906 0xdc00ac40
2987#define TG3_PHY_ID_BCM8002 0x60010140 3058#define TG3_PHY_ID_BCM8002 0x60010140
2988#define TG3_PHY_ID_INVALID 0xffffffff 3059#define TG3_PHY_ID_INVALID 0xffffffff
@@ -3049,6 +3120,7 @@ struct tg3 {
3049 3120
3050 int nvram_lock_cnt; 3121 int nvram_lock_cnt;
3051 u32 nvram_size; 3122 u32 nvram_size;
3123#define TG3_NVRAM_SIZE_2KB 0x00000800
3052#define TG3_NVRAM_SIZE_64KB 0x00010000 3124#define TG3_NVRAM_SIZE_64KB 0x00010000
3053#define TG3_NVRAM_SIZE_128KB 0x00020000 3125#define TG3_NVRAM_SIZE_128KB 0x00020000
3054#define TG3_NVRAM_SIZE_256KB 0x00040000 3126#define TG3_NVRAM_SIZE_256KB 0x00040000
@@ -3064,6 +3136,9 @@ struct tg3 {
3064#define JEDEC_SAIFUN 0x4f 3136#define JEDEC_SAIFUN 0x4f
3065#define JEDEC_SST 0xbf 3137#define JEDEC_SST 0xbf
3066 3138
3139#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
3140#define ATMEL_AT24C02_PAGE_SIZE (8)
3141
3067#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB 3142#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB
3068#define ATMEL_AT24C64_PAGE_SIZE (32) 3143#define ATMEL_AT24C64_PAGE_SIZE (32)
3069 3144
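The tg3.c hunks above replace open-coded updates such as "tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE" with tg3_flag_set()/tg3_flag_clear() calls, and the tg3.h hunk folds the three u32 flag words into one enum plus a DECLARE_BITMAP() member. A minimal sketch of how such helpers are typically layered over <linux/bitops.h>; the macro names match the call sites above, but the bodies are an assumption, since the part of tg3.c that defines them is not included in this excerpt:

/* Sketch only: assumed definitions, not quoted from the patch. */
#include <linux/bitops.h>

#define tg3_flag(tp, flag)		\
	test_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	set_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	clear_bit(TG3_FLAG_##flag, (tp)->tg3_flags)

/* So tg3_flag_clear(tp, INIT_COMPLETE) in tg3_suspend() above is the
 * bitmap equivalent of the old "tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE". */

The token pasting keeps the call sites short, while the enum (rather than fixed-width bit masks) removes the 32-flags-per-word limit that forced the old tg3_flags/tg3_flags2/tg3_flags3 split.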
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0825db6d883f..1e980fdd9d77 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -1930,7 +1930,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1930 unsigned int len = skb->len; 1930 unsigned int len = skb->len;
1931 unsigned char *data = skb->data; 1931 unsigned char *data = skb->data;
1932 1932
1933 unsigned int csum_start = skb->csum_start - skb_headroom(skb); 1933 unsigned int csum_start = skb_checksum_start_offset(skb);
1934 1934
1935 lepp_frag_t frags[LEPP_MAX_FRAGS]; 1935 lepp_frag_t frags[LEPP_MAX_FRAGS];
1936 1936
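The tilepro.c change swaps the open-coded csum_start arithmetic for skb_checksum_start_offset(). For reference, that helper in <linux/skbuff.h> is expected to compute exactly the expression it replaces; shown here as a sketch, not a quote from this tree:

/* Sketch of the assumed helper behind the tilepro.c change above. */
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	/* Offset of the checksum start relative to skb->data. */
	return skb->csum_start - skb_headroom(skb);
}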
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 2bedc0ace812..1313aa1315f0 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
727 return 0; 727 return 0;
728} 728}
729 729
730static short madgemc_adapter_ids[] __initdata = { 730static const short madgemc_adapter_ids[] __devinitconst = {
731 0x002d, 731 0x002d,
732 0x0000 732 0x0000
733}; 733};
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 2684003b8ab6..e3855aeb13d4 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -86,6 +86,7 @@
86#include <linux/timer.h> 86#include <linux/timer.h>
87#include <linux/in.h> 87#include <linux/in.h>
88#include <linux/ioport.h> 88#include <linux/ioport.h>
89#include <linux/seq_file.h>
89#include <linux/string.h> 90#include <linux/string.h>
90#include <linux/proc_fs.h> 91#include <linux/proc_fs.h>
91#include <linux/ptrace.h> 92#include <linux/ptrace.h>
@@ -193,7 +194,7 @@ static void olympic_arb_cmd(struct net_device *dev);
193static int olympic_change_mtu(struct net_device *dev, int mtu); 194static int olympic_change_mtu(struct net_device *dev, int mtu);
194static void olympic_srb_bh(struct net_device *dev) ; 195static void olympic_srb_bh(struct net_device *dev) ;
195static void olympic_asb_bh(struct net_device *dev) ; 196static void olympic_asb_bh(struct net_device *dev) ;
196static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ; 197static const struct file_operations olympic_proc_ops;
197 198
198static const struct net_device_ops olympic_netdev_ops = { 199static const struct net_device_ops olympic_netdev_ops = {
199 .ndo_open = olympic_open, 200 .ndo_open = olympic_open,
@@ -272,7 +273,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device
272 char proc_name[20] ; 273 char proc_name[20] ;
273 strcpy(proc_name,"olympic_") ; 274 strcpy(proc_name,"olympic_") ;
274 strcat(proc_name,dev->name) ; 275 strcat(proc_name,dev->name) ;
275 create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ; 276 proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
276 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name); 277 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
277 } 278 }
278 return 0 ; 279 return 0 ;
@@ -1615,29 +1616,25 @@ static int olympic_change_mtu(struct net_device *dev, int mtu)
1615 return 0 ; 1616 return 0 ;
1616} 1617}
1617 1618
1618static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 1619static int olympic_proc_show(struct seq_file *m, void *v)
1619{ 1620{
1620 struct net_device *dev = (struct net_device *)data ; 1621 struct net_device *dev = m->private;
1621 struct olympic_private *olympic_priv=netdev_priv(dev); 1622 struct olympic_private *olympic_priv=netdev_priv(dev);
1622 u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; 1623 u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
1623 u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; 1624 u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
1624 int size = 0 ;
1625 int len=0;
1626 off_t begin=0;
1627 off_t pos=0;
1628 u8 addr[6]; 1625 u8 addr[6];
1629 u8 addr2[6]; 1626 u8 addr2[6];
1630 int i; 1627 int i;
1631 1628
1632 size = sprintf(buffer, 1629 seq_printf(m,
1633 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name); 1630 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
1634 size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n", 1631 seq_printf(m, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
1635 dev->name); 1632 dev->name);
1636 1633
1637 for (i = 0 ; i < 6 ; i++) 1634 for (i = 0 ; i < 6 ; i++)
1638 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i); 1635 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);
1639 1636
1640 size += sprintf(buffer+size, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n", 1637 seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
1641 dev->name, 1638 dev->name,
1642 dev->dev_addr, addr, 1639 dev->dev_addr, addr,
1643 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), 1640 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
@@ -1645,9 +1642,9 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1645 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), 1642 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
1646 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3)); 1643 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
1647 1644
1648 size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name); 1645 seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1649 1646
1650 size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", 1647 seq_printf(m, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
1651 dev->name) ; 1648 dev->name) ;
1652 1649
1653 for (i = 0 ; i < 6 ; i++) 1650 for (i = 0 ; i < 6 ; i++)
@@ -1655,7 +1652,7 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1655 for (i = 0 ; i < 6 ; i++) 1652 for (i = 0 ; i < 6 ; i++)
1656 addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i); 1653 addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);
1657 1654
1658 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n", 1655 seq_printf(m, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
1659 dev->name, 1656 dev->name,
1660 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)), 1657 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1661 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1), 1658 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
@@ -1666,12 +1663,12 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1666 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))), 1663 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1667 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code)))); 1664 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
1668 1665
1669 size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", 1666 seq_printf(m, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
1670 dev->name) ; 1667 dev->name) ;
1671 1668
1672 for (i = 0 ; i < 6 ; i++) 1669 for (i = 0 ; i < 6 ; i++)
1673 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i); 1670 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
1674 size += sprintf(buffer+size, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n", 1671 seq_printf(m, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1675 dev->name, addr, 1672 dev->name, addr,
1676 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))), 1673 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1677 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))), 1674 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
@@ -1680,12 +1677,12 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1680 swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))), 1677 swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
1681 swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl)))); 1678 swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
1682 1679
1683 size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n", 1680 seq_printf(m, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
1684 dev->name) ; 1681 dev->name) ;
1685 1682
1686 for (i = 0 ; i < 6 ; i++) 1683 for (i = 0 ; i < 6 ; i++)
1687 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i); 1684 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
1688 size += sprintf(buffer+size, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n", 1685 seq_printf(m, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
1689 dev->name, 1686 dev->name,
1690 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))), 1687 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1691 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))), 1688 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
@@ -1695,19 +1692,21 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1695 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2), 1692 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
1696 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3)); 1693 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
1697 1694
1698 len=size; 1695 return 0;
1699 pos=begin+size;
1700 if (pos<offset) {
1701 len=0;
1702 begin=pos;
1703 }
1704 *start=buffer+(offset-begin); /* Start of wanted data */
1705 len-=(offset-begin); /* Start slop */
1706 if(len>length)
1707 len=length; /* Ending slop */
1708 return len;
1709} 1696}
1710 1697
1698static int olympic_proc_open(struct inode *inode, struct file *file)
1699{
1700 return single_open(file, olympic_proc_show, PDE(inode)->data);
1701}
1702
1703static const struct file_operations olympic_proc_ops = {
1704 .open = olympic_proc_open,
1705 .read = seq_read,
1706 .llseek = seq_lseek,
1707 .release = single_release,
1708};
1709
1711static void __devexit olympic_remove_one(struct pci_dev *pdev) 1710static void __devexit olympic_remove_one(struct pci_dev *pdev)
1712{ 1711{
1713 struct net_device *dev = pci_get_drvdata(pdev) ; 1712 struct net_device *dev = pci_get_drvdata(pdev) ;
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 007d8e75666d..092c3faa882a 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -122,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
122 tp->nway = tp->mediasense = 1; 122 tp->nway = tp->mediasense = 1;
123 tp->nwayset = tp->lpar = 0; 123 tp->nwayset = tp->lpar = 0;
124 if (tulip_debug > 1) 124 if (tulip_debug > 1)
125 printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n", 125 netdev_dbg(dev, "Restarting 21143 autonegotiation, csr14=%08x\n",
126 dev->name, csr14); 126 csr14);
127 iowrite32(0x0001, ioaddr + CSR13); 127 iowrite32(0x0001, ioaddr + CSR13);
128 udelay(100); 128 udelay(100);
129 iowrite32(csr14, ioaddr + CSR14); 129 iowrite32(csr14, ioaddr + CSR14);
@@ -206,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
206#if 0 /* Restart shouldn't be needed. */ 206#if 0 /* Restart shouldn't be needed. */
207 iowrite32(tp->csr6 | RxOn, ioaddr + CSR6); 207 iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
208 if (tulip_debug > 2) 208 if (tulip_debug > 2)
209 printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n", 209 netdev_dbg(dev, " Restarting Tx and Rx, CSR5 is %08x\n",
210 dev->name, ioread32(ioaddr + CSR5)); 210 ioread32(ioaddr + CSR5));
211#endif 211#endif
212 tulip_start_rxtx(tp); 212 tulip_start_rxtx(tp);
213 if (tulip_debug > 2) 213 if (tulip_debug > 2)
214 printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n", 214 netdev_dbg(dev, " Setting CSR6 %08x/%x CSR12 %08x\n",
215 dev->name, tp->csr6, ioread32(ioaddr + CSR6), 215 tp->csr6, ioread32(ioaddr + CSR6),
216 ioread32(ioaddr + CSR12)); 216 ioread32(ioaddr + CSR12));
217 } else if ((tp->nwayset && (csr5 & 0x08000000) && 217 } else if ((tp->nwayset && (csr5 & 0x08000000) &&
218 (dev->if_port == 3 || dev->if_port == 5) && 218 (dev->if_port == 3 || dev->if_port == 5) &&
219 (csr12 & 2) == 2) || 219 (csr12 & 2) == 2) ||
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
index 200cbf7c815c..5e8be38b45bb 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the Linux "Tulip" family network device drivers. 2# Makefile for the Linux "Tulip" family network device drivers.
3# 3#
4 4
5ccflags-$(CONFIG_NET_TULIP) := -DDEBUG
6
5obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o 7obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o
6obj-$(CONFIG_DM9102) += dmfe.o 8obj-$(CONFIG_DM9102) += dmfe.o
7obj-$(CONFIG_WINBOND_840) += winbond-840.o 9obj-$(CONFIG_WINBOND_840) += winbond-840.o
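The new ccflags-$(CONFIG_NET_TULIP) := -DDEBUG line matters because the netdev_dbg()/netif_dbg() calls introduced in the tulip files below expand to dev_dbg(), which emits nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is defined for the translation unit. Roughly, as a sketch of the usual <linux/device.h> arrangement rather than a quote from it:

/* Sketch: what -DDEBUG switches on for the converted tulip drivers. */
#if defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, fmt, ...)	\
	dynamic_dev_dbg(dev, fmt, ##__VA_ARGS__)	/* runtime-controlled */
#elif defined(DEBUG)
#define dev_dbg(dev, fmt, ...)	\
	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)	/* always printed */
#else
#define dev_dbg(dev, fmt, ...)	\
	({ if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); 0; })	/* compiled out */
#endif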
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index b13c6b040be3..e2f692351180 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -27,6 +27,8 @@
27 27
28 */ 28 */
29 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#define DRV_NAME "de2104x" 32#define DRV_NAME "de2104x"
31#define DRV_VERSION "0.7" 33#define DRV_VERSION "0.7"
32#define DRV_RELDATE "Mar 17, 2004" 34#define DRV_RELDATE "Mar 17, 2004"
@@ -51,7 +53,7 @@
51 53
52/* These identify the driver base version and may not be removed. */ 54/* These identify the driver base version and may not be removed. */
53static char version[] = 55static char version[] =
54KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; 56"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
55 57
56MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); 58MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver"); 59MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
@@ -73,8 +75,6 @@ static int rx_copybreak = 100;
73module_param (rx_copybreak, int, 0); 75module_param (rx_copybreak, int, 0);
74MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied"); 76MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
75 77
76#define PFX DRV_NAME ": "
77
78#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ 78#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \ 79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK | \ 80 NETIF_MSG_LINK | \
@@ -377,18 +377,16 @@ static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
377static void de_rx_err_acct (struct de_private *de, unsigned rx_tail, 377static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
378 u32 status, u32 len) 378 u32 status, u32 len)
379{ 379{
380 if (netif_msg_rx_err (de)) 380 netif_dbg(de, rx_err, de->dev,
381 printk (KERN_DEBUG 381 "rx err, slot %d status 0x%x len %d\n",
382 "%s: rx err, slot %d status 0x%x len %d\n", 382 rx_tail, status, len);
383 de->dev->name, rx_tail, status, len);
384 383
385 if ((status & 0x38000300) != 0x0300) { 384 if ((status & 0x38000300) != 0x0300) {
 386 /* Ignore earlier buffers. */ 385 /* Ignore earlier buffers. */
387 if ((status & 0xffff) != 0x7fff) { 386 if ((status & 0xffff) != 0x7fff) {
388 if (netif_msg_rx_err(de)) 387 netif_warn(de, rx_err, de->dev,
389 dev_warn(&de->dev->dev, 388 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
390 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", 389 status);
391 status);
392 de->net_stats.rx_length_errors++; 390 de->net_stats.rx_length_errors++;
393 } 391 }
394 } else if (status & RxError) { 392 } else if (status & RxError) {
@@ -435,10 +433,9 @@ static void de_rx (struct de_private *de)
435 433
436 copying_skb = (len <= rx_copybreak); 434 copying_skb = (len <= rx_copybreak);
437 435
438 if (unlikely(netif_msg_rx_status(de))) 436 netif_dbg(de, rx_status, de->dev,
439 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n", 437 "rx slot %d status 0x%x len %d copying? %d\n",
440 de->dev->name, rx_tail, status, len, 438 rx_tail, status, len, copying_skb);
441 copying_skb);
442 439
443 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz; 440 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
444 copy_skb = dev_alloc_skb (buflen); 441 copy_skb = dev_alloc_skb (buflen);
@@ -491,7 +488,7 @@ rx_next:
491 } 488 }
492 489
493 if (!rx_work) 490 if (!rx_work)
494 dev_warn(&de->dev->dev, "rx work limit reached\n"); 491 netdev_warn(de->dev, "rx work limit reached\n");
495 492
496 de->rx_tail = rx_tail; 493 de->rx_tail = rx_tail;
497} 494}
@@ -506,10 +503,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
506 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF)) 503 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
507 return IRQ_NONE; 504 return IRQ_NONE;
508 505
509 if (netif_msg_intr(de)) 506 netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
510 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n", 507 status, dr32(MacMode),
511 dev->name, status, dr32(MacMode), 508 de->rx_tail, de->tx_head, de->tx_tail);
512 de->rx_tail, de->tx_head, de->tx_tail);
513 509
514 dw32(MacStatus, status); 510 dw32(MacStatus, status);
515 511
@@ -534,9 +530,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
534 530
535 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status); 531 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
536 pci_write_config_word(de->pdev, PCI_STATUS, pci_status); 532 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
537 dev_err(&de->dev->dev, 533 netdev_err(de->dev,
538 "PCI bus error, status=%08x, PCI status=%04x\n", 534 "PCI bus error, status=%08x, PCI status=%04x\n",
539 status, pci_status); 535 status, pci_status);
540 } 536 }
541 537
542 return IRQ_HANDLED; 538 return IRQ_HANDLED;
@@ -572,9 +568,9 @@ static void de_tx (struct de_private *de)
572 568
573 if (status & LastFrag) { 569 if (status & LastFrag) {
574 if (status & TxError) { 570 if (status & TxError) {
575 if (netif_msg_tx_err(de)) 571 netif_dbg(de, tx_err, de->dev,
576 printk(KERN_DEBUG "%s: tx err, status 0x%x\n", 572 "tx err, status 0x%x\n",
577 de->dev->name, status); 573 status);
578 de->net_stats.tx_errors++; 574 de->net_stats.tx_errors++;
579 if (status & TxOWC) 575 if (status & TxOWC)
580 de->net_stats.tx_window_errors++; 576 de->net_stats.tx_window_errors++;
@@ -587,9 +583,8 @@ static void de_tx (struct de_private *de)
587 } else { 583 } else {
588 de->net_stats.tx_packets++; 584 de->net_stats.tx_packets++;
589 de->net_stats.tx_bytes += skb->len; 585 de->net_stats.tx_bytes += skb->len;
590 if (netif_msg_tx_done(de)) 586 netif_dbg(de, tx_done, de->dev,
591 printk(KERN_DEBUG "%s: tx done, slot %d\n", 587 "tx done, slot %d\n", tx_tail);
592 de->dev->name, tx_tail);
593 } 588 }
594 dev_kfree_skb_irq(skb); 589 dev_kfree_skb_irq(skb);
595 } 590 }
@@ -646,9 +641,8 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
646 wmb(); 641 wmb();
647 642
648 de->tx_head = NEXT_TX(entry); 643 de->tx_head = NEXT_TX(entry);
649 if (netif_msg_tx_queued(de)) 644 netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
650 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", 645 entry, skb->len);
651 dev->name, entry, skb->len);
652 646
653 if (tx_free == 0) 647 if (tx_free == 0)
654 netif_stop_queue(dev); 648 netif_stop_queue(dev);
@@ -873,7 +867,7 @@ static void de_stop_rxtx (struct de_private *de)
873 udelay(100); 867 udelay(100);
874 } 868 }
875 869
876 dev_warn(&de->dev->dev, "timeout expired stopping DMA\n"); 870 netdev_warn(de->dev, "timeout expired, stopping DMA\n");
877} 871}
878 872
879static inline void de_start_rxtx (struct de_private *de) 873static inline void de_start_rxtx (struct de_private *de)
@@ -907,9 +901,8 @@ static void de_link_up(struct de_private *de)
907{ 901{
908 if (!netif_carrier_ok(de->dev)) { 902 if (!netif_carrier_ok(de->dev)) {
909 netif_carrier_on(de->dev); 903 netif_carrier_on(de->dev);
910 if (netif_msg_link(de)) 904 netif_info(de, link, de->dev, "link up, media %s\n",
911 dev_info(&de->dev->dev, "link up, media %s\n", 905 media_name[de->media_type]);
912 media_name[de->media_type]);
913 } 906 }
914} 907}
915 908
@@ -917,8 +910,7 @@ static void de_link_down(struct de_private *de)
917{ 910{
918 if (netif_carrier_ok(de->dev)) { 911 if (netif_carrier_ok(de->dev)) {
919 netif_carrier_off(de->dev); 912 netif_carrier_off(de->dev);
920 if (netif_msg_link(de)) 913 netif_info(de, link, de->dev, "link down\n");
921 dev_info(&de->dev->dev, "link down\n");
922 } 914 }
923} 915}
924 916
@@ -928,8 +920,7 @@ static void de_set_media (struct de_private *de)
928 u32 macmode = dr32(MacMode); 920 u32 macmode = dr32(MacMode);
929 921
930 if (de_is_running(de)) 922 if (de_is_running(de))
931 dev_warn(&de->dev->dev, 923 netdev_warn(de->dev, "chip is running while changing media!\n");
932 "chip is running while changing media!\n");
933 924
934 if (de->de21040) 925 if (de->de21040)
935 dw32(CSR11, FULL_DUPLEX_MAGIC); 926 dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -948,18 +939,13 @@ static void de_set_media (struct de_private *de)
948 else 939 else
949 macmode &= ~FullDuplex; 940 macmode &= ~FullDuplex;
950 941
951 if (netif_msg_link(de)) 942 netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
952 dev_info(&de->dev->dev, "set link %s\n", media_name[media]); 943 netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
953 if (netif_msg_hw(de)) { 944 dr32(MacMode), dr32(SIAStatus),
954 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n", 945 dr32(CSR13), dr32(CSR14), dr32(CSR15));
955 dr32(MacMode), dr32(SIAStatus), 946 netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
956 dr32(CSR13), dr32(CSR14), dr32(CSR15)); 947 macmode, de->media[media].csr13,
957 948 de->media[media].csr14, de->media[media].csr15);
958 dev_info(&de->dev->dev,
959 "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
960 macmode, de->media[media].csr13,
961 de->media[media].csr14, de->media[media].csr15);
962 }
963 if (macmode != dr32(MacMode)) 949 if (macmode != dr32(MacMode))
964 dw32(MacMode, macmode); 950 dw32(MacMode, macmode);
965} 951}
@@ -996,9 +982,8 @@ static void de21040_media_timer (unsigned long data)
996 if (!netif_carrier_ok(dev)) 982 if (!netif_carrier_ok(dev))
997 de_link_up(de); 983 de_link_up(de);
998 else 984 else
999 if (netif_msg_timer(de)) 985 netif_info(de, timer, dev, "%s link ok, status %x\n",
1000 dev_info(&dev->dev, "%s link ok, status %x\n", 986 media_name[de->media_type], status);
1001 media_name[de->media_type], status);
1002 return; 987 return;
1003 } 988 }
1004 989
@@ -1025,9 +1010,8 @@ no_link_yet:
1025 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK; 1010 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1026 add_timer(&de->media_timer); 1011 add_timer(&de->media_timer);
1027 1012
1028 if (netif_msg_timer(de)) 1013 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1029 dev_info(&dev->dev, "no link, trying media %s, status %x\n", 1014 media_name[de->media_type], status);
1030 media_name[de->media_type], status);
1031} 1015}
1032 1016
1033static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) 1017static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1085,11 +1069,10 @@ static void de21041_media_timer (unsigned long data)
1085 if (!netif_carrier_ok(dev)) 1069 if (!netif_carrier_ok(dev))
1086 de_link_up(de); 1070 de_link_up(de);
1087 else 1071 else
1088 if (netif_msg_timer(de)) 1072 netif_info(de, timer, dev,
1089 dev_info(&dev->dev, 1073 "%s link ok, mode %x status %x\n",
1090 "%s link ok, mode %x status %x\n", 1074 media_name[de->media_type],
1091 media_name[de->media_type], 1075 dr32(MacMode), status);
1092 dr32(MacMode), status);
1093 return; 1076 return;
1094 } 1077 }
1095 1078
@@ -1163,9 +1146,8 @@ no_link_yet:
1163 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK; 1146 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1164 add_timer(&de->media_timer); 1147 add_timer(&de->media_timer);
1165 1148
1166 if (netif_msg_timer(de)) 1149 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1167 dev_info(&dev->dev, "no link, trying media %s, status %x\n", 1150 media_name[de->media_type], status);
1168 media_name[de->media_type], status);
1169} 1151}
1170 1152
1171static void de_media_interrupt (struct de_private *de, u32 status) 1153static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1401,14 +1383,13 @@ static int de_open (struct net_device *dev)
1401 struct de_private *de = netdev_priv(dev); 1383 struct de_private *de = netdev_priv(dev);
1402 int rc; 1384 int rc;
1403 1385
1404 if (netif_msg_ifup(de)) 1386 netif_dbg(de, ifup, dev, "enabling interface\n");
1405 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1406 1387
1407 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); 1388 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1408 1389
1409 rc = de_alloc_rings(de); 1390 rc = de_alloc_rings(de);
1410 if (rc) { 1391 if (rc) {
1411 dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc); 1392 netdev_err(dev, "ring allocation failure, err=%d\n", rc);
1412 return rc; 1393 return rc;
1413 } 1394 }
1414 1395
@@ -1416,14 +1397,14 @@ static int de_open (struct net_device *dev)
1416 1397
1417 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); 1398 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1418 if (rc) { 1399 if (rc) {
1419 dev_err(&dev->dev, "IRQ %d request failure, err=%d\n", 1400 netdev_err(dev, "IRQ %d request failure, err=%d\n",
1420 dev->irq, rc); 1401 dev->irq, rc);
1421 goto err_out_free; 1402 goto err_out_free;
1422 } 1403 }
1423 1404
1424 rc = de_init_hw(de); 1405 rc = de_init_hw(de);
1425 if (rc) { 1406 if (rc) {
1426 dev_err(&dev->dev, "h/w init failure, err=%d\n", rc); 1407 netdev_err(dev, "h/w init failure, err=%d\n", rc);
1427 goto err_out_free_irq; 1408 goto err_out_free_irq;
1428 } 1409 }
1429 1410
@@ -1444,8 +1425,7 @@ static int de_close (struct net_device *dev)
1444 struct de_private *de = netdev_priv(dev); 1425 struct de_private *de = netdev_priv(dev);
1445 unsigned long flags; 1426 unsigned long flags;
1446 1427
1447 if (netif_msg_ifdown(de)) 1428 netif_dbg(de, ifdown, dev, "disabling interface\n");
1448 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1449 1429
1450 del_timer_sync(&de->media_timer); 1430 del_timer_sync(&de->media_timer);
1451 1431
@@ -1466,9 +1446,9 @@ static void de_tx_timeout (struct net_device *dev)
1466{ 1446{
1467 struct de_private *de = netdev_priv(dev); 1447 struct de_private *de = netdev_priv(dev);
1468 1448
1469 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", 1449 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1470 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), 1450 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1471 de->rx_tail, de->tx_head, de->tx_tail); 1451 de->rx_tail, de->tx_head, de->tx_tail);
1472 1452
1473 del_timer_sync(&de->media_timer); 1453 del_timer_sync(&de->media_timer);
1474 1454
@@ -1518,18 +1498,17 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1518 switch (de->media_type) { 1498 switch (de->media_type) {
1519 case DE_MEDIA_AUI: 1499 case DE_MEDIA_AUI:
1520 ecmd->port = PORT_AUI; 1500 ecmd->port = PORT_AUI;
1521 ecmd->speed = 5;
1522 break; 1501 break;
1523 case DE_MEDIA_BNC: 1502 case DE_MEDIA_BNC:
1524 ecmd->port = PORT_BNC; 1503 ecmd->port = PORT_BNC;
1525 ecmd->speed = 2;
1526 break; 1504 break;
1527 default: 1505 default:
1528 ecmd->port = PORT_TP; 1506 ecmd->port = PORT_TP;
1529 ecmd->speed = SPEED_10;
1530 break; 1507 break;
1531 } 1508 }
1532 1509
1510 ethtool_cmd_speed_set(ecmd, 10);
1511
1533 if (dr32(MacMode) & FullDuplex) 1512 if (dr32(MacMode) & FullDuplex)
1534 ecmd->duplex = DUPLEX_FULL; 1513 ecmd->duplex = DUPLEX_FULL;
1535 else 1514 else
@@ -1550,9 +1529,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1550 u32 new_media; 1529 u32 new_media;
1551 unsigned int media_lock; 1530 unsigned int media_lock;
1552 1531
1553 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2) 1532 if (ethtool_cmd_speed(ecmd) != 10)
1554 return -EINVAL;
1555 if (de->de21040 && ecmd->speed == 2)
1556 return -EINVAL; 1533 return -EINVAL;
1557 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) 1534 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1558 return -EINVAL; 1535 return -EINVAL;
@@ -1696,9 +1673,8 @@ static int de_nway_reset(struct net_device *dev)
1696 1673
1697 status = dr32(SIAStatus); 1674 status = dr32(SIAStatus);
1698 dw32(SIAStatus, (status & ~NWayState) | NWayRestart); 1675 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1699 if (netif_msg_link(de)) 1676 netif_info(de, link, dev, "link nway restart, status %x,%x\n",
1700 dev_info(&de->dev->dev, "link nway restart, status %x,%x\n", 1677 status, dr32(SIAStatus));
1701 status, dr32(SIAStatus));
1702 return 0; 1678 return 0;
1703} 1679}
1704 1680
@@ -1743,7 +1719,8 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
1743 de->dev->dev_addr[i] = value; 1719 de->dev->dev_addr[i] = value;
1744 udelay(1); 1720 udelay(1);
1745 if (boguscnt <= 0) 1721 if (boguscnt <= 0)
1746 pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i); 1722 pr_warn("timeout reading 21040 MAC address byte %u\n",
1723 i);
1747 } 1724 }
1748} 1725}
1749 1726
@@ -1929,8 +1906,10 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
1929 de->media[idx].csr14, 1906 de->media[idx].csr14,
1930 de->media[idx].csr15); 1907 de->media[idx].csr15);
1931 1908
1932 } else if (netif_msg_probe(de)) 1909 } else {
1933 pr_cont("\n"); 1910 if (netif_msg_probe(de))
1911 pr_cont("\n");
1912 }
1934 1913
1935 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3])) 1914 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1936 break; 1915 break;
@@ -1999,7 +1978,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
1999 1978
2000#ifndef MODULE 1979#ifndef MODULE
2001 if (board_idx == 0) 1980 if (board_idx == 0)
2002 printk("%s", version); 1981 pr_info("%s\n", version);
2003#endif 1982#endif
2004 1983
2005 /* allocate a new ethernet device structure, and fill in defaults */ 1984 /* allocate a new ethernet device structure, and fill in defaults */
@@ -2041,7 +2020,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2041 /* check for invalid IRQ value */ 2020 /* check for invalid IRQ value */
2042 if (pdev->irq < 2) { 2021 if (pdev->irq < 2) {
2043 rc = -EIO; 2022 rc = -EIO;
2044 pr_err(PFX "invalid irq (%d) for pci dev %s\n", 2023 pr_err("invalid irq (%d) for pci dev %s\n",
2045 pdev->irq, pci_name(pdev)); 2024 pdev->irq, pci_name(pdev));
2046 goto err_out_res; 2025 goto err_out_res;
2047 } 2026 }
@@ -2052,12 +2031,12 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2052 pciaddr = pci_resource_start(pdev, 1); 2031 pciaddr = pci_resource_start(pdev, 1);
2053 if (!pciaddr) { 2032 if (!pciaddr) {
2054 rc = -EIO; 2033 rc = -EIO;
2055 pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev)); 2034 pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
2056 goto err_out_res; 2035 goto err_out_res;
2057 } 2036 }
2058 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) { 2037 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2059 rc = -EIO; 2038 rc = -EIO;
2060 pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n", 2039 pr_err("MMIO resource (%llx) too small on pci dev %s\n",
2061 (unsigned long long)pci_resource_len(pdev, 1), 2040 (unsigned long long)pci_resource_len(pdev, 1),
2062 pci_name(pdev)); 2041 pci_name(pdev));
2063 goto err_out_res; 2042 goto err_out_res;
@@ -2067,7 +2046,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2067 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); 2046 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2068 if (!regs) { 2047 if (!regs) {
2069 rc = -EIO; 2048 rc = -EIO;
2070 pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", 2049 pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2071 (unsigned long long)pci_resource_len(pdev, 1), 2050 (unsigned long long)pci_resource_len(pdev, 1),
2072 pciaddr, pci_name(pdev)); 2051 pciaddr, pci_name(pdev));
2073 goto err_out_res; 2052 goto err_out_res;
@@ -2080,7 +2059,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2080 /* make sure hardware is not running */ 2059 /* make sure hardware is not running */
2081 rc = de_reset_mac(de); 2060 rc = de_reset_mac(de);
2082 if (rc) { 2061 if (rc) {
2083 pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev)); 2062 pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2084 goto err_out_iomap; 2063 goto err_out_iomap;
2085 } 2064 }
2086 2065
@@ -2100,11 +2079,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2100 goto err_out_iomap; 2079 goto err_out_iomap;
2101 2080
2102 /* print info about board and interface just registered */ 2081 /* print info about board and interface just registered */
2103 dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n", 2082 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
2104 de->de21040 ? "21040" : "21041", 2083 de->de21040 ? "21040" : "21041",
2105 dev->base_addr, 2084 dev->base_addr,
2106 dev->dev_addr, 2085 dev->dev_addr,
2107 dev->irq); 2086 dev->irq);
2108 2087
2109 pci_set_drvdata(pdev, dev); 2088 pci_set_drvdata(pdev, dev);
2110 2089
@@ -2192,7 +2171,7 @@ static int de_resume (struct pci_dev *pdev)
2192 if (!netif_running(dev)) 2171 if (!netif_running(dev))
2193 goto out_attach; 2172 goto out_attach;
2194 if ((retval = pci_enable_device(pdev))) { 2173 if ((retval = pci_enable_device(pdev))) {
2195 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2174 netdev_err(dev, "pci_enable_device failed in resume\n");
2196 goto out; 2175 goto out;
2197 } 2176 }
2198 pci_set_master(pdev); 2177 pci_set_master(pdev);
@@ -2221,7 +2200,7 @@ static struct pci_driver de_driver = {
2221static int __init de_init (void) 2200static int __init de_init (void)
2222{ 2201{
2223#ifdef MODULE 2202#ifdef MODULE
2224 printk("%s", version); 2203 pr_info("%s\n", version);
2225#endif 2204#endif
2226 return pci_register_driver(&de_driver); 2205 return pci_register_driver(&de_driver);
2227} 2206}
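
A note on the conversion pattern that recurs throughout these tulip hunks: printk()/dev_printk() calls that hand-built a "%s: ", dev->name prefix become the netdev_*()/netif_*() helpers from <linux/netdevice.h>, which take the struct net_device and add the prefix themselves, and the raw ecmd->speed field gives way to the ethtool_cmd_speed()/ethtool_cmd_speed_set() accessors. The sketch below is illustrative only; the foo_* names and the msg_enable field are assumptions, not code from this patch.

/* Minimal sketch of the logging and ethtool-speed conversion (hypothetical driver). */
#include <linux/netdevice.h>
#include <linux/ethtool.h>

struct foo_priv {
	struct net_device *dev;
	u32 msg_enable;			/* consulted by netif_msg_*() and netif_*() */
};

static void foo_report_link(struct foo_priv *fp, u32 status)
{
	/* old: if (netif_msg_link(fp))
	 *              dev_info(&fp->dev->dev, "link status %x\n", status); */
	netif_info(fp, link, fp->dev, "link status %x\n", status);

	/* old: printk(KERN_DEBUG "%s: link status %x\n", fp->dev->name, status); */
	netdev_dbg(fp->dev, "link status %x\n", status);
}

static int foo_get_settings(struct foo_priv *fp, struct ethtool_cmd *ecmd)
{
	/* ecmd->speed alone is 16 bits; the accessor also fills ecmd->speed_hi */
	ethtool_cmd_speed_set(ecmd, SPEED_10);
	ecmd->duplex = DUPLEX_HALF;
	return 0;
}

static int foo_set_settings(struct foo_priv *fp, struct ethtool_cmd *ecmd)
{
	if (ethtool_cmd_speed(ecmd) != SPEED_10)	/* read back through the accessor */
		return -EINVAL;
	return 0;
}
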
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index efaa1d69b720..45144d5bd11b 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
1995 1995
1996static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; 1996static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1997 1997
1998static int __init de4x5_eisa_probe (struct device *gendev) 1998static int __devinit de4x5_eisa_probe (struct device *gendev)
1999{ 1999{
2000 struct eisa_device *edev; 2000 struct eisa_device *edev;
2001 u_long iobase; 2001 u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
2097 return 0; 2097 return 0;
2098} 2098}
2099 2099
2100static struct eisa_device_id de4x5_eisa_ids[] = { 2100static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = {
2101 { "DEC4250", 0 }, /* 0 is the board name index... */ 2101 { "DEC4250", 0 }, /* 0 is the board name index... */
2102 { "" } 2102 { "" }
2103}; 2103};
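
The de4x5 hunks only adjust section annotations: a probe routine reachable through a registered eisa_driver can be called outside early boot, so it is tagged __devinit (discarded only when hotplug support is configured out) rather than __init, and the matching ID table becomes __devinitconst. A hypothetical skeleton, not taken from the patch, showing where the annotations sit:

#include <linux/init.h>
#include <linux/eisa.h>

static const struct eisa_device_id foo_eisa_ids[] __devinitconst = {
	{ "FOO0001", 0 },
	{ "" }
};

static int __devinit foo_eisa_probe(struct device *gendev)
{
	/* probe may be invoked after __init memory has been released */
	return 0;
}

static struct eisa_driver foo_eisa_driver = {
	.id_table = foo_eisa_ids,
	.driver = {
		.name  = "foo",
		.probe = foo_eisa_probe,
	},
};
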
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index fb07f48910ae..468512731966 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -295,8 +295,7 @@ enum dmfe_CR6_bits {
295/* Global variable declaration ----------------------------- */ 295/* Global variable declaration ----------------------------- */
296static int __devinitdata printed_version; 296static int __devinitdata printed_version;
297static const char version[] __devinitconst = 297static const char version[] __devinitconst =
298 KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version " 298 "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
299 DRV_VERSION " (" DRV_RELDATE ")\n";
300 299
301static int dmfe_debug; 300static int dmfe_debug;
302static unsigned char dmfe_media_mode = DMFE_AUTO; 301static unsigned char dmfe_media_mode = DMFE_AUTO;
@@ -381,7 +380,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
381 DMFE_DBUG(0, "dmfe_init_one()", 0); 380 DMFE_DBUG(0, "dmfe_init_one()", 0);
382 381
383 if (!printed_version++) 382 if (!printed_version++)
384 printk(version); 383 pr_info("%s\n", version);
385 384
386 /* 385 /*
387 * SPARC on-board DM910x chips should be handled by the main 386 * SPARC on-board DM910x chips should be handled by the main
@@ -406,7 +405,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
406 SET_NETDEV_DEV(dev, &pdev->dev); 405 SET_NETDEV_DEV(dev, &pdev->dev);
407 406
408 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 407 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
409 pr_warning("32-bit PCI DMA not available\n"); 408 pr_warn("32-bit PCI DMA not available\n");
410 err = -ENODEV; 409 err = -ENODEV;
411 goto err_out_free; 410 goto err_out_free;
412 } 411 }
@@ -2203,7 +2202,7 @@ static int __init dmfe_init_module(void)
2203{ 2202{
2204 int rc; 2203 int rc;
2205 2204
2206 printk(version); 2205 pr_info("%s\n", version);
2207 printed_version = 1; 2206 printed_version = 1;
2208 2207
2209 DMFE_DBUG(0, "init_module() ", debug); 2208 DMFE_DBUG(0, "init_module() ", debug);
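
In dmfe (and in uli526x and winbond-840 further down) the version banner loses its embedded KERN_INFO prefix and trailing newline; the log level and the "\n" now come from the pr_info("%s\n", version) call site, so the same string works with any printk wrapper. Roughly, with placeholder DRV_* values that are not from the patch:

#include <linux/init.h>
#include <linux/kernel.h>

#define DRV_NAME    "foo"
#define DRV_VERSION "1.0"
#define DRV_RELDATE "Jan-01-2011"

/* before: KERN_INFO and "\n" were baked into the data
 * static const char version[] =
 *	KERN_INFO DRV_NAME ": Foo net driver, version " DRV_VERSION " (" DRV_RELDATE ")\n";
 */
static const char version[] __devinitconst =
	"Foo net driver, version " DRV_VERSION " (" DRV_RELDATE ")";

static int __init foo_init_module(void)
{
	pr_info("%s\n", version);
	return 0;
}
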
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 296486bf0956..fa5eee925f25 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -222,8 +222,8 @@ subsequent_board:
222 /* there is no phy information, don't even try to build mtable */ 222 /* there is no phy information, don't even try to build mtable */
223 if (count == 0) { 223 if (count == 0) {
224 if (tulip_debug > 0) 224 if (tulip_debug > 0)
225 pr_warning("%s: no phy info, aborting mtable build\n", 225 pr_warn("%s: no phy info, aborting mtable build\n",
226 dev->name); 226 dev->name);
227 return; 227 return;
228 } 228 }
229 229
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 0013642903ee..5350d753e0ff 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
125#endif 125#endif
126 126
127 if (tulip_debug > 4) 127 if (tulip_debug > 4)
128 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n", 128 netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
129 entry, tp->rx_ring[entry].status); 129 entry, tp->rx_ring[entry].status);
130 130
131 do { 131 do {
132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { 132 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
133 printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n"); 133 netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
134 break; 134 break;
135 } 135 }
136 /* Acknowledge current RX interrupt sources. */ 136 /* Acknowledge current RX interrupt sources. */
@@ -145,9 +145,9 @@ int tulip_poll(struct napi_struct *napi, int budget)
145 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) 145 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
146 break; 146 break;
147 147
148 if (tulip_debug > 5) 148 if (tulip_debug > 5)
149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n", 149 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
150 dev->name, entry, status); 150 entry, status);
151 151
152 if (++work_done >= budget) 152 if (++work_done >= budget)
153 goto not_done; 153 goto not_done;
@@ -184,9 +184,9 @@ int tulip_poll(struct napi_struct *napi, int budget)
184 } 184 }
185 } else { 185 } else {
186 /* There was a fatal error. */ 186 /* There was a fatal error. */
187 if (tulip_debug > 2) 187 if (tulip_debug > 2)
188 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n", 188 netdev_dbg(dev, "Receive error, Rx status %08x\n",
189 dev->name, status); 189 status);
190 dev->stats.rx_errors++; /* end of a packet.*/ 190 dev->stats.rx_errors++; /* end of a packet.*/
191 if (pkt_len > 1518 || 191 if (pkt_len > 1518 ||
192 (status & RxDescRunt)) 192 (status & RxDescRunt))
@@ -367,16 +367,16 @@ static int tulip_rx(struct net_device *dev)
367 int received = 0; 367 int received = 0;
368 368
369 if (tulip_debug > 4) 369 if (tulip_debug > 4)
370 printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n", 370 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
371 entry, tp->rx_ring[entry].status); 371 entry, tp->rx_ring[entry].status);
372 /* If we own the next entry, it is a new packet. Send it up. */ 372 /* If we own the next entry, it is a new packet. Send it up. */
373 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { 373 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
374 s32 status = le32_to_cpu(tp->rx_ring[entry].status); 374 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
375 short pkt_len; 375 short pkt_len;
376 376
377 if (tulip_debug > 5) 377 if (tulip_debug > 5)
378 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n", 378 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
379 dev->name, entry, status); 379 entry, status);
380 if (--rx_work_limit < 0) 380 if (--rx_work_limit < 0)
381 break; 381 break;
382 382
@@ -404,16 +404,16 @@ static int tulip_rx(struct net_device *dev)
404 /* Ignore earlier buffers. */ 404 /* Ignore earlier buffers. */
405 if ((status & 0xffff) != 0x7fff) { 405 if ((status & 0xffff) != 0x7fff) {
406 if (tulip_debug > 1) 406 if (tulip_debug > 1)
407 dev_warn(&dev->dev, 407 netdev_warn(dev,
408 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", 408 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
409 status); 409 status);
410 dev->stats.rx_length_errors++; 410 dev->stats.rx_length_errors++;
411 } 411 }
412 } else { 412 } else {
413 /* There was a fatal error. */ 413 /* There was a fatal error. */
414 if (tulip_debug > 2) 414 if (tulip_debug > 2)
415 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n", 415 netdev_dbg(dev, "Receive error, Rx status %08x\n",
416 dev->name, status); 416 status);
417 dev->stats.rx_errors++; /* end of a packet.*/ 417 dev->stats.rx_errors++; /* end of a packet.*/
418 if (pkt_len > 1518 || 418 if (pkt_len > 1518 ||
419 (status & RxDescRunt)) 419 (status & RxDescRunt))
@@ -573,8 +573,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
573#endif /* CONFIG_TULIP_NAPI */ 573#endif /* CONFIG_TULIP_NAPI */
574 574
575 if (tulip_debug > 4) 575 if (tulip_debug > 4)
576 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n", 576 netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
577 dev->name, csr5, ioread32(ioaddr + CSR5)); 577 csr5, ioread32(ioaddr + CSR5));
578 578
579 579
580 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { 580 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
@@ -605,8 +605,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
605 /* There was a major error, log it. */ 605 /* There was a major error, log it. */
606#ifndef final_version 606#ifndef final_version
607 if (tulip_debug > 1) 607 if (tulip_debug > 1)
608 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n", 608 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
609 dev->name, status); 609 status);
610#endif 610#endif
611 dev->stats.tx_errors++; 611 dev->stats.tx_errors++;
612 if (status & 0x4104) 612 if (status & 0x4104)
@@ -804,8 +804,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
804 } 804 }
805 805
806 if (tulip_debug > 4) 806 if (tulip_debug > 4)
807 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n", 807 netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
808 dev->name, ioread32(ioaddr + CSR5)); 808 ioread32(ioaddr + CSR5));
809 809
810 return IRQ_HANDLED; 810 return IRQ_HANDLED;
811} 811}
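
One behavioural difference worth noting for the interrupt-path conversions: printk(KERN_DEBUG ...) is always compiled in and only filtered by the console log level, whereas netdev_dbg()/pr_debug() compile away unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled, in which case individual call sites can be toggled at run time. A small illustration (hypothetical file, not part of the patch):

/* Either define DEBUG before the includes (or ccflags-y += -DDEBUG in the
 * Makefile), or rely on dynamic debug, e.g.:
 *   echo 'module tulip +p' > /sys/kernel/debug/dynamic_debug/control
 */
#define DEBUG
#include <linux/netdevice.h>

static void foo_dump_csr5(struct net_device *dev, u32 csr5)
{
	netdev_dbg(dev, "interrupt csr5=%#8.8x\n", csr5);
}
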
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index a0c770ee4b64..4bd13922875d 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,8 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
182 switch (mleaf->type) { 182 switch (mleaf->type) {
183 case 0: /* 21140 non-MII xcvr. */ 183 case 0: /* 21140 non-MII xcvr. */
184 if (tulip_debug > 1) 184 if (tulip_debug > 1)
185 printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n", 185 netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n",
186 dev->name, p[1]); 186 p[1]);
187 dev->if_port = p[0]; 187 dev->if_port = p[0];
188 if (startup) 188 if (startup)
189 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); 189 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -204,15 +204,14 @@ void tulip_select_media(struct net_device *dev, int startup)
204 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; 204 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
205 unsigned char *rst = rleaf->leafdata; 205 unsigned char *rst = rleaf->leafdata;
206 if (tulip_debug > 1) 206 if (tulip_debug > 1)
207 printk(KERN_DEBUG "%s: Resetting the transceiver\n", 207 netdev_dbg(dev, "Resetting the transceiver\n");
208 dev->name);
209 for (i = 0; i < rst[0]; i++) 208 for (i = 0; i < rst[0]; i++)
210 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); 209 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
211 } 210 }
212 if (tulip_debug > 1) 211 if (tulip_debug > 1)
213 printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n", 212 netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n",
214 dev->name, medianame[dev->if_port], 213 medianame[dev->if_port],
215 setup[0], setup[1]); 214 setup[0], setup[1]);
216 if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */ 215 if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
217 csr13val = setup[0]; 216 csr13val = setup[0];
218 csr14val = setup[1]; 217 csr14val = setup[1];
@@ -239,8 +238,8 @@ void tulip_select_media(struct net_device *dev, int startup)
239 if (startup) iowrite32(csr13val, ioaddr + CSR13); 238 if (startup) iowrite32(csr13val, ioaddr + CSR13);
240 } 239 }
241 if (tulip_debug > 1) 240 if (tulip_debug > 1)
242 printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n", 241 netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n",
243 dev->name, csr15dir, csr15val); 242 csr15dir, csr15val);
244 if (mleaf->type == 4) 243 if (mleaf->type == 4)
245 new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); 244 new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
246 else 245 else
@@ -316,9 +315,9 @@ void tulip_select_media(struct net_device *dev, int startup)
316 if (tp->mii_advertise == 0) 315 if (tp->mii_advertise == 0)
317 tp->mii_advertise = tp->advertising[phy_num]; 316 tp->mii_advertise = tp->advertising[phy_num];
318 if (tulip_debug > 1) 317 if (tulip_debug > 1)
319 printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n", 318 netdev_dbg(dev, " Advertising %04x on MII %d\n",
320 dev->name, tp->mii_advertise, 319 tp->mii_advertise,
321 tp->phys[phy_num]); 320 tp->phys[phy_num]);
322 tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); 321 tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
323 } 322 }
324 break; 323 break;
@@ -335,8 +334,7 @@ void tulip_select_media(struct net_device *dev, int startup)
335 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; 334 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
336 unsigned char *rst = rleaf->leafdata; 335 unsigned char *rst = rleaf->leafdata;
337 if (tulip_debug > 1) 336 if (tulip_debug > 1)
338 printk(KERN_DEBUG "%s: Resetting the transceiver\n", 337 netdev_dbg(dev, "Resetting the transceiver\n");
339 dev->name);
340 for (i = 0; i < rst[0]; i++) 338 for (i = 0; i < rst[0]; i++)
341 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); 339 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
342 } 340 }
@@ -344,20 +342,21 @@ void tulip_select_media(struct net_device *dev, int startup)
344 break; 342 break;
345 } 343 }
346 default: 344 default:
347 printk(KERN_DEBUG "%s: Invalid media table selection %d\n", 345 netdev_dbg(dev, " Invalid media table selection %d\n",
348 dev->name, mleaf->type); 346 mleaf->type);
349 new_csr6 = 0x020E0000; 347 new_csr6 = 0x020E0000;
350 } 348 }
351 if (tulip_debug > 1) 349 if (tulip_debug > 1)
352 printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n", 350 netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n",
353 dev->name, medianame[dev->if_port], 351 medianame[dev->if_port],
354 ioread32(ioaddr + CSR12) & 0xff); 352 ioread32(ioaddr + CSR12) & 0xff);
355 } else if (tp->chip_id == LC82C168) { 353 } else if (tp->chip_id == LC82C168) {
356 if (startup && ! tp->medialock) 354 if (startup && ! tp->medialock)
357 dev->if_port = tp->mii_cnt ? 11 : 0; 355 dev->if_port = tp->mii_cnt ? 11 : 0;
358 if (tulip_debug > 1) 356 if (tulip_debug > 1)
359 printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n", 357 netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n",
360 dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]); 358 ioread32(ioaddr + 0xB8),
359 medianame[dev->if_port]);
361 if (tp->mii_cnt) { 360 if (tp->mii_cnt) {
362 new_csr6 = 0x810C0000; 361 new_csr6 = 0x810C0000;
363 iowrite32(0x0001, ioaddr + CSR15); 362 iowrite32(0x0001, ioaddr + CSR15);
@@ -388,9 +387,9 @@ void tulip_select_media(struct net_device *dev, int startup)
388 } else 387 } else
389 new_csr6 = 0x03860000; 388 new_csr6 = 0x03860000;
390 if (tulip_debug > 1) 389 if (tulip_debug > 1)
391 printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n", 390 netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n",
392 dev->name, medianame[dev->if_port], 391 medianame[dev->if_port],
393 ioread32(ioaddr + CSR12)); 392 ioread32(ioaddr + CSR12));
394 } 393 }
395 394
396 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); 395 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -504,8 +503,8 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
504 503
505 /* Fixup for DLink with miswired PHY. */ 504 /* Fixup for DLink with miswired PHY. */
506 if (mii_advert != to_advert) { 505 if (mii_advert != to_advert) {
507 printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n", 506 pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
508 board_idx, to_advert, phy, mii_advert); 507 board_idx, to_advert, phy, mii_advert);
509 tulip_mdio_write (dev, phy, 4, to_advert); 508 tulip_mdio_write (dev, phy, 4, to_advert);
510 } 509 }
511 510
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index a63e64b6863d..aa4d9dad0395 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
40 new_csr6 |= 0x00000200; 40 new_csr6 |= 0x00000200;
41 } 41 }
42 if (tulip_debug > 1) 42 if (tulip_debug > 1)
43 printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n", 43 netdev_dbg(dev, "PNIC autonegotiated status %08x, %s\n",
44 dev->name, phy_reg, medianame[dev->if_port]); 44 phy_reg, medianame[dev->if_port]);
45 if (tp->csr6 != new_csr6) { 45 if (tp->csr6 != new_csr6) {
46 tp->csr6 = new_csr6; 46 tp->csr6 = new_csr6;
47 /* Restart Tx */ 47 /* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
58 int phy_reg = ioread32(ioaddr + 0xB8); 58 int phy_reg = ioread32(ioaddr + 0xB8);
59 59
60 if (tulip_debug > 1) 60 if (tulip_debug > 1)
61 printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n", 61 netdev_dbg(dev, "PNIC link changed state %08x, CSR5 %08x\n",
62 dev->name, phy_reg, csr5); 62 phy_reg, csr5);
63 if (ioread32(ioaddr + CSR5) & TPLnkFail) { 63 if (ioread32(ioaddr + CSR5) & TPLnkFail) {
64 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7); 64 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
65 /* If we use an external MII, then we mustn't use the 65 /* If we use an external MII, then we mustn't use the
@@ -114,8 +114,8 @@ void pnic_timer(unsigned long data)
114 int csr5 = ioread32(ioaddr + CSR5); 114 int csr5 = ioread32(ioaddr + CSR5);
115 115
116 if (tulip_debug > 1) 116 if (tulip_debug > 1)
117 printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n", 117 netdev_dbg(dev, "PNIC timer PHY status %08x, %s CSR5 %08x\n",
118 dev->name, phy_reg, medianame[dev->if_port], csr5); 118 phy_reg, medianame[dev->if_port], csr5);
119 if (phy_reg & 0x04000000) { /* Remote link fault */ 119 if (phy_reg & 0x04000000) { /* Remote link fault */
120 iowrite32(0x0201F078, ioaddr + 0xB8); 120 iowrite32(0x0201F078, ioaddr + 0xB8);
121 next_tick = 1*HZ; 121 next_tick = 1*HZ;
@@ -125,11 +125,11 @@ void pnic_timer(unsigned long data)
125 next_tick = 60*HZ; 125 next_tick = 60*HZ;
126 } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */ 126 } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
127 if (tulip_debug > 1) 127 if (tulip_debug > 1)
128 printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n", 128 netdev_dbg(dev, "%s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
129 dev->name, medianame[dev->if_port], 129 medianame[dev->if_port],
130 csr12, 130 csr12,
131 ioread32(ioaddr + CSR5), 131 ioread32(ioaddr + CSR5),
132 ioread32(ioaddr + 0xB8)); 132 ioread32(ioaddr + 0xB8));
133 next_tick = 3*HZ; 133 next_tick = 3*HZ;
134 if (tp->medialock) { 134 if (tp->medialock) {
135 } else if (tp->nwayset && (dev->if_port & 1)) { 135 } else if (tp->nwayset && (dev->if_port & 1)) {
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index 4690c8e69207..93358ee4d830 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
125 csr14 |= 0x00001184; 125 csr14 |= 0x00001184;
126 126
127 if (tulip_debug > 1) 127 if (tulip_debug > 1)
128 printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n", 128 netdev_dbg(dev, "Restarting PNIC2 autonegotiation, csr14=%08x\n",
129 dev->name, csr14); 129 csr14);
130 130
131 /* tell pnic2_lnk_change we are doing an nway negotiation */ 131 /* tell pnic2_lnk_change we are doing an nway negotiation */
132 dev->if_port = 0; 132 dev->if_port = 0;
@@ -137,8 +137,7 @@ void pnic2_start_nway(struct net_device *dev)
137 137
138 tp->csr6 = ioread32(ioaddr + CSR6); 138 tp->csr6 = ioread32(ioaddr + CSR6);
139 if (tulip_debug > 1) 139 if (tulip_debug > 1)
140 printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n", 140 netdev_dbg(dev, "On Entry to Nway, csr6=%08x\n", tp->csr6);
141 dev->name, tp->csr6);
142 141
143 /* mask off any bits not to touch 142 /* mask off any bits not to touch
144 * comment at top of file explains mask value 143 * comment at top of file explains mask value
@@ -271,9 +270,10 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
271 iowrite32(1, ioaddr + CSR13); 270 iowrite32(1, ioaddr + CSR13);
272 271
273 if (tulip_debug > 2) 272 if (tulip_debug > 2)
274 printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n", 273 netdev_dbg(dev, "Setting CSR6 %08x/%x CSR12 %08x\n",
275 dev->name, tp->csr6, 274 tp->csr6,
276 ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12)); 275 ioread32(ioaddr + CSR6),
276 ioread32(ioaddr + CSR12));
277 277
278 /* now the following actually writes out the 278 /* now the following actually writes out the
279 * new csr6 values 279 * new csr6 values
@@ -324,7 +324,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
324 /* Link blew? Maybe restart NWay. */ 324 /* Link blew? Maybe restart NWay. */
325 325
326 if (tulip_debug > 2) 326 if (tulip_debug > 2)
327 printk(KERN_DEBUG "%s: Ugh! Link blew?\n", dev->name); 327 netdev_dbg(dev, "Ugh! Link blew?\n");
328 328
329 del_timer_sync(&tp->timer); 329 del_timer_sync(&tp->timer);
330 pnic2_start_nway(dev); 330 pnic2_start_nway(dev);
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 36c2725ec886..2017faf2d0e6 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
28 unsigned long flags; 28 unsigned long flags;
29 29
30 if (tulip_debug > 2) { 30 if (tulip_debug > 2) {
31 printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n", 31 netdev_dbg(dev, "Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
32 dev->name, medianame[dev->if_port], 32 medianame[dev->if_port],
33 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6), 33 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
34 csr12, ioread32(ioaddr + CSR13), 34 csr12, ioread32(ioaddr + CSR13),
35 ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); 35 ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
36 } 36 }
37 switch (tp->chip_id) { 37 switch (tp->chip_id) {
38 case DC21140: 38 case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
48 Assume this a generic MII or SYM transceiver. */ 48 Assume this a generic MII or SYM transceiver. */
49 next_tick = 60*HZ; 49 next_tick = 60*HZ;
50 if (tulip_debug > 2) 50 if (tulip_debug > 2)
51 printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n", 51 netdev_dbg(dev, "network media monitor CSR6 %08x CSR12 0x%02x\n",
52 dev->name, 52 ioread32(ioaddr + CSR6),
53 ioread32(ioaddr + CSR6), csr12 & 0xff); 53 csr12 & 0xff);
54 break; 54 break;
55 } 55 }
56 mleaf = &tp->mtable->mleaf[tp->cur_index]; 56 mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,8 +62,8 @@ void tulip_media_task(struct work_struct *work)
62 s8 bitnum = p[offset]; 62 s8 bitnum = p[offset];
63 if (p[offset+1] & 0x80) { 63 if (p[offset+1] & 0x80) {
64 if (tulip_debug > 1) 64 if (tulip_debug > 1)
65 printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n", 65 netdev_dbg(dev, "Transceiver monitor tick CSR12=%#02x, no media sense\n",
66 dev->name, csr12); 66 csr12);
67 if (mleaf->type == 4) { 67 if (mleaf->type == 4) {
68 if (mleaf->media == 3 && (csr12 & 0x02)) 68 if (mleaf->media == 3 && (csr12 & 0x02))
69 goto select_next_media; 69 goto select_next_media;
@@ -71,17 +71,16 @@ void tulip_media_task(struct work_struct *work)
71 break; 71 break;
72 } 72 }
73 if (tulip_debug > 2) 73 if (tulip_debug > 2)
74 printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n", 74 netdev_dbg(dev, "Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
75 dev->name, csr12, (bitnum >> 1) & 7, 75 csr12, (bitnum >> 1) & 7,
76 (csr12 & (1 << ((bitnum >> 1) & 7))) != 0, 76 (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
77 (bitnum >= 0)); 77 (bitnum >= 0));
78 /* Check that the specified bit has the proper value. */ 78 /* Check that the specified bit has the proper value. */
79 if ((bitnum < 0) != 79 if ((bitnum < 0) !=
80 ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) { 80 ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
81 if (tulip_debug > 2) 81 if (tulip_debug > 2)
82 printk(KERN_DEBUG "%s: Link beat detected for %s\n", 82 netdev_dbg(dev, "Link beat detected for %s\n",
83 dev->name, 83 medianame[mleaf->media & MEDIA_MASK]);
84 medianame[mleaf->media & MEDIA_MASK]);
85 if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */ 84 if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
86 goto actually_mii; 85 goto actually_mii;
87 netif_carrier_on(dev); 86 netif_carrier_on(dev);
@@ -99,10 +98,9 @@ void tulip_media_task(struct work_struct *work)
99 if (tulip_media_cap[dev->if_port] & MediaIsFD) 98 if (tulip_media_cap[dev->if_port] & MediaIsFD)
100 goto select_next_media; /* Skip FD entries. */ 99 goto select_next_media; /* Skip FD entries. */
101 if (tulip_debug > 1) 100 if (tulip_debug > 1)
102 printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n", 101 netdev_dbg(dev, "No link beat on media %s, trying transceiver type %s\n",
103 dev->name, 102 medianame[mleaf->media & MEDIA_MASK],
104 medianame[mleaf->media & MEDIA_MASK], 103 medianame[tp->mtable->mleaf[tp->cur_index].media]);
105 medianame[tp->mtable->mleaf[tp->cur_index].media]);
106 tulip_select_media(dev, 0); 104 tulip_select_media(dev, 0);
107 /* Restart the transmit process. */ 105 /* Restart the transmit process. */
108 tulip_restart_rxtx(tp); 106 tulip_restart_rxtx(tp);
@@ -166,10 +164,9 @@ void comet_timer(unsigned long data)
166 int next_tick = 60*HZ; 164 int next_tick = 60*HZ;
167 165
168 if (tulip_debug > 1) 166 if (tulip_debug > 1)
169 printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n", 167 netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
170 dev->name, 168 tulip_mdio_read(dev, tp->phys[0], 1),
171 tulip_mdio_read(dev, tp->phys[0], 1), 169 tulip_mdio_read(dev, tp->phys[0], 5));
172 tulip_mdio_read(dev, tp->phys[0], 5));
173 /* mod_timer synchronizes us with potential add_timer calls 170 /* mod_timer synchronizes us with potential add_timer calls
174 * from interrupts. 171 * from interrupts.
175 */ 172 */
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index ed66a16711dc..9db528967da9 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -547,11 +547,9 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
547 udelay(10); 547 udelay(10);
548 548
549 if (!i) 549 if (!i)
550 printk(KERN_DEBUG "%s: tulip_stop_rxtx() failed" 550 netdev_dbg(tp->dev, "tulip_stop_rxtx() failed (CSR5 0x%x CSR6 0x%x)\n",
551 " (CSR5 0x%x CSR6 0x%x)\n", 551 ioread32(ioaddr + CSR5),
552 pci_name(tp->pdev), 552 ioread32(ioaddr + CSR6));
553 ioread32(ioaddr + CSR5),
554 ioread32(ioaddr + CSR6));
555 } 553 }
556} 554}
557 555
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 5c01e260f1ba..82f87647207e 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -12,6 +12,7 @@
12 Please submit bugs to http://bugzilla.kernel.org/ . 12 Please submit bugs to http://bugzilla.kernel.org/ .
13*/ 13*/
14 14
15#define pr_fmt(fmt) "tulip: " fmt
15 16
16#define DRV_NAME "tulip" 17#define DRV_NAME "tulip"
17#ifdef CONFIG_TULIP_NAPI 18#ifdef CONFIG_TULIP_NAPI
@@ -119,8 +120,6 @@ module_param(csr0, int, 0);
119module_param_array(options, int, NULL, 0); 120module_param_array(options, int, NULL, 0);
120module_param_array(full_duplex, int, NULL, 0); 121module_param_array(full_duplex, int, NULL, 0);
121 122
122#define PFX DRV_NAME ": "
123
124#ifdef TULIP_DEBUG 123#ifdef TULIP_DEBUG
125int tulip_debug = TULIP_DEBUG; 124int tulip_debug = TULIP_DEBUG;
126#else 125#else
@@ -331,8 +330,7 @@ static void tulip_up(struct net_device *dev)
331 udelay(100); 330 udelay(100);
332 331
333 if (tulip_debug > 1) 332 if (tulip_debug > 1)
334 printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n", 333 netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
335 dev->name, dev->irq);
336 334
337 iowrite32(tp->rx_ring_dma, ioaddr + CSR3); 335 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
338 iowrite32(tp->tx_ring_dma, ioaddr + CSR4); 336 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -499,10 +497,10 @@ media_picked:
499 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */ 497 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
500 498
501 if (tulip_debug > 2) { 499 if (tulip_debug > 2) {
502 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n", 500 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
503 dev->name, ioread32(ioaddr + CSR0), 501 ioread32(ioaddr + CSR0),
504 ioread32(ioaddr + CSR5), 502 ioread32(ioaddr + CSR5),
505 ioread32(ioaddr + CSR6)); 503 ioread32(ioaddr + CSR6));
506 } 504 }
507 505
508 /* Set the timer to switch to check for link beat and perhaps switch 506 /* Set the timer to switch to check for link beat and perhaps switch
@@ -843,8 +841,7 @@ static int tulip_close (struct net_device *dev)
843 tulip_down (dev); 841 tulip_down (dev);
844 842
845 if (tulip_debug > 1) 843 if (tulip_debug > 1)
846 dev_printk(KERN_DEBUG, &dev->dev, 844 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
847 "Shutting down ethercard, status was %02x\n",
848 ioread32 (ioaddr + CSR5)); 845 ioread32 (ioaddr + CSR5));
849 846
850 free_irq (dev->irq, dev); 847 free_irq (dev->irq, dev);
@@ -1207,7 +1204,7 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1207 u32 csr0; 1204 u32 csr0;
1208 1205
1209 if (tulip_debug > 3) 1206 if (tulip_debug > 3)
1210 printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev)); 1207 netdev_dbg(dev, "tulip_mwi_config()\n");
1211 1208
1212 tp->csr0 = csr0 = 0; 1209 tp->csr0 = csr0 = 0;
1213 1210
@@ -1269,8 +1266,8 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1269out: 1266out:
1270 tp->csr0 = csr0; 1267 tp->csr0 = csr0;
1271 if (tulip_debug > 2) 1268 if (tulip_debug > 2)
1272 printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n", 1269 netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1273 pci_name(pdev), cache, csr0); 1270 cache, csr0);
1274} 1271}
1275#endif 1272#endif
1276 1273
@@ -1340,13 +1337,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1340 */ 1337 */
1341 1338
1342 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { 1339 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1343 pr_err(PFX "skipping LMC card\n"); 1340 pr_err("skipping LMC card\n");
1344 return -ENODEV; 1341 return -ENODEV;
1345 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE && 1342 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1346 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 || 1343 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1347 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 || 1344 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1348 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) { 1345 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1349 pr_err(PFX "skipping SBE T3E3 port\n"); 1346 pr_err("skipping SBE T3E3 port\n");
1350 return -ENODEV; 1347 return -ENODEV;
1351 } 1348 }
1352 1349
@@ -1362,13 +1359,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1362 1359
1363 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && 1360 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1364 pdev->revision < 0x30) { 1361 pdev->revision < 0x30) {
1365 pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); 1362 pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1366 return -ENODEV; 1363 return -ENODEV;
1367 } 1364 }
1368 1365
1369 dp = pci_device_to_OF_node(pdev); 1366 dp = pci_device_to_OF_node(pdev);
1370 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { 1367 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1371 pr_info(PFX "skipping DM910x expansion card (use dmfe)\n"); 1368 pr_info("skipping DM910x expansion card (use dmfe)\n");
1372 return -ENODEV; 1369 return -ENODEV;
1373 } 1370 }
1374 } 1371 }
@@ -1415,16 +1412,14 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1415 1412
1416 i = pci_enable_device(pdev); 1413 i = pci_enable_device(pdev);
1417 if (i) { 1414 if (i) {
1418 pr_err(PFX "Cannot enable tulip board #%d, aborting\n", 1415 pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1419 board_idx);
1420 return i; 1416 return i;
1421 } 1417 }
1422 1418
1423 /* The chip will fail to enter a low-power state later unless 1419 /* The chip will fail to enter a low-power state later unless
1424 * first explicitly commanded into D0 */ 1420 * first explicitly commanded into D0 */
1425 if (pci_set_power_state(pdev, PCI_D0)) { 1421 if (pci_set_power_state(pdev, PCI_D0)) {
1426 printk (KERN_NOTICE PFX 1422 pr_notice("Failed to set power state to D0\n");
1427 "Failed to set power state to D0\n");
1428 } 1423 }
1429 1424
1430 irq = pdev->irq; 1425 irq = pdev->irq;
@@ -1432,13 +1427,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1432 /* alloc_etherdev ensures aligned and zeroed private structures */ 1427 /* alloc_etherdev ensures aligned and zeroed private structures */
1433 dev = alloc_etherdev (sizeof (*tp)); 1428 dev = alloc_etherdev (sizeof (*tp));
1434 if (!dev) { 1429 if (!dev) {
1435 pr_err(PFX "ether device alloc failed, aborting\n"); 1430 pr_err("ether device alloc failed, aborting\n");
1436 return -ENOMEM; 1431 return -ENOMEM;
1437 } 1432 }
1438 1433
1439 SET_NETDEV_DEV(dev, &pdev->dev); 1434 SET_NETDEV_DEV(dev, &pdev->dev);
1440 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { 1435 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1441 pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n", 1436 pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1442 pci_name(pdev), 1437 pci_name(pdev),
1443 (unsigned long long)pci_resource_len (pdev, 0), 1438 (unsigned long long)pci_resource_len (pdev, 0),
1444 (unsigned long long)pci_resource_start (pdev, 0)); 1439 (unsigned long long)pci_resource_start (pdev, 0));
@@ -1483,7 +1478,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1483 if (sig == 0x09811317) { 1478 if (sig == 0x09811317) {
1484 tp->flags |= COMET_PM; 1479 tp->flags |= COMET_PM;
1485 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC; 1480 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1486 printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n"); 1481 pr_info("%s: Enabled WOL support for AN983B\n",
1482 __func__);
1487 } 1483 }
1488 } 1484 }
1489 tp->pdev = pdev; 1485 tp->pdev = pdev;
@@ -1879,7 +1875,7 @@ save_state:
1879 tulip_set_wolopts(pdev, tp->wolinfo.wolopts); 1875 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1880 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts); 1876 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1881 if (rc) 1877 if (rc)
1882 printk("tulip: pci_enable_wake failed (%d)\n", rc); 1878 pr_err("pci_enable_wake failed (%d)\n", rc);
1883 } 1879 }
1884 pci_set_power_state(pdev, pstate); 1880 pci_set_power_state(pdev, pstate);
1885 1881
@@ -1905,12 +1901,12 @@ static int tulip_resume(struct pci_dev *pdev)
1905 return 0; 1901 return 0;
1906 1902
1907 if ((retval = pci_enable_device(pdev))) { 1903 if ((retval = pci_enable_device(pdev))) {
1908 pr_err(PFX "pci_enable_device failed in resume\n"); 1904 pr_err("pci_enable_device failed in resume\n");
1909 return retval; 1905 return retval;
1910 } 1906 }
1911 1907
1912 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1908 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1913 pr_err(PFX "request_irq failed in resume\n"); 1909 pr_err("request_irq failed in resume\n");
1914 return retval; 1910 return retval;
1915 } 1911 }
1916 1912
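
tulip_core.c replaces the hand-rolled PFX macro with pr_fmt(); winbond-840.c below does the same via KBUILD_MODNAME. Every pr_err()/pr_info()/pr_notice() in <linux/printk.h> expands to printk(<level> pr_fmt(fmt), ...), so defining pr_fmt before the first include prefixes all of them in one place. A minimal illustration with a made-up prefix:

/* must precede any header that pulls in <linux/printk.h> */
#define pr_fmt(fmt) "foo: " fmt

#include <linux/kernel.h>

static int foo_check_irq(int irq)
{
	if (irq < 2) {
		pr_err("invalid irq (%d)\n", irq);	/* prints "foo: invalid irq (...)" */
		return -EIO;
	}
	return 0;
}
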
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 74217dbf0143..9e63f406f72d 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -209,8 +209,7 @@ enum uli526x_CR6_bits {
209/* Global variable declaration ----------------------------- */ 209/* Global variable declaration ----------------------------- */
210static int __devinitdata printed_version; 210static int __devinitdata printed_version;
211static const char version[] __devinitconst = 211static const char version[] __devinitconst =
212 KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version " 212 "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
213 DRV_VERSION " (" DRV_RELDATE ")\n";
214 213
215static int uli526x_debug; 214static int uli526x_debug;
216static unsigned char uli526x_media_mode = ULI526X_AUTO; 215static unsigned char uli526x_media_mode = ULI526X_AUTO;
@@ -283,7 +282,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
283 ULI526X_DBUG(0, "uli526x_init_one()", 0); 282 ULI526X_DBUG(0, "uli526x_init_one()", 0);
284 283
285 if (!printed_version++) 284 if (!printed_version++)
286 printk(version); 285 pr_info("%s\n", version);
287 286
288 /* Init network device */ 287 /* Init network device */
289 dev = alloc_etherdev(sizeof(*db)); 288 dev = alloc_etherdev(sizeof(*db));
@@ -292,7 +291,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
292 SET_NETDEV_DEV(dev, &pdev->dev); 291 SET_NETDEV_DEV(dev, &pdev->dev);
293 292
294 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 293 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
295 pr_warning("32-bit PCI DMA not available\n"); 294 pr_warn("32-bit PCI DMA not available\n");
296 err = -ENODEV; 295 err = -ENODEV;
297 goto err_out_free; 296 goto err_out_free;
298 } 297 }
@@ -390,9 +389,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
390 if (err) 389 if (err)
391 goto err_out_res; 390 goto err_out_res;
392 391
393 dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n", 392 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
394 ent->driver_data >> 16, pci_name(pdev), 393 ent->driver_data >> 16, pci_name(pdev),
395 dev->dev_addr, dev->irq); 394 dev->dev_addr, dev->irq);
396 395
397 pci_set_master(pdev); 396 pci_set_master(pdev);
398 397
@@ -524,7 +523,7 @@ static void uli526x_init(struct net_device *dev)
524 } 523 }
525 } 524 }
526 if(phy_tmp == 32) 525 if(phy_tmp == 32)
527 pr_warning("Can not find the phy address!!!"); 526 pr_warn("Can not find the phy address!!!\n");
528 /* Parser SROM and media mode */ 527 /* Parser SROM and media mode */
529 db->media_mode = uli526x_media_mode; 528 db->media_mode = uli526x_media_mode;
530 529
@@ -590,7 +589,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
590 589
591 /* Too large packet check */ 590 /* Too large packet check */
592 if (skb->len > MAX_PACKET_SIZE) { 591 if (skb->len > MAX_PACKET_SIZE) {
593 pr_err("big packet = %d\n", (u16)skb->len); 592 netdev_err(dev, "big packet = %d\n", (u16)skb->len);
594 dev_kfree_skb(skb); 593 dev_kfree_skb(skb);
595 return NETDEV_TX_OK; 594 return NETDEV_TX_OK;
596 } 595 }
@@ -600,7 +599,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
600 /* No Tx resource check, it never happens normally */ 599 /* No Tx resource check, it never happens normally */
601 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) { 600 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
602 spin_unlock_irqrestore(&db->lock, flags); 601 spin_unlock_irqrestore(&db->lock, flags);
603 pr_err("No Tx resource %ld\n", db->tx_packet_cnt); 602 netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt);
604 return NETDEV_TX_BUSY; 603 return NETDEV_TX_BUSY;
605 } 604 }
606 605
@@ -667,15 +666,6 @@ static int uli526x_stop(struct net_device *dev)
667 /* free allocated rx buffer */ 666 /* free allocated rx buffer */
668 uli526x_free_rxbuffer(db); 667 uli526x_free_rxbuffer(db);
669 668
670#if 0
671 /* show statistic counter */
672 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
673 db->tx_fifo_underrun, db->tx_excessive_collision,
674 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
675 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
676 db->reset_fatal, db->reset_TXtimeout);
677#endif
678
679 return 0; 669 return 0;
680} 670}
681 671
@@ -755,7 +745,6 @@ static void uli526x_free_tx_pkt(struct net_device *dev,
755 txptr = db->tx_remove_ptr; 745 txptr = db->tx_remove_ptr;
756 while(db->tx_packet_cnt) { 746 while(db->tx_packet_cnt) {
757 tdes0 = le32_to_cpu(txptr->tdes0); 747 tdes0 = le32_to_cpu(txptr->tdes0);
758 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
759 if (tdes0 & 0x80000000) 748 if (tdes0 & 0x80000000)
760 break; 749 break;
761 750
@@ -765,7 +754,6 @@ static void uli526x_free_tx_pkt(struct net_device *dev,
765 754
766 /* Transmit statistic counter */ 755 /* Transmit statistic counter */
767 if ( tdes0 != 0x7fffffff ) { 756 if ( tdes0 != 0x7fffffff ) {
768 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
769 dev->stats.collisions += (tdes0 >> 3) & 0xf; 757 dev->stats.collisions += (tdes0 >> 3) & 0xf;
770 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; 758 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
771 if (tdes0 & TDES0_ERR_MASK) { 759 if (tdes0 & TDES0_ERR_MASK) {
@@ -838,7 +826,6 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
838 /* error summary bit check */ 826 /* error summary bit check */
839 if (rdes0 & 0x8000) { 827 if (rdes0 & 0x8000) {
840 /* This is an error packet */ 828 /* This is an error packet */
841 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
842 dev->stats.rx_errors++; 829 dev->stats.rx_errors++;
843 if (rdes0 & 1) 830 if (rdes0 & 1)
844 dev->stats.rx_fifo_errors++; 831 dev->stats.rx_fifo_errors++;
@@ -945,12 +932,12 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
945 932
946 ecmd->transceiver = XCVR_EXTERNAL; 933 ecmd->transceiver = XCVR_EXTERNAL;
947 934
948 ecmd->speed = 10; 935 ethtool_cmd_speed_set(ecmd, SPEED_10);
949 ecmd->duplex = DUPLEX_HALF; 936 ecmd->duplex = DUPLEX_HALF;
950 937
951 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) 938 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
952 { 939 {
953 ecmd->speed = 100; 940 ethtool_cmd_speed_set(ecmd, SPEED_100);
954 } 941 }
955 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) 942 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
956 { 943 {
@@ -958,7 +945,7 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
958 } 945 }
959 if(db->link_failed) 946 if(db->link_failed)
960 { 947 {
961 ecmd->speed = -1; 948 ethtool_cmd_speed_set(ecmd, -1);
962 ecmd->duplex = -1; 949 ecmd->duplex = -1;
963 } 950 }
964 951
@@ -1024,7 +1011,6 @@ static void uli526x_timer(unsigned long data)
1024 struct net_device *dev = (struct net_device *) data; 1011 struct net_device *dev = (struct net_device *) data;
1025 struct uli526x_board_info *db = netdev_priv(dev); 1012 struct uli526x_board_info *db = netdev_priv(dev);
1026 unsigned long flags; 1013 unsigned long flags;
1027 u8 TmpSpeed=10;
1028 1014
1029 //ULI526X_DBUG(0, "uli526x_timer()", 0); 1015 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1030 spin_lock_irqsave(&db->lock, flags); 1016 spin_lock_irqsave(&db->lock, flags);
@@ -1047,8 +1033,7 @@ static void uli526x_timer(unsigned long data)
1047 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) { 1033 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
1048 db->reset_TXtimeout++; 1034 db->reset_TXtimeout++;
1049 db->wait_reset = 1; 1035 db->wait_reset = 1;
1050 printk( "%s: Tx timeout - resetting\n", 1036 netdev_err(dev, " Tx timeout - resetting\n");
1051 dev->name);
1052 } 1037 }
1053 } 1038 }
1054 1039
@@ -1070,7 +1055,7 @@ static void uli526x_timer(unsigned long data)
1070 /* Link Failed */ 1055 /* Link Failed */
1071 ULI526X_DBUG(0, "Link Failed", tmp_cr12); 1056 ULI526X_DBUG(0, "Link Failed", tmp_cr12);
1072 netif_carrier_off(dev); 1057 netif_carrier_off(dev);
1073 pr_info("%s NIC Link is Down\n",dev->name); 1058 netdev_info(dev, "NIC Link is Down\n");
1074 db->link_failed = 1; 1059 db->link_failed = 1;
1075 1060
1076 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1061 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1096,18 +1081,13 @@ static void uli526x_timer(unsigned long data)
1096 1081
1097 if(db->link_failed==0) 1082 if(db->link_failed==0)
1098 { 1083 {
1099 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) 1084 netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n",
1100 { 1085 (db->op_mode == ULI526X_100MHF ||
1101 TmpSpeed = 100; 1086 db->op_mode == ULI526X_100MFD)
1102 } 1087 ? 100 : 10,
1103 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) 1088 (db->op_mode == ULI526X_10MFD ||
1104 { 1089 db->op_mode == ULI526X_100MFD)
1105 pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed); 1090 ? "Full" : "Half");
1106 }
1107 else
1108 {
1109 pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
1110 }
1111 netif_carrier_on(dev); 1091 netif_carrier_on(dev);
1112 } 1092 }
1113 /* SHOW_MEDIA_TYPE(db->op_mode); */ 1093 /* SHOW_MEDIA_TYPE(db->op_mode); */
@@ -1116,7 +1096,7 @@ static void uli526x_timer(unsigned long data)
1116 { 1096 {
1117 if(db->init==1) 1097 if(db->init==1)
1118 { 1098 {
1119 pr_info("%s NIC Link is Down\n",dev->name); 1099 netdev_info(dev, "NIC Link is Down\n");
1120 netif_carrier_off(dev); 1100 netif_carrier_off(dev);
1121 } 1101 }
1122 } 1102 }
@@ -1242,7 +1222,7 @@ static int uli526x_resume(struct pci_dev *pdev)
1242 1222
1243 err = pci_set_power_state(pdev, PCI_D0); 1223 err = pci_set_power_state(pdev, PCI_D0);
1244 if (err) { 1224 if (err) {
1245 dev_warn(&dev->dev, "Could not put device into D0\n"); 1225 netdev_warn(dev, "Could not put device into D0\n");
1246 return err; 1226 return err;
1247 } 1227 }
1248 1228
@@ -1443,7 +1423,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1443 update_cr6(db->cr6_data, dev->base_addr); 1423 update_cr6(db->cr6_data, dev->base_addr);
1444 dev->trans_start = jiffies; 1424 dev->trans_start = jiffies;
1445 } else 1425 } else
1446 pr_err("No Tx resource - Send_filter_frame!\n"); 1426 netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
1447} 1427}
1448 1428
1449 1429
@@ -1540,7 +1520,6 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1540 else 1520 else
1541 phy_mode = 0x1000; 1521 phy_mode = 0x1000;
1542 1522
1543 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1544 switch (phy_mode) { 1523 switch (phy_mode) {
1545 case 0x1000: db->op_mode = ULI526X_10MHF; break; 1524 case 0x1000: db->op_mode = ULI526X_10MHF; break;
1546 case 0x2000: db->op_mode = ULI526X_10MFD; break; 1525 case 0x2000: db->op_mode = ULI526X_10MFD; break;
@@ -1829,7 +1808,7 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
1829static int __init uli526x_init_module(void) 1808static int __init uli526x_init_module(void)
1830{ 1809{
1831 1810
1832 printk(version); 1811 pr_info("%s\n", version);
1833 printed_version = 1; 1812 printed_version = 1;
1834 1813
1835 ULI526X_DBUG(0, "init_module() ", debug); 1814 ULI526X_DBUG(0, "init_module() ", debug);
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index f0b231035dee..862eadf07191 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -44,6 +44,8 @@
44 * Wake-On-LAN 44 * Wake-On-LAN
45*/ 45*/
46 46
47#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48
47#define DRV_NAME "winbond-840" 49#define DRV_NAME "winbond-840"
48#define DRV_VERSION "1.01-e" 50#define DRV_VERSION "1.01-e"
49#define DRV_RELDATE "Sep-11-2006" 51#define DRV_RELDATE "Sep-11-2006"
@@ -139,7 +141,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
139 141
140/* These identify the driver base version and may not be removed. */ 142/* These identify the driver base version and may not be removed. */
141static const char version[] __initconst = 143static const char version[] __initconst =
142 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " 144 "v" DRV_VERSION " (2.4 port) "
143 DRV_RELDATE " Donald Becker <becker@scyld.com>\n" 145 DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
144 " http://www.scyld.com/network/drivers.html\n"; 146 " http://www.scyld.com/network/drivers.html\n";
145 147
@@ -375,8 +377,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
375 irq = pdev->irq; 377 irq = pdev->irq;
376 378
377 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 379 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
378 pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n", 380 pr_warn("Device %s disabled due to DMA limitations\n",
379 pci_name(pdev)); 381 pci_name(pdev));
380 return -EIO; 382 return -EIO;
381 } 383 }
382 dev = alloc_etherdev(sizeof(*np)); 384 dev = alloc_etherdev(sizeof(*np));
@@ -643,8 +645,7 @@ static int netdev_open(struct net_device *dev)
643 goto out_err; 645 goto out_err;
644 646
645 if (debug > 1) 647 if (debug > 1)
646 printk(KERN_DEBUG "%s: w89c840_open() irq %d\n", 648 netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
647 dev->name, dev->irq);
648 649
649 if((i=alloc_ringdesc(dev))) 650 if((i=alloc_ringdesc(dev)))
650 goto out_err; 651 goto out_err;
@@ -656,7 +657,7 @@ static int netdev_open(struct net_device *dev)
656 657
657 netif_start_queue(dev); 658 netif_start_queue(dev);
658 if (debug > 2) 659 if (debug > 2)
659 printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name); 660 netdev_dbg(dev, "Done netdev_open()\n");
660 661
661 /* Set the timer to check for link beat. */ 662 /* Set the timer to check for link beat. */
662 init_timer(&np->timer); 663 init_timer(&np->timer);
@@ -785,9 +786,9 @@ static void netdev_timer(unsigned long data)
785 void __iomem *ioaddr = np->base_addr; 786 void __iomem *ioaddr = np->base_addr;
786 787
787 if (debug > 2) 788 if (debug > 2)
788 printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n", 789 netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
789 dev->name, ioread32(ioaddr + IntrStatus), 790 ioread32(ioaddr + IntrStatus),
790 ioread32(ioaddr + NetworkConfig)); 791 ioread32(ioaddr + NetworkConfig));
791 spin_lock_irq(&np->lock); 792 spin_lock_irq(&np->lock);
792 update_csr6(dev, update_link(dev)); 793 update_csr6(dev, update_link(dev));
793 spin_unlock_irq(&np->lock); 794 spin_unlock_irq(&np->lock);
@@ -1054,8 +1055,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1054 spin_unlock_irq(&np->lock); 1055 spin_unlock_irq(&np->lock);
1055 1056
1056 if (debug > 4) { 1057 if (debug > 4) {
1057 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n", 1058 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1058 dev->name, np->cur_tx, entry); 1059 np->cur_tx, entry);
1059 } 1060 }
1060 return NETDEV_TX_OK; 1061 return NETDEV_TX_OK;
1061} 1062}
@@ -1072,8 +1073,8 @@ static void netdev_tx_done(struct net_device *dev)
1072 if (tx_status & 0x8000) { /* There was an error, log it. */ 1073 if (tx_status & 0x8000) { /* There was an error, log it. */
1073#ifndef final_version 1074#ifndef final_version
1074 if (debug > 1) 1075 if (debug > 1)
1075 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n", 1076 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1076 dev->name, tx_status); 1077 tx_status);
1077#endif 1078#endif
1078 np->stats.tx_errors++; 1079 np->stats.tx_errors++;
1079 if (tx_status & 0x0104) np->stats.tx_aborted_errors++; 1080 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1085,8 +1086,8 @@ static void netdev_tx_done(struct net_device *dev)
1085 } else { 1086 } else {
1086#ifndef final_version 1087#ifndef final_version
1087 if (debug > 3) 1088 if (debug > 3)
1088 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n", 1089 netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
1089 dev->name, entry, tx_status); 1090 entry, tx_status);
1090#endif 1091#endif
1091 np->stats.tx_bytes += np->tx_skbuff[entry]->len; 1092 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1092 np->stats.collisions += (tx_status >> 3) & 15; 1093 np->stats.collisions += (tx_status >> 3) & 15;
@@ -1129,8 +1130,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1129 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus); 1130 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1130 1131
1131 if (debug > 4) 1132 if (debug > 4)
1132 printk(KERN_DEBUG "%s: Interrupt, status %04x\n", 1133 netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
1133 dev->name, intr_status);
1134 1134
1135 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0) 1135 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1136 break; 1136 break;
@@ -1171,8 +1171,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1171 } while (1); 1171 } while (1);
1172 1172
1173 if (debug > 3) 1173 if (debug > 3)
1174 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n", 1174 netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
1175 dev->name, ioread32(ioaddr + IntrStatus)); 1175 ioread32(ioaddr + IntrStatus));
1176 return IRQ_RETVAL(handled); 1176 return IRQ_RETVAL(handled);
1177} 1177}
1178 1178
@@ -1185,8 +1185,8 @@ static int netdev_rx(struct net_device *dev)
1185 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; 1185 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1186 1186
1187 if (debug > 4) { 1187 if (debug > 4) {
1188 printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n", 1188 netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
1189 entry, np->rx_ring[entry].status); 1189 entry, np->rx_ring[entry].status);
1190 } 1190 }
1191 1191
1192 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1192 /* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,8 +1195,8 @@ static int netdev_rx(struct net_device *dev)
1195 s32 status = desc->status; 1195 s32 status = desc->status;
1196 1196
1197 if (debug > 4) 1197 if (debug > 4)
1198 printk(KERN_DEBUG " netdev_rx() status was %08x\n", 1198 netdev_dbg(dev, " netdev_rx() status was %08x\n",
1199 status); 1199 status);
1200 if (status < 0) 1200 if (status < 0)
1201 break; 1201 break;
1202 if ((status & 0x38008300) != 0x0300) { 1202 if ((status & 0x38008300) != 0x0300) {
@@ -1211,8 +1211,8 @@ static int netdev_rx(struct net_device *dev)
1211 } else if (status & 0x8000) { 1211 } else if (status & 0x8000) {
1212 /* There was a fatal error. */ 1212 /* There was a fatal error. */
1213 if (debug > 2) 1213 if (debug > 2)
1214 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n", 1214 netdev_dbg(dev, "Receive error, Rx status %08x\n",
1215 dev->name, status); 1215 status);
1216 np->stats.rx_errors++; /* end of a packet.*/ 1216 np->stats.rx_errors++; /* end of a packet.*/
1217 if (status & 0x0890) np->stats.rx_length_errors++; 1217 if (status & 0x0890) np->stats.rx_length_errors++;
1218 if (status & 0x004C) np->stats.rx_frame_errors++; 1218 if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1225,8 @@ static int netdev_rx(struct net_device *dev)
1225 1225
1226#ifndef final_version 1226#ifndef final_version
1227 if (debug > 4) 1227 if (debug > 4)
1228 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n", 1228 netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
1229 pkt_len, status); 1229 pkt_len, status);
1230#endif 1230#endif
1231 /* Check if the packet is long enough to accept without copying 1231 /* Check if the packet is long enough to accept without copying
1232 to a minimally-sized skbuff. */ 1232 to a minimally-sized skbuff. */
@@ -1251,10 +1251,10 @@ static int netdev_rx(struct net_device *dev)
1251#ifndef final_version /* Remove after testing. */ 1251#ifndef final_version /* Remove after testing. */
1252 /* You will want this info for the initial debug. */ 1252 /* You will want this info for the initial debug. */
1253 if (debug > 5) 1253 if (debug > 5)
1254 printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n", 1254 netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
1255 &skb->data[0], &skb->data[6], 1255 &skb->data[0], &skb->data[6],
1256 skb->data[12], skb->data[13], 1256 skb->data[12], skb->data[13],
1257 &skb->data[14]); 1257 &skb->data[14]);
1258#endif 1258#endif
1259 skb->protocol = eth_type_trans(skb, dev); 1259 skb->protocol = eth_type_trans(skb, dev);
1260 netif_rx(skb); 1260 netif_rx(skb);
@@ -1292,8 +1292,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
1292 void __iomem *ioaddr = np->base_addr; 1292 void __iomem *ioaddr = np->base_addr;
1293 1293
1294 if (debug > 2) 1294 if (debug > 2)
1295 printk(KERN_DEBUG "%s: Abnormal event, %08x\n", 1295 netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
1296 dev->name, intr_status);
1297 if (intr_status == 0xffffffff) 1296 if (intr_status == 0xffffffff)
1298 return; 1297 return;
1299 spin_lock(&np->lock); 1298 spin_lock(&np->lock);
@@ -1313,8 +1312,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
1313 new = 127; /* load full packet before starting */ 1312 new = 127; /* load full packet before starting */
1314 new = (np->csr6 & ~(0x7F << 14)) | (new<<14); 1313 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1315#endif 1314#endif
1316 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n", 1315 netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
1317 dev->name, new);
1318 update_csr6(dev, new); 1316 update_csr6(dev, new);
1319 } 1317 }
1320 if (intr_status & RxDied) { /* Missed a Rx frame. */ 1318 if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1487,13 +1485,12 @@ static int netdev_close(struct net_device *dev)
1487 netif_stop_queue(dev); 1485 netif_stop_queue(dev);
1488 1486
1489 if (debug > 1) { 1487 if (debug > 1) {
1490 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n", 1488 netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
1491 dev->name, ioread32(ioaddr + IntrStatus), 1489 ioread32(ioaddr + IntrStatus),
1492 ioread32(ioaddr + NetworkConfig)); 1490 ioread32(ioaddr + NetworkConfig));
1493 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n", 1491 netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
1494 dev->name, 1492 np->cur_tx, np->dirty_tx,
1495 np->cur_tx, np->dirty_tx, 1493 np->cur_rx, np->dirty_rx);
1496 np->cur_rx, np->dirty_rx);
1497 } 1494 }
1498 1495
1499 /* Stop the chip's Tx and Rx processes. */ 1496 /* Stop the chip's Tx and Rx processes. */
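The winbond-840 hunks above are a mechanical conversion from printk(KERN_DEBUG "%s: ...", dev->name, ...) to netdev_dbg(), which prefixes the driver and interface name itself and hooks into dynamic debug the same way dev_dbg() does. A minimal before/after sketch of the pattern (illustrative only):

	/* before: caller formats the interface name by hand */
	if (debug > 1)
		printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
		       dev->name, dev->irq);

	/* after: netdev_dbg() takes the net_device and adds the prefix itself */
	if (debug > 1)
		netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
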
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 5a73752be2ca..988b8eb24d37 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -37,15 +37,6 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#endif 38#endif
39 39
40#ifdef DEBUG
41#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
42#define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__)
43#else
44#define enter(x) do {} while (0)
45#define leave(x) do {} while (0)
46#endif
47
48
49MODULE_DESCRIPTION("Xircom Cardbus ethernet driver"); 40MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
50MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); 41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
51MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
@@ -161,7 +152,7 @@ static struct pci_driver xircom_ops = {
161}; 152};
162 153
163 154
164#ifdef DEBUG 155#if defined DEBUG && DEBUG > 1
165static void print_binary(unsigned int number) 156static void print_binary(unsigned int number)
166{ 157{
167 int i,i2; 158 int i,i2;
@@ -176,7 +167,7 @@ static void print_binary(unsigned int number)
176 if ((i&3)==0) 167 if ((i&3)==0)
177 buffer[i2++]=' '; 168 buffer[i2++]=' ';
178 } 169 }
179 printk("%s\n",buffer); 170 pr_debug("%s\n",buffer);
180} 171}
181#endif 172#endif
182 173
@@ -205,7 +196,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
205 struct xircom_private *private; 196 struct xircom_private *private;
206 unsigned long flags; 197 unsigned long flags;
207 unsigned short tmp16; 198 unsigned short tmp16;
208 enter("xircom_probe");
209 199
210 /* First do the PCI initialisation */ 200 /* First do the PCI initialisation */
211 201
@@ -272,8 +262,8 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
272 goto reg_fail; 262 goto reg_fail;
273 } 263 }
274 264
275 dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n", 265 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
276 pdev->revision, pdev->irq); 266 pdev->revision, pdev->irq);
277 /* start the transmitter to get a heartbeat */ 267 /* start the transmitter to get a heartbeat */
278 /* TODO: send 2 dummy packets here */ 268 /* TODO: send 2 dummy packets here */
279 transceiver_voodoo(private); 269 transceiver_voodoo(private);
@@ -285,7 +275,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
285 275
286 trigger_receive(private); 276 trigger_receive(private);
287 277
288 leave("xircom_probe");
289 return 0; 278 return 0;
290 279
291reg_fail: 280reg_fail:
@@ -310,7 +299,6 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
310 struct net_device *dev = pci_get_drvdata(pdev); 299 struct net_device *dev = pci_get_drvdata(pdev);
311 struct xircom_private *card = netdev_priv(dev); 300 struct xircom_private *card = netdev_priv(dev);
312 301
313 enter("xircom_remove");
314 pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle); 302 pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
315 pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle); 303 pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
316 304
@@ -318,7 +306,6 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
318 unregister_netdev(dev); 306 unregister_netdev(dev);
319 free_netdev(dev); 307 free_netdev(dev);
320 pci_set_drvdata(pdev, NULL); 308 pci_set_drvdata(pdev, NULL);
321 leave("xircom_remove");
322} 309}
323 310
324static irqreturn_t xircom_interrupt(int irq, void *dev_instance) 311static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
@@ -328,17 +315,15 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
328 unsigned int status; 315 unsigned int status;
329 int i; 316 int i;
330 317
331 enter("xircom_interrupt\n");
332
333 spin_lock(&card->lock); 318 spin_lock(&card->lock);
334 status = inl(card->io_port+CSR5); 319 status = inl(card->io_port+CSR5);
335 320
336#ifdef DEBUG 321#if defined DEBUG && DEBUG > 1
337 print_binary(status); 322 print_binary(status);
338 printk("tx status 0x%08x 0x%08x\n", 323 pr_debug("tx status 0x%08x 0x%08x\n",
339 card->tx_buffer[0], card->tx_buffer[4]); 324 card->tx_buffer[0], card->tx_buffer[4]);
340 printk("rx status 0x%08x 0x%08x\n", 325 pr_debug("rx status 0x%08x 0x%08x\n",
341 card->rx_buffer[0], card->rx_buffer[4]); 326 card->rx_buffer[0], card->rx_buffer[4]);
342#endif 327#endif
343 /* Handle shared irq and hotplug */ 328 /* Handle shared irq and hotplug */
344 if (status == 0 || status == 0xffffffff) { 329 if (status == 0 || status == 0xffffffff) {
@@ -348,9 +333,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
348 333
349 if (link_status_changed(card)) { 334 if (link_status_changed(card)) {
350 int newlink; 335 int newlink;
351 printk(KERN_DEBUG "xircom_cb: Link status has changed\n"); 336 netdev_dbg(dev, "Link status has changed\n");
352 newlink = link_status(card); 337 newlink = link_status(card);
353 dev_info(&dev->dev, "Link is %i mbit\n", newlink); 338 netdev_info(dev, "Link is %d mbit\n", newlink);
354 if (newlink) 339 if (newlink)
355 netif_carrier_on(dev); 340 netif_carrier_on(dev);
356 else 341 else
@@ -369,9 +354,7 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
369 for (i=0;i<NUMDESCRIPTORS;i++) 354 for (i=0;i<NUMDESCRIPTORS;i++)
370 investigate_read_descriptor(dev,card,i,bufferoffsets[i]); 355 investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
371 356
372
373 spin_unlock(&card->lock); 357 spin_unlock(&card->lock);
374 leave("xircom_interrupt");
375 return IRQ_HANDLED; 358 return IRQ_HANDLED;
376} 359}
377 360
@@ -382,7 +365,6 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
382 unsigned long flags; 365 unsigned long flags;
383 int nextdescriptor; 366 int nextdescriptor;
384 int desc; 367 int desc;
385 enter("xircom_start_xmit");
386 368
387 card = netdev_priv(dev); 369 card = netdev_priv(dev);
388 spin_lock_irqsave(&card->lock,flags); 370 spin_lock_irqsave(&card->lock,flags);
@@ -424,13 +406,10 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
424 netif_stop_queue(dev); 406 netif_stop_queue(dev);
425 } 407 }
426 card->transmit_used = nextdescriptor; 408 card->transmit_used = nextdescriptor;
427 leave("xircom-start_xmit - sent");
428 spin_unlock_irqrestore(&card->lock,flags); 409 spin_unlock_irqrestore(&card->lock,flags);
429 return NETDEV_TX_OK; 410 return NETDEV_TX_OK;
430 } 411 }
431 412
432
433
434 /* Uh oh... no free descriptor... drop the packet */ 413 /* Uh oh... no free descriptor... drop the packet */
435 netif_stop_queue(dev); 414 netif_stop_queue(dev);
436 spin_unlock_irqrestore(&card->lock,flags); 415 spin_unlock_irqrestore(&card->lock,flags);
@@ -446,18 +425,16 @@ static int xircom_open(struct net_device *dev)
446{ 425{
447 struct xircom_private *xp = netdev_priv(dev); 426 struct xircom_private *xp = netdev_priv(dev);
448 int retval; 427 int retval;
449 enter("xircom_open"); 428
450 pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n", 429 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
451 dev->name, dev->irq); 430 dev->irq);
452 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); 431 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
453 if (retval) { 432 if (retval)
454 leave("xircom_open - No IRQ");
455 return retval; 433 return retval;
456 }
457 434
458 xircom_up(xp); 435 xircom_up(xp);
459 xp->open = 1; 436 xp->open = 1;
460 leave("xircom_open"); 437
461 return 0; 438 return 0;
462} 439}
463 440
@@ -466,7 +443,6 @@ static int xircom_close(struct net_device *dev)
466 struct xircom_private *card; 443 struct xircom_private *card;
467 unsigned long flags; 444 unsigned long flags;
468 445
469 enter("xircom_close");
470 card = netdev_priv(dev); 446 card = netdev_priv(dev);
471 netif_stop_queue(dev); /* we don't want new packets */ 447 netif_stop_queue(dev); /* we don't want new packets */
472 448
@@ -486,8 +462,6 @@ static int xircom_close(struct net_device *dev)
486 card->open = 0; 462 card->open = 0;
487 free_irq(dev->irq,dev); 463 free_irq(dev->irq,dev);
488 464
489 leave("xircom_close");
490
491 return 0; 465 return 0;
492 466
493} 467}
@@ -507,8 +481,6 @@ static void initialize_card(struct xircom_private *card)
507{ 481{
508 unsigned int val; 482 unsigned int val;
509 unsigned long flags; 483 unsigned long flags;
510 enter("initialize_card");
511
512 484
513 spin_lock_irqsave(&card->lock, flags); 485 spin_lock_irqsave(&card->lock, flags);
514 486
@@ -534,8 +506,6 @@ static void initialize_card(struct xircom_private *card)
534 deactivate_transmitter(card); 506 deactivate_transmitter(card);
535 507
536 spin_unlock_irqrestore(&card->lock, flags); 508 spin_unlock_irqrestore(&card->lock, flags);
537
538 leave("initialize_card");
539} 509}
540 510
541/* 511/*
@@ -547,12 +517,9 @@ ignored; I chose zero.
547static void trigger_transmit(struct xircom_private *card) 517static void trigger_transmit(struct xircom_private *card)
548{ 518{
549 unsigned int val; 519 unsigned int val;
550 enter("trigger_transmit");
551 520
552 val = 0; 521 val = 0;
553 outl(val, card->io_port + CSR1); 522 outl(val, card->io_port + CSR1);
554
555 leave("trigger_transmit");
556} 523}
557 524
558/* 525/*
@@ -565,12 +532,9 @@ ignored; I chose zero.
565static void trigger_receive(struct xircom_private *card) 532static void trigger_receive(struct xircom_private *card)
566{ 533{
567 unsigned int val; 534 unsigned int val;
568 enter("trigger_receive");
569 535
570 val = 0; 536 val = 0;
571 outl(val, card->io_port + CSR2); 537 outl(val, card->io_port + CSR2);
572
573 leave("trigger_receive");
574} 538}
575 539
576/* 540/*
@@ -581,8 +545,6 @@ static void setup_descriptors(struct xircom_private *card)
581{ 545{
582 u32 address; 546 u32 address;
583 int i; 547 int i;
584 enter("setup_descriptors");
585
586 548
587 BUG_ON(card->rx_buffer == NULL); 549 BUG_ON(card->rx_buffer == NULL);
588 BUG_ON(card->tx_buffer == NULL); 550 BUG_ON(card->tx_buffer == NULL);
@@ -636,8 +598,6 @@ static void setup_descriptors(struct xircom_private *card)
636 /* wite the transmit descriptor ring to the card */ 598 /* wite the transmit descriptor ring to the card */
637 address = card->tx_dma_handle; 599 address = card->tx_dma_handle;
638 outl(address, card->io_port + CSR4); /* xmit descr list address */ 600 outl(address, card->io_port + CSR4); /* xmit descr list address */
639
640 leave("setup_descriptors");
641} 601}
642 602
643/* 603/*
@@ -647,13 +607,10 @@ valid by setting the address in the card to 0x00.
647static void remove_descriptors(struct xircom_private *card) 607static void remove_descriptors(struct xircom_private *card)
648{ 608{
649 unsigned int val; 609 unsigned int val;
650 enter("remove_descriptors");
651 610
652 val = 0; 611 val = 0;
653 outl(val, card->io_port + CSR3); /* Receive descriptor address */ 612 outl(val, card->io_port + CSR3); /* Receive descriptor address */
654 outl(val, card->io_port + CSR4); /* Send descriptor address */ 613 outl(val, card->io_port + CSR4); /* Send descriptor address */
655
656 leave("remove_descriptors");
657} 614}
658 615
659/* 616/*
@@ -665,21 +622,17 @@ This function also clears the status-bit.
665static int link_status_changed(struct xircom_private *card) 622static int link_status_changed(struct xircom_private *card)
666{ 623{
667 unsigned int val; 624 unsigned int val;
668 enter("link_status_changed");
669 625
670 val = inl(card->io_port + CSR5); /* Status register */ 626 val = inl(card->io_port + CSR5); /* Status register */
671 627
672 if ((val & (1 << 27)) == 0) { /* no change */ 628 if ((val & (1 << 27)) == 0) /* no change */
673 leave("link_status_changed - nochange");
674 return 0; 629 return 0;
675 }
676 630
677 /* clear the event by writing a 1 to the bit in the 631 /* clear the event by writing a 1 to the bit in the
678 status register. */ 632 status register. */
679 val = (1 << 27); 633 val = (1 << 27);
680 outl(val, card->io_port + CSR5); 634 outl(val, card->io_port + CSR5);
681 635
682 leave("link_status_changed - changed");
683 return 1; 636 return 1;
684} 637}
685 638
@@ -691,16 +644,12 @@ in a non-stopped state.
691static int transmit_active(struct xircom_private *card) 644static int transmit_active(struct xircom_private *card)
692{ 645{
693 unsigned int val; 646 unsigned int val;
694 enter("transmit_active");
695 647
696 val = inl(card->io_port + CSR5); /* Status register */ 648 val = inl(card->io_port + CSR5); /* Status register */
697 649
698 if ((val & (7 << 20)) == 0) { /* transmitter disabled */ 650 if ((val & (7 << 20)) == 0) /* transmitter disabled */
699 leave("transmit_active - inactive");
700 return 0; 651 return 0;
701 }
702 652
703 leave("transmit_active - active");
704 return 1; 653 return 1;
705} 654}
706 655
@@ -711,17 +660,12 @@ in a non-stopped state.
711static int receive_active(struct xircom_private *card) 660static int receive_active(struct xircom_private *card)
712{ 661{
713 unsigned int val; 662 unsigned int val;
714 enter("receive_active");
715
716 663
717 val = inl(card->io_port + CSR5); /* Status register */ 664 val = inl(card->io_port + CSR5); /* Status register */
718 665
719 if ((val & (7 << 17)) == 0) { /* receiver disabled */ 666 if ((val & (7 << 17)) == 0) /* receiver disabled */
720 leave("receive_active - inactive");
721 return 0; 667 return 0;
722 }
723 668
724 leave("receive_active - active");
725 return 1; 669 return 1;
726} 670}
727 671
@@ -739,8 +683,6 @@ static void activate_receiver(struct xircom_private *card)
739{ 683{
740 unsigned int val; 684 unsigned int val;
741 int counter; 685 int counter;
742 enter("activate_receiver");
743
744 686
745 val = inl(card->io_port + CSR6); /* Operation mode */ 687 val = inl(card->io_port + CSR6); /* Operation mode */
746 688
@@ -761,7 +703,7 @@ static void activate_receiver(struct xircom_private *card)
761 udelay(50); 703 udelay(50);
762 counter--; 704 counter--;
763 if (counter <= 0) 705 if (counter <= 0)
764 pr_err("Receiver failed to deactivate\n"); 706 netdev_err(card->dev, "Receiver failed to deactivate\n");
765 } 707 }
766 708
767 /* enable the receiver */ 709 /* enable the receiver */
@@ -778,10 +720,9 @@ static void activate_receiver(struct xircom_private *card)
778 udelay(50); 720 udelay(50);
779 counter--; 721 counter--;
780 if (counter <= 0) 722 if (counter <= 0)
781 pr_err("Receiver failed to re-activate\n"); 723 netdev_err(card->dev,
724 "Receiver failed to re-activate\n");
782 } 725 }
783
784 leave("activate_receiver");
785} 726}
786 727
787/* 728/*
@@ -795,7 +736,6 @@ static void deactivate_receiver(struct xircom_private *card)
795{ 736{
796 unsigned int val; 737 unsigned int val;
797 int counter; 738 int counter;
798 enter("deactivate_receiver");
799 739
800 val = inl(card->io_port + CSR6); /* Operation mode */ 740 val = inl(card->io_port + CSR6); /* Operation mode */
801 val = val & ~2; /* disable the receiver */ 741 val = val & ~2; /* disable the receiver */
@@ -809,11 +749,8 @@ static void deactivate_receiver(struct xircom_private *card)
809 udelay(50); 749 udelay(50);
810 counter--; 750 counter--;
811 if (counter <= 0) 751 if (counter <= 0)
812 pr_err("Receiver failed to deactivate\n"); 752 netdev_err(card->dev, "Receiver failed to deactivate\n");
813 } 753 }
814
815
816 leave("deactivate_receiver");
817} 754}
818 755
819 756
@@ -831,8 +768,6 @@ static void activate_transmitter(struct xircom_private *card)
831{ 768{
832 unsigned int val; 769 unsigned int val;
833 int counter; 770 int counter;
834 enter("activate_transmitter");
835
836 771
837 val = inl(card->io_port + CSR6); /* Operation mode */ 772 val = inl(card->io_port + CSR6); /* Operation mode */
838 773
@@ -852,7 +787,8 @@ static void activate_transmitter(struct xircom_private *card)
852 udelay(50); 787 udelay(50);
853 counter--; 788 counter--;
854 if (counter <= 0) 789 if (counter <= 0)
855 pr_err("Transmitter failed to deactivate\n"); 790 netdev_err(card->dev,
791 "Transmitter failed to deactivate\n");
856 } 792 }
857 793
858 /* enable the transmitter */ 794 /* enable the transmitter */
@@ -869,10 +805,9 @@ static void activate_transmitter(struct xircom_private *card)
869 udelay(50); 805 udelay(50);
870 counter--; 806 counter--;
871 if (counter <= 0) 807 if (counter <= 0)
872 pr_err("Transmitter failed to re-activate\n"); 808 netdev_err(card->dev,
809 "Transmitter failed to re-activate\n");
873 } 810 }
874
875 leave("activate_transmitter");
876} 811}
877 812
878/* 813/*
@@ -886,7 +821,6 @@ static void deactivate_transmitter(struct xircom_private *card)
886{ 821{
887 unsigned int val; 822 unsigned int val;
888 int counter; 823 int counter;
889 enter("deactivate_transmitter");
890 824
891 val = inl(card->io_port + CSR6); /* Operation mode */ 825 val = inl(card->io_port + CSR6); /* Operation mode */
892 val = val & ~2; /* disable the transmitter */ 826 val = val & ~2; /* disable the transmitter */
@@ -900,11 +834,9 @@ static void deactivate_transmitter(struct xircom_private *card)
900 udelay(50); 834 udelay(50);
901 counter--; 835 counter--;
902 if (counter <= 0) 836 if (counter <= 0)
903 pr_err("Transmitter failed to deactivate\n"); 837 netdev_err(card->dev,
838 "Transmitter failed to deactivate\n");
904 } 839 }
905
906
907 leave("deactivate_transmitter");
908} 840}
909 841
910 842
@@ -916,13 +848,10 @@ must be called with the lock held and interrupts disabled.
916static void enable_transmit_interrupt(struct xircom_private *card) 848static void enable_transmit_interrupt(struct xircom_private *card)
917{ 849{
918 unsigned int val; 850 unsigned int val;
919 enter("enable_transmit_interrupt");
920 851
921 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 852 val = inl(card->io_port + CSR7); /* Interrupt enable register */
922 val |= 1; /* enable the transmit interrupt */ 853 val |= 1; /* enable the transmit interrupt */
923 outl(val, card->io_port + CSR7); 854 outl(val, card->io_port + CSR7);
924
925 leave("enable_transmit_interrupt");
926} 855}
927 856
928 857
@@ -934,13 +863,10 @@ must be called with the lock held and interrupts disabled.
934static void enable_receive_interrupt(struct xircom_private *card) 863static void enable_receive_interrupt(struct xircom_private *card)
935{ 864{
936 unsigned int val; 865 unsigned int val;
937 enter("enable_receive_interrupt");
938 866
939 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 867 val = inl(card->io_port + CSR7); /* Interrupt enable register */
940 val = val | (1 << 6); /* enable the receive interrupt */ 868 val = val | (1 << 6); /* enable the receive interrupt */
941 outl(val, card->io_port + CSR7); 869 outl(val, card->io_port + CSR7);
942
943 leave("enable_receive_interrupt");
944} 870}
945 871
946/* 872/*
@@ -951,13 +877,10 @@ must be called with the lock held and interrupts disabled.
951static void enable_link_interrupt(struct xircom_private *card) 877static void enable_link_interrupt(struct xircom_private *card)
952{ 878{
953 unsigned int val; 879 unsigned int val;
954 enter("enable_link_interrupt");
955 880
956 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 881 val = inl(card->io_port + CSR7); /* Interrupt enable register */
957 val = val | (1 << 27); /* enable the link status chage interrupt */ 882 val = val | (1 << 27); /* enable the link status chage interrupt */
958 outl(val, card->io_port + CSR7); 883 outl(val, card->io_port + CSR7);
959
960 leave("enable_link_interrupt");
961} 884}
962 885
963 886
@@ -970,12 +893,9 @@ must be called with the lock held and interrupts disabled.
970static void disable_all_interrupts(struct xircom_private *card) 893static void disable_all_interrupts(struct xircom_private *card)
971{ 894{
972 unsigned int val; 895 unsigned int val;
973 enter("enable_all_interrupts");
974 896
975 val = 0; /* disable all interrupts */ 897 val = 0; /* disable all interrupts */
976 outl(val, card->io_port + CSR7); 898 outl(val, card->io_port + CSR7);
977
978 leave("disable_all_interrupts");
979} 899}
980 900
981/* 901/*
@@ -986,7 +906,6 @@ must be called with the lock held and interrupts disabled.
986static void enable_common_interrupts(struct xircom_private *card) 906static void enable_common_interrupts(struct xircom_private *card)
987{ 907{
988 unsigned int val; 908 unsigned int val;
989 enter("enable_link_interrupt");
990 909
991 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 910 val = inl(card->io_port + CSR7); /* Interrupt enable register */
992 val |= (1<<16); /* Normal Interrupt Summary */ 911 val |= (1<<16); /* Normal Interrupt Summary */
@@ -998,8 +917,6 @@ static void enable_common_interrupts(struct xircom_private *card)
998 val |= (1<<2); /* Transmit Buffer Unavailable */ 917 val |= (1<<2); /* Transmit Buffer Unavailable */
999 val |= (1<<1); /* Transmit Process Stopped */ 918 val |= (1<<1); /* Transmit Process Stopped */
1000 outl(val, card->io_port + CSR7); 919 outl(val, card->io_port + CSR7);
1001
1002 leave("enable_link_interrupt");
1003} 920}
1004 921
1005/* 922/*
@@ -1010,13 +927,11 @@ must be called with the lock held and interrupts disabled.
1010static int enable_promisc(struct xircom_private *card) 927static int enable_promisc(struct xircom_private *card)
1011{ 928{
1012 unsigned int val; 929 unsigned int val;
1013 enter("enable_promisc");
1014 930
1015 val = inl(card->io_port + CSR6); 931 val = inl(card->io_port + CSR6);
1016 val = val | (1 << 6); 932 val = val | (1 << 6);
1017 outl(val, card->io_port + CSR6); 933 outl(val, card->io_port + CSR6);
1018 934
1019 leave("enable_promisc");
1020 return 1; 935 return 1;
1021} 936}
1022 937
@@ -1031,7 +946,6 @@ Must be called in locked state with interrupts disabled
1031static int link_status(struct xircom_private *card) 946static int link_status(struct xircom_private *card)
1032{ 947{
1033 unsigned int val; 948 unsigned int val;
1034 enter("link_status");
1035 949
1036 val = inb(card->io_port + CSR12); 950 val = inb(card->io_port + CSR12);
1037 951
@@ -1042,7 +956,6 @@ static int link_status(struct xircom_private *card)
1042 956
1043 /* If we get here -> no link at all */ 957 /* If we get here -> no link at all */
1044 958
1045 leave("link_status");
1046 return 0; 959 return 0;
1047} 960}
1048 961
@@ -1061,8 +974,6 @@ static void read_mac_address(struct xircom_private *card)
1061 unsigned long flags; 974 unsigned long flags;
1062 int i; 975 int i;
1063 976
1064 enter("read_mac_address");
1065
1066 spin_lock_irqsave(&card->lock, flags); 977 spin_lock_irqsave(&card->lock, flags);
1067 978
1068 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ 979 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
@@ -1090,7 +1001,6 @@ static void read_mac_address(struct xircom_private *card)
1090 } 1001 }
1091 spin_unlock_irqrestore(&card->lock, flags); 1002 spin_unlock_irqrestore(&card->lock, flags);
1092 pr_debug(" %pM\n", card->dev->dev_addr); 1003 pr_debug(" %pM\n", card->dev->dev_addr);
1093 leave("read_mac_address");
1094} 1004}
1095 1005
1096 1006
@@ -1103,8 +1013,6 @@ static void transceiver_voodoo(struct xircom_private *card)
1103{ 1013{
1104 unsigned long flags; 1014 unsigned long flags;
1105 1015
1106 enter("transceiver_voodoo");
1107
1108 /* disable all powermanagement */ 1016 /* disable all powermanagement */
1109 pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); 1017 pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
1110 1018
@@ -1122,7 +1030,6 @@ static void transceiver_voodoo(struct xircom_private *card)
1122 spin_unlock_irqrestore(&card->lock, flags); 1030 spin_unlock_irqrestore(&card->lock, flags);
1123 1031
1124 netif_start_queue(card->dev); 1032 netif_start_queue(card->dev);
1125 leave("transceiver_voodoo");
1126} 1033}
1127 1034
1128 1035
@@ -1131,8 +1038,6 @@ static void xircom_up(struct xircom_private *card)
1131 unsigned long flags; 1038 unsigned long flags;
1132 int i; 1039 int i;
1133 1040
1134 enter("xircom_up");
1135
1136 /* disable all powermanagement */ 1041 /* disable all powermanagement */
1137 pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); 1042 pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
1138 1043
@@ -1156,87 +1061,84 @@ static void xircom_up(struct xircom_private *card)
1156 trigger_receive(card); 1061 trigger_receive(card);
1157 trigger_transmit(card); 1062 trigger_transmit(card);
1158 netif_start_queue(card->dev); 1063 netif_start_queue(card->dev);
1159 leave("xircom_up");
1160} 1064}
1161 1065
1162/* Bufferoffset is in BYTES */ 1066/* Bufferoffset is in BYTES */
1163static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset) 1067static void
1068investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
1069 int descnr, unsigned int bufferoffset)
1164{ 1070{
1165 int status; 1071 int status;
1166
1167 enter("investigate_read_descriptor");
1168 status = le32_to_cpu(card->rx_buffer[4*descnr]);
1169 1072
1170 if ((status > 0)) { /* packet received */ 1073 status = le32_to_cpu(card->rx_buffer[4*descnr]);
1171 1074
1172 /* TODO: discard error packets */ 1075 if (status > 0) { /* packet received */
1173 1076
1174 short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */ 1077 /* TODO: discard error packets */
1175 struct sk_buff *skb;
1176 1078
1177 if (pkt_len > 1518) { 1079 short pkt_len = ((status >> 16) & 0x7ff) - 4;
1178 pr_err("Packet length %i is bogus\n", pkt_len); 1080 /* minus 4, we don't want the CRC */
1179 pkt_len = 1518; 1081 struct sk_buff *skb;
1180 }
1181 1082
1182 skb = dev_alloc_skb(pkt_len + 2); 1083 if (pkt_len > 1518) {
1183 if (skb == NULL) { 1084 netdev_err(dev, "Packet length %i is bogus\n", pkt_len);
1184 dev->stats.rx_dropped++; 1085 pkt_len = 1518;
1185 goto out;
1186 }
1187 skb_reserve(skb, 2);
1188 skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
1189 skb_put(skb, pkt_len);
1190 skb->protocol = eth_type_trans(skb, dev);
1191 netif_rx(skb);
1192 dev->stats.rx_packets++;
1193 dev->stats.rx_bytes += pkt_len;
1194
1195 out:
1196 /* give the buffer back to the card */
1197 card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000);
1198 trigger_receive(card);
1199 } 1086 }
1200 1087
1201 leave("investigate_read_descriptor"); 1088 skb = dev_alloc_skb(pkt_len + 2);
1202 1089 if (skb == NULL) {
1090 dev->stats.rx_dropped++;
1091 goto out;
1092 }
1093 skb_reserve(skb, 2);
1094 skb_copy_to_linear_data(skb,
1095 &card->rx_buffer[bufferoffset / 4],
1096 pkt_len);
1097 skb_put(skb, pkt_len);
1098 skb->protocol = eth_type_trans(skb, dev);
1099 netif_rx(skb);
1100 dev->stats.rx_packets++;
1101 dev->stats.rx_bytes += pkt_len;
1102
1103out:
1104 /* give the buffer back to the card */
1105 card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000);
1106 trigger_receive(card);
1107 }
1203} 1108}
1204 1109
1205 1110
1206/* Bufferoffset is in BYTES */ 1111/* Bufferoffset is in BYTES */
1207static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset) 1112static void
1113investigate_write_descriptor(struct net_device *dev,
1114 struct xircom_private *card,
1115 int descnr, unsigned int bufferoffset)
1208{ 1116{
1209 int status; 1117 int status;
1210
1211 enter("investigate_write_descriptor");
1212 1118
1213 status = le32_to_cpu(card->tx_buffer[4*descnr]); 1119 status = le32_to_cpu(card->tx_buffer[4*descnr]);
1214#if 0 1120#if 0
1215 if (status & 0x8000) { /* Major error */ 1121 if (status & 0x8000) { /* Major error */
1216 pr_err("Major transmit error status %x\n", status); 1122 pr_err("Major transmit error status %x\n", status);
1217 card->tx_buffer[4*descnr] = 0; 1123 card->tx_buffer[4*descnr] = 0;
1218 netif_wake_queue (dev); 1124 netif_wake_queue (dev);
1219 } 1125 }
1220#endif 1126#endif
1221 if (status > 0) { /* bit 31 is 0 when done */ 1127 if (status > 0) { /* bit 31 is 0 when done */
1222 if (card->tx_skb[descnr]!=NULL) { 1128 if (card->tx_skb[descnr]!=NULL) {
1223 dev->stats.tx_bytes += card->tx_skb[descnr]->len; 1129 dev->stats.tx_bytes += card->tx_skb[descnr]->len;
1224 dev_kfree_skb_irq(card->tx_skb[descnr]); 1130 dev_kfree_skb_irq(card->tx_skb[descnr]);
1225 }
1226 card->tx_skb[descnr] = NULL;
1227 /* Bit 8 in the status field is 1 if there was a collision */
1228 if (status&(1<<8))
1229 dev->stats.collisions++;
1230 card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
1231 netif_wake_queue (dev);
1232 dev->stats.tx_packets++;
1233 } 1131 }
1234 1132 card->tx_skb[descnr] = NULL;
1235 leave("investigate_write_descriptor"); 1133 /* Bit 8 in the status field is 1 if there was a collision */
1236 1134 if (status & (1 << 8))
1135 dev->stats.collisions++;
1136 card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
1137 netif_wake_queue (dev);
1138 dev->stats.tx_packets++;
1139 }
1237} 1140}
1238 1141
1239
1240static int __init xircom_init(void) 1142static int __init xircom_init(void)
1241{ 1143{
1242 return pci_register_driver(&xircom_ops); 1144 return pci_register_driver(&xircom_ops);
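In the xircom_cb conversion above, the hand-rolled enter()/leave() trace macros are dropped and the remaining messages move to the netdev_* helpers, which take a struct net_device rather than a bare format string. Helpers that only receive the private struct reach the net_device through its existing back-pointer; a hedged sketch of that pattern:

	/* before: no indication of which adapter logged the error */
	pr_err("Receiver failed to deactivate\n");

	/* after: the private struct's net_device back-pointer (card->dev)
	 * lets netdev_err() tag the message with driver and interface name */
	netdev_err(card->dev, "Receiver failed to deactivate\n");
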
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index f5e9ac00a07b..74e94054ab1a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,6 +123,9 @@ struct tun_struct {
123 gid_t group; 123 gid_t group;
124 124
125 struct net_device *dev; 125 struct net_device *dev;
126 u32 set_features;
127#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
128 NETIF_F_TSO6|NETIF_F_UFO)
126 struct fasync_struct *fasync; 129 struct fasync_struct *fasync;
127 130
128 struct tap_filter txflt; 131 struct tap_filter txflt;
@@ -451,12 +454,20 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
451 return 0; 454 return 0;
452} 455}
453 456
457static u32 tun_net_fix_features(struct net_device *dev, u32 features)
458{
459 struct tun_struct *tun = netdev_priv(dev);
460
461 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
462}
463
454static const struct net_device_ops tun_netdev_ops = { 464static const struct net_device_ops tun_netdev_ops = {
455 .ndo_uninit = tun_net_uninit, 465 .ndo_uninit = tun_net_uninit,
456 .ndo_open = tun_net_open, 466 .ndo_open = tun_net_open,
457 .ndo_stop = tun_net_close, 467 .ndo_stop = tun_net_close,
458 .ndo_start_xmit = tun_net_xmit, 468 .ndo_start_xmit = tun_net_xmit,
459 .ndo_change_mtu = tun_net_change_mtu, 469 .ndo_change_mtu = tun_net_change_mtu,
470 .ndo_fix_features = tun_net_fix_features,
460}; 471};
461 472
462static const struct net_device_ops tap_netdev_ops = { 473static const struct net_device_ops tap_netdev_ops = {
@@ -465,6 +476,7 @@ static const struct net_device_ops tap_netdev_ops = {
465 .ndo_stop = tun_net_close, 476 .ndo_stop = tun_net_close,
466 .ndo_start_xmit = tun_net_xmit, 477 .ndo_start_xmit = tun_net_xmit,
467 .ndo_change_mtu = tun_net_change_mtu, 478 .ndo_change_mtu = tun_net_change_mtu,
479 .ndo_fix_features = tun_net_fix_features,
468 .ndo_set_multicast_list = tun_net_mclist, 480 .ndo_set_multicast_list = tun_net_mclist,
469 .ndo_set_mac_address = eth_mac_addr, 481 .ndo_set_mac_address = eth_mac_addr,
470 .ndo_validate_addr = eth_validate_addr, 482 .ndo_validate_addr = eth_validate_addr,
@@ -628,8 +640,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
628 kfree_skb(skb); 640 kfree_skb(skb);
629 return -EINVAL; 641 return -EINVAL;
630 } 642 }
631 } else if (tun->flags & TUN_NOCHECKSUM) 643 }
632 skb->ip_summed = CHECKSUM_UNNECESSARY;
633 644
634 switch (tun->flags & TUN_TYPE_MASK) { 645 switch (tun->flags & TUN_TYPE_MASK) {
635 case TUN_TUN_DEV: 646 case TUN_TUN_DEV:
@@ -1088,11 +1099,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1088 1099
1089 tun_net_init(dev); 1100 tun_net_init(dev);
1090 1101
1091 if (strchr(dev->name, '%')) { 1102 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1092 err = dev_alloc_name(dev, dev->name); 1103 TUN_USER_FEATURES;
1093 if (err < 0) 1104 dev->features = dev->hw_features;
1094 goto err_free_sk;
1095 }
1096 1105
1097 err = register_netdevice(tun->dev); 1106 err = register_netdevice(tun->dev);
1098 if (err < 0) 1107 if (err < 0)
@@ -1158,18 +1167,12 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
1158 1167
1159/* This is like a cut-down ethtool ops, except done via tun fd so no 1168/* This is like a cut-down ethtool ops, except done via tun fd so no
1160 * privs required. */ 1169 * privs required. */
1161static int set_offload(struct net_device *dev, unsigned long arg) 1170static int set_offload(struct tun_struct *tun, unsigned long arg)
1162{ 1171{
1163 u32 old_features, features; 1172 u32 features = 0;
1164
1165 old_features = dev->features;
1166 /* Unset features, set them as we chew on the arg. */
1167 features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST
1168 |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6
1169 |NETIF_F_UFO));
1170 1173
1171 if (arg & TUN_F_CSUM) { 1174 if (arg & TUN_F_CSUM) {
1172 features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1175 features |= NETIF_F_HW_CSUM;
1173 arg &= ~TUN_F_CSUM; 1176 arg &= ~TUN_F_CSUM;
1174 1177
1175 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 1178 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
@@ -1195,9 +1198,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
1195 if (arg) 1198 if (arg)
1196 return -EINVAL; 1199 return -EINVAL;
1197 1200
1198 dev->features = features; 1201 tun->set_features = features;
1199 if (old_features != dev->features) 1202 netdev_update_features(tun->dev);
1200 netdev_features_change(dev);
1201 1203
1202 return 0; 1204 return 0;
1203} 1205}
@@ -1262,12 +1264,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1262 1264
1263 case TUNSETNOCSUM: 1265 case TUNSETNOCSUM:
1264 /* Disable/Enable checksum */ 1266 /* Disable/Enable checksum */
1265 if (arg)
1266 tun->flags |= TUN_NOCHECKSUM;
1267 else
1268 tun->flags &= ~TUN_NOCHECKSUM;
1269 1267
1270 tun_debug(KERN_INFO, tun, "checksum %s\n", 1268 /* [unimplemented] */
1269 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
1271 arg ? "disabled" : "enabled"); 1270 arg ? "disabled" : "enabled");
1272 break; 1271 break;
1273 1272
@@ -1316,7 +1315,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1316 break; 1315 break;
1317#endif 1316#endif
1318 case TUNSETOFFLOAD: 1317 case TUNSETOFFLOAD:
1319 ret = set_offload(tun->dev, arg); 1318 ret = set_offload(tun, arg);
1320 break; 1319 break;
1321 1320
1322 case TUNSETTXFILTER: 1321 case TUNSETTXFILTER:
@@ -1548,7 +1547,7 @@ static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1548{ 1547{
1549 cmd->supported = 0; 1548 cmd->supported = 0;
1550 cmd->advertising = 0; 1549 cmd->advertising = 0;
1551 cmd->speed = SPEED_10; 1550 ethtool_cmd_speed_set(cmd, SPEED_10);
1552 cmd->duplex = DUPLEX_FULL; 1551 cmd->duplex = DUPLEX_FULL;
1553 cmd->port = PORT_TP; 1552 cmd->port = PORT_TP;
1554 cmd->phy_address = 0; 1553 cmd->phy_address = 0;
@@ -1595,30 +1594,12 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
1595#endif 1594#endif
1596} 1595}
1597 1596
1598static u32 tun_get_rx_csum(struct net_device *dev)
1599{
1600 struct tun_struct *tun = netdev_priv(dev);
1601 return (tun->flags & TUN_NOCHECKSUM) == 0;
1602}
1603
1604static int tun_set_rx_csum(struct net_device *dev, u32 data)
1605{
1606 struct tun_struct *tun = netdev_priv(dev);
1607 if (data)
1608 tun->flags &= ~TUN_NOCHECKSUM;
1609 else
1610 tun->flags |= TUN_NOCHECKSUM;
1611 return 0;
1612}
1613
1614static const struct ethtool_ops tun_ethtool_ops = { 1597static const struct ethtool_ops tun_ethtool_ops = {
1615 .get_settings = tun_get_settings, 1598 .get_settings = tun_get_settings,
1616 .get_drvinfo = tun_get_drvinfo, 1599 .get_drvinfo = tun_get_drvinfo,
1617 .get_msglevel = tun_get_msglevel, 1600 .get_msglevel = tun_get_msglevel,
1618 .set_msglevel = tun_set_msglevel, 1601 .set_msglevel = tun_set_msglevel,
1619 .get_link = ethtool_op_get_link, 1602 .get_link = ethtool_op_get_link,
1620 .get_rx_csum = tun_get_rx_csum,
1621 .set_rx_csum = tun_set_rx_csum
1622}; 1603};
1623 1604
1624 1605
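The tun.c changes above move the driver to the hw_features mechanism: the device advertises which offloads may be toggled, TUNSETOFFLOAD merely caches the user's request, and the core recomputes dev->features through .ndo_fix_features. A condensed sketch of that flow, assuming this tree's u32 feature masks (fragments taken from the hunks above, not a compilable unit):

	/* probe: advertise the togglable bits, enable them by default */
	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES;
	dev->features    = dev->hw_features;

	/* TUNSETOFFLOAD: remember what userspace allowed, then let the
	 * core re-run the fix_features hook */
	tun->set_features = features;
	netdev_update_features(tun->dev);

	/* hook: mask only the user-controlled bits on every update */
	static u32 tun_net_fix_features(struct net_device *dev, u32 features)
	{
		struct tun_struct *tun = netdev_priv(dev);

		return (features & tun->set_features) |
		       (features & ~TUN_USER_FEATURES);
	}
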
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 82653cb07857..3de4283344e9 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1050,7 +1050,7 @@ typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1050 1050
1051 /* need to get stats to make these link speed/duplex valid */ 1051 /* need to get stats to make these link speed/duplex valid */
1052 typhoon_do_get_stats(tp); 1052 typhoon_do_get_stats(tp);
1053 cmd->speed = tp->speed; 1053 ethtool_cmd_speed_set(cmd, tp->speed);
1054 cmd->duplex = tp->duplex; 1054 cmd->duplex = tp->duplex;
1055 cmd->phy_address = 0; 1055 cmd->phy_address = 0;
1056 cmd->transceiver = XCVR_INTERNAL; 1056 cmd->transceiver = XCVR_INTERNAL;
@@ -1068,25 +1068,26 @@ static int
1068typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1068typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1069{ 1069{
1070 struct typhoon *tp = netdev_priv(dev); 1070 struct typhoon *tp = netdev_priv(dev);
1071 u32 speed = ethtool_cmd_speed(cmd);
1071 struct cmd_desc xp_cmd; 1072 struct cmd_desc xp_cmd;
1072 __le16 xcvr; 1073 __le16 xcvr;
1073 int err; 1074 int err;
1074 1075
1075 err = -EINVAL; 1076 err = -EINVAL;
1076 if(cmd->autoneg == AUTONEG_ENABLE) { 1077 if (cmd->autoneg == AUTONEG_ENABLE) {
1077 xcvr = TYPHOON_XCVR_AUTONEG; 1078 xcvr = TYPHOON_XCVR_AUTONEG;
1078 } else { 1079 } else {
1079 if(cmd->duplex == DUPLEX_HALF) { 1080 if (cmd->duplex == DUPLEX_HALF) {
1080 if(cmd->speed == SPEED_10) 1081 if (speed == SPEED_10)
1081 xcvr = TYPHOON_XCVR_10HALF; 1082 xcvr = TYPHOON_XCVR_10HALF;
1082 else if(cmd->speed == SPEED_100) 1083 else if (speed == SPEED_100)
1083 xcvr = TYPHOON_XCVR_100HALF; 1084 xcvr = TYPHOON_XCVR_100HALF;
1084 else 1085 else
1085 goto out; 1086 goto out;
1086 } else if(cmd->duplex == DUPLEX_FULL) { 1087 } else if (cmd->duplex == DUPLEX_FULL) {
1087 if(cmd->speed == SPEED_10) 1088 if (speed == SPEED_10)
1088 xcvr = TYPHOON_XCVR_10FULL; 1089 xcvr = TYPHOON_XCVR_10FULL;
1089 else if(cmd->speed == SPEED_100) 1090 else if (speed == SPEED_100)
1090 xcvr = TYPHOON_XCVR_100FULL; 1091 xcvr = TYPHOON_XCVR_100FULL;
1091 else 1092 else
1092 goto out; 1093 goto out;
@@ -1105,7 +1106,7 @@ typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1105 tp->speed = 0xff; /* invalid */ 1106 tp->speed = 0xff; /* invalid */
1106 tp->duplex = 0xff; /* invalid */ 1107 tp->duplex = 0xff; /* invalid */
1107 } else { 1108 } else {
1108 tp->speed = cmd->speed; 1109 tp->speed = speed;
1109 tp->duplex = cmd->duplex; 1110 tp->duplex = cmd->duplex;
1110 } 1111 }
1111 1112
@@ -1144,28 +1145,6 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1144 return 0; 1145 return 0;
1145} 1146}
1146 1147
1147static u32
1148typhoon_get_rx_csum(struct net_device *dev)
1149{
1150 /* For now, we don't allow turning off RX checksums.
1151 */
1152 return 1;
1153}
1154
1155static int
1156typhoon_set_flags(struct net_device *dev, u32 data)
1157{
1158 /* There's no way to turn off the RX VLAN offloading and stripping
1159 * on the current 3XP firmware -- it does not respect the offload
1160 * settings -- so we only allow the user to toggle the TX processing.
1161 */
1162 if (!(data & ETH_FLAG_RXVLAN))
1163 return -EINVAL;
1164
1165 return ethtool_op_set_flags(dev, data,
1166 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
1167}
1168
1169static void 1148static void
1170typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 1149typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1171{ 1150{
@@ -1187,13 +1166,7 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
1187 .get_wol = typhoon_get_wol, 1166 .get_wol = typhoon_get_wol,
1188 .set_wol = typhoon_set_wol, 1167 .set_wol = typhoon_set_wol,
1189 .get_link = ethtool_op_get_link, 1168 .get_link = ethtool_op_get_link,
1190 .get_rx_csum = typhoon_get_rx_csum,
1191 .set_tx_csum = ethtool_op_set_tx_csum,
1192 .set_sg = ethtool_op_set_sg,
1193 .set_tso = ethtool_op_set_tso,
1194 .get_ringparam = typhoon_get_ringparam, 1169 .get_ringparam = typhoon_get_ringparam,
1195 .set_flags = typhoon_set_flags,
1196 .get_flags = ethtool_op_get_flags,
1197}; 1170};
1198 1171
1199static int 1172static int
@@ -2482,10 +2455,15 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2482 2455
2483 /* We can handle scatter gather, up to 16 entries, and 2456 /* We can handle scatter gather, up to 16 entries, and
2484 * we can do IP checksumming (only version 4, doh...) 2457 * we can do IP checksumming (only version 4, doh...)
2458 *
2459 * There's no way to turn off the RX VLAN offloading and stripping
2460 * on the current 3XP firmware -- it does not respect the offload
2461 * settings -- so we only allow the user to toggle the TX processing.
2485 */ 2462 */
2486 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 2463 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2487 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2464 NETIF_F_HW_VLAN_TX;
2488 dev->features |= NETIF_F_TSO; 2465 dev->features = dev->hw_features |
2466 NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
2489 2467
2490 if(register_netdev(dev) < 0) { 2468 if(register_netdev(dev) < 0) {
2491 err_msg = "unable to register netdev"; 2469 err_msg = "unable to register netdev";
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 6f92e48f02d3..537fbc0a4401 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -410,7 +410,6 @@ static const struct ethtool_ops uec_ethtool_ops = {
410 .set_ringparam = uec_set_ringparam, 410 .set_ringparam = uec_set_ringparam,
411 .get_pauseparam = uec_get_pauseparam, 411 .get_pauseparam = uec_get_pauseparam,
412 .set_pauseparam = uec_set_pauseparam, 412 .set_pauseparam = uec_set_pauseparam,
413 .set_sg = ethtool_op_set_sg,
414 .get_sset_count = uec_get_sset_count, 413 .get_sset_count = uec_get_sset_count,
415 .get_strings = uec_get_strings, 414 .get_strings = uec_get_strings,
416 .get_ethtool_stats = uec_get_ethtool_stats, 415 .get_ethtool_stats = uec_get_ethtool_stats,
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3ec22c307797..9d4f9117260f 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -258,7 +258,7 @@ config USB_NET_NET1080
258 optionally with LEDs that indicate traffic 258 optionally with LEDs that indicate traffic
259 259
260config USB_NET_PLUSB 260config USB_NET_PLUSB
261 tristate "Prolific PL-2301/2302 based cables" 261 tristate "Prolific PL-2301/2302/25A1 based cables"
262 # if the handshake/init/reset problems, from original 'plusb', 262 # if the handshake/init/reset problems, from original 'plusb',
263 # are ever resolved ... then remove "experimental" 263 # are ever resolved ... then remove "experimental"
264 depends on USB_USBNET && EXPERIMENTAL 264 depends on USB_USBNET && EXPERIMENTAL
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6140b56cce53..6998aa6b7bb7 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -847,7 +847,7 @@ static void ax88172_set_multicast(struct net_device *net)
847static int ax88172_link_reset(struct usbnet *dev) 847static int ax88172_link_reset(struct usbnet *dev)
848{ 848{
849 u8 mode; 849 u8 mode;
850 struct ethtool_cmd ecmd; 850 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
851 851
852 mii_check_media(&dev->mii, 1, 1); 852 mii_check_media(&dev->mii, 1, 1);
853 mii_ethtool_gset(&dev->mii, &ecmd); 853 mii_ethtool_gset(&dev->mii, &ecmd);
@@ -856,8 +856,8 @@ static int ax88172_link_reset(struct usbnet *dev)
856 if (ecmd.duplex != DUPLEX_FULL) 856 if (ecmd.duplex != DUPLEX_FULL)
857 mode |= ~AX88172_MEDIUM_FD; 857 mode |= ~AX88172_MEDIUM_FD;
858 858
859 netdev_dbg(dev->net, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n", 859 netdev_dbg(dev->net, "ax88172_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
860 ecmd.speed, ecmd.duplex, mode); 860 ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
861 861
862 asix_write_medium_mode(dev, mode); 862 asix_write_medium_mode(dev, mode);
863 863
@@ -947,20 +947,20 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
947static int ax88772_link_reset(struct usbnet *dev) 947static int ax88772_link_reset(struct usbnet *dev)
948{ 948{
949 u16 mode; 949 u16 mode;
950 struct ethtool_cmd ecmd; 950 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
951 951
952 mii_check_media(&dev->mii, 1, 1); 952 mii_check_media(&dev->mii, 1, 1);
953 mii_ethtool_gset(&dev->mii, &ecmd); 953 mii_ethtool_gset(&dev->mii, &ecmd);
954 mode = AX88772_MEDIUM_DEFAULT; 954 mode = AX88772_MEDIUM_DEFAULT;
955 955
956 if (ecmd.speed != SPEED_100) 956 if (ethtool_cmd_speed(&ecmd) != SPEED_100)
957 mode &= ~AX_MEDIUM_PS; 957 mode &= ~AX_MEDIUM_PS;
958 958
959 if (ecmd.duplex != DUPLEX_FULL) 959 if (ecmd.duplex != DUPLEX_FULL)
960 mode &= ~AX_MEDIUM_FD; 960 mode &= ~AX_MEDIUM_FD;
961 961
962 netdev_dbg(dev->net, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n", 962 netdev_dbg(dev->net, "ax88772_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
963 ecmd.speed, ecmd.duplex, mode); 963 ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
964 964
965 asix_write_medium_mode(dev, mode); 965 asix_write_medium_mode(dev, mode);
966 966
@@ -1173,18 +1173,20 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
1173static int ax88178_link_reset(struct usbnet *dev) 1173static int ax88178_link_reset(struct usbnet *dev)
1174{ 1174{
1175 u16 mode; 1175 u16 mode;
1176 struct ethtool_cmd ecmd; 1176 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1177 struct asix_data *data = (struct asix_data *)&dev->data; 1177 struct asix_data *data = (struct asix_data *)&dev->data;
1178 u32 speed;
1178 1179
1179 netdev_dbg(dev->net, "ax88178_link_reset()\n"); 1180 netdev_dbg(dev->net, "ax88178_link_reset()\n");
1180 1181
1181 mii_check_media(&dev->mii, 1, 1); 1182 mii_check_media(&dev->mii, 1, 1);
1182 mii_ethtool_gset(&dev->mii, &ecmd); 1183 mii_ethtool_gset(&dev->mii, &ecmd);
1183 mode = AX88178_MEDIUM_DEFAULT; 1184 mode = AX88178_MEDIUM_DEFAULT;
1185 speed = ethtool_cmd_speed(&ecmd);
1184 1186
1185 if (ecmd.speed == SPEED_1000) 1187 if (speed == SPEED_1000)
1186 mode |= AX_MEDIUM_GM; 1188 mode |= AX_MEDIUM_GM;
1187 else if (ecmd.speed == SPEED_100) 1189 else if (speed == SPEED_100)
1188 mode |= AX_MEDIUM_PS; 1190 mode |= AX_MEDIUM_PS;
1189 else 1191 else
1190 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM); 1192 mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
@@ -1196,13 +1198,13 @@ static int ax88178_link_reset(struct usbnet *dev)
1196 else 1198 else
1197 mode &= ~AX_MEDIUM_FD; 1199 mode &= ~AX_MEDIUM_FD;
1198 1200
1199 netdev_dbg(dev->net, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n", 1201 netdev_dbg(dev->net, "ax88178_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
1200 ecmd.speed, ecmd.duplex, mode); 1202 speed, ecmd.duplex, mode);
1201 1203
1202 asix_write_medium_mode(dev, mode); 1204 asix_write_medium_mode(dev, mode);
1203 1205
1204 if (data->phymode == PHY_MODE_MARVELL && data->ledmode) 1206 if (data->phymode == PHY_MODE_MARVELL && data->ledmode)
1205 marvell_led_status(dev, ecmd.speed); 1207 marvell_led_status(dev, speed);
1206 1208
1207 return 0; 1209 return 0;
1208} 1210}
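The asix hunks also switch each on-stack struct ethtool_cmd to a designated initializer, { .cmd = ETHTOOL_GSET }, which zeroes the remaining members, presumably so fields the MII helpers do not fill are defined rather than stack garbage when ethtool_cmd_speed() assembles the value. A small sketch under those assumptions (the example_ helper name is hypothetical):

	static u32 example_read_link_speed(struct usbnet *dev)
	{
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; /* rest zeroed */

		mii_check_media(&dev->mii, 1, 1);
		mii_ethtool_gset(&dev->mii, &ecmd);

		/* 32-bit speed assembled from speed and speed_hi */
		return ethtool_cmd_speed(&ecmd);
	}
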
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 97687d335903..d7221c4a5dcf 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -686,7 +686,7 @@ static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
686 686
687 cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; 687 cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;
688 cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; 688 cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP;
689 cmd->speed = SPEED_10; 689 ethtool_cmd_speed_set(cmd, SPEED_10);
690 cmd->duplex = DUPLEX_HALF; 690 cmd->duplex = DUPLEX_HALF;
691 cmd->port = PORT_TP; 691 cmd->port = PORT_TP;
692 cmd->phy_address = 0; 692 cmd->phy_address = 0;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1033ef6476a4..4ab557d0287d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,13 +54,13 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "23-Apr-2011" 57#define DRIVER_VERSION "06-May-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
61 61
62/* Maximum NTB length */ 62/* Maximum NTB length */
63#define CDC_NCM_NTB_MAX_SIZE_TX (16384 + 4) /* bytes, must be short terminated */ 63#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
64#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */ 64#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
65 65
66/* Minimum value for MaxDatagramSize, ch. 6.2.9 */ 66/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
@@ -722,7 +722,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
722 722
723 } else { 723 } else {
724 /* reset variables */ 724 /* reset variables */
725 skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC); 725 skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
726 if (skb_out == NULL) { 726 if (skb_out == NULL) {
727 if (skb != NULL) { 727 if (skb != NULL) {
728 dev_kfree_skb_any(skb); 728 dev_kfree_skb_any(skb);
@@ -861,8 +861,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
861 /* store last offset */ 861 /* store last offset */
862 last_offset = offset; 862 last_offset = offset;
863 863
864 if ((last_offset < ctx->tx_max) && ((last_offset % 864 if (((last_offset < ctx->tx_max) && ((last_offset %
865 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) { 865 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
866 (((last_offset == ctx->tx_max) && ((ctx->tx_max %
867 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
868 (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
866 /* force short packet */ 869 /* force short packet */
867 *(((u8 *)skb_out->data) + last_offset) = 0; 870 *(((u8 *)skb_out->data) + last_offset) = 0;
868 last_offset++; 871 last_offset++;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 5002f5be47be..1d93133e9b74 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -599,13 +599,13 @@ static void dm9601_status(struct usbnet *dev, struct urb *urb)
599 599
600static int dm9601_link_reset(struct usbnet *dev) 600static int dm9601_link_reset(struct usbnet *dev)
601{ 601{
602 struct ethtool_cmd ecmd; 602 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
603 603
604 mii_check_media(&dev->mii, 1, 1); 604 mii_check_media(&dev->mii, 1, 1);
605 mii_ethtool_gset(&dev->mii, &ecmd); 605 mii_ethtool_gset(&dev->mii, &ecmd);
606 606
607 netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n", 607 netdev_dbg(dev->net, "link_reset() speed: %u duplex: %d\n",
608 ecmd.speed, ecmd.duplex); 608 ethtool_cmd_speed(&ecmd), ecmd.duplex);
609 609
610 return 0; 610 return 0;
611} 611}
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 823c53751307..217aec8a768f 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -45,6 +45,14 @@
45 * seems to get wedged under load. Prolific docs are weak, and 45 * seems to get wedged under load. Prolific docs are weak, and
46 * don't identify differences between PL2301 and PL2302, much less 46 * don't identify differences between PL2301 and PL2302, much less
47 * anything to explain the different PL2302 versions observed. 47 * anything to explain the different PL2302 versions observed.
48 *
49 * NOTE: pl2501 has several modes, including pl2301 and pl2302
50 * compatibility. Some docs suggest the difference between 2301
51 * and 2302 is only to make MS-Windows use a different driver...
52 *
53 * pl25a1 glue based on patch from Tony Gibbs. Prolific "docs" on
54 * this chip are as usual incomplete about what control messages
55 * are supported.
48 */ 56 */
49 57
50/* 58/*
@@ -86,16 +94,20 @@ pl_set_QuickLink_features(struct usbnet *dev, int val)
86 94
87static int pl_reset(struct usbnet *dev) 95static int pl_reset(struct usbnet *dev)
88{ 96{
97 int status;
98
89 /* some units seem to need this reset, others reject it utterly. 99 /* some units seem to need this reset, others reject it utterly.
90 * FIXME be more like "naplink" or windows drivers. 100 * FIXME be more like "naplink" or windows drivers.
91 */ 101 */
92 (void) pl_set_QuickLink_features(dev, 102 status = pl_set_QuickLink_features(dev,
93 PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E); 103 PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E);
104 if (status != 0 && netif_msg_probe(dev))
105 netif_dbg(dev, link, dev->net, "pl_reset --> %d\n", status);
94 return 0; 106 return 0;
95} 107}
96 108
97static const struct driver_info prolific_info = { 109static const struct driver_info prolific_info = {
98 .description = "Prolific PL-2301/PL-2302", 110 .description = "Prolific PL-2301/PL-2302/PL-25A1",
99 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT, 111 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
100 /* some PL-2302 versions seem to fail usb_set_interface() */ 112 /* some PL-2302 versions seem to fail usb_set_interface() */
101 .reset = pl_reset, 113 .reset = pl_reset,
@@ -111,6 +123,7 @@ static const struct driver_info prolific_info = {
111 123
112static const struct usb_device_id products [] = { 124static const struct usb_device_id products [] = {
113 125
126/* full speed cables */
114{ 127{
115 USB_DEVICE(0x067b, 0x0000), // PL-2301 128 USB_DEVICE(0x067b, 0x0000), // PL-2301
116 .driver_info = (unsigned long) &prolific_info, 129 .driver_info = (unsigned long) &prolific_info,
@@ -119,6 +132,15 @@ static const struct usb_device_id products [] = {
119 .driver_info = (unsigned long) &prolific_info, 132 .driver_info = (unsigned long) &prolific_info,
120}, 133},
121 134
135/* high speed cables */
136{
137 USB_DEVICE(0x067b, 0x25a1), /* PL-25A1, no eeprom */
138 .driver_info = (unsigned long) &prolific_info,
139}, {
140 USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
141 .driver_info = (unsigned long) &prolific_info,
142},
143
122 { }, // END 144 { }, // END
123}; 145};
124MODULE_DEVICE_TABLE(usb, products); 146MODULE_DEVICE_TABLE(usb, products);
@@ -134,16 +156,16 @@ static struct usb_driver plusb_driver = {
134 156
135static int __init plusb_init(void) 157static int __init plusb_init(void)
136{ 158{
137 return usb_register(&plusb_driver); 159 return usb_register(&plusb_driver);
138} 160}
139module_init(plusb_init); 161module_init(plusb_init);
140 162
141static void __exit plusb_exit(void) 163static void __exit plusb_exit(void)
142{ 164{
143 usb_deregister(&plusb_driver); 165 usb_deregister(&plusb_driver);
144} 166}
145module_exit(plusb_exit); 167module_exit(plusb_exit);
146 168
147MODULE_AUTHOR("David Brownell"); 169MODULE_AUTHOR("David Brownell");
148MODULE_DESCRIPTION("Prolific PL-2301/2302 USB Host to Host Link Driver"); 170MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
149MODULE_LICENSE("GPL"); 171MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 5994a25c56ac..255d6a424a6b 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,8 +104,10 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
104int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) 104int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
105{ 105{
106 struct cdc_state *info = (void *) &dev->data; 106 struct cdc_state *info = (void *) &dev->data;
107 struct usb_cdc_notification notification;
107 int master_ifnum; 108 int master_ifnum;
108 int retval; 109 int retval;
110 int partial;
109 unsigned count; 111 unsigned count;
110 __le32 rsp; 112 __le32 rsp;
111 u32 xid = 0, msg_len, request_id; 113 u32 xid = 0, msg_len, request_id;
@@ -133,13 +135,20 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
133 if (unlikely(retval < 0 || xid == 0)) 135 if (unlikely(retval < 0 || xid == 0))
134 return retval; 136 return retval;
135 137
136 // FIXME Seems like some devices discard responses when 138 /* Some devices don't respond on the control channel until
137 // we time out and cancel our "get response" requests... 139 * polled on the status channel, so do that first. */
138 // so, this is fragile. Probably need to poll for status. 140 if (dev->driver_info->data & RNDIS_DRIVER_DATA_POLL_STATUS) {
141 retval = usb_interrupt_msg(
142 dev->udev,
143 usb_rcvintpipe(dev->udev,
144 dev->status->desc.bEndpointAddress),
145 &notification, sizeof(notification), &partial,
146 RNDIS_CONTROL_TIMEOUT_MS);
147 if (unlikely(retval < 0))
148 return retval;
149 }
139 150
140 /* ignore status endpoint, just poll the control channel; 151 /* Poll the control channel; the request probably completed immediately */
141 * the request probably completed immediately
142 */
143 rsp = buf->msg_type | RNDIS_MSG_COMPLETION; 152 rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
144 for (count = 0; count < 10; count++) { 153 for (count = 0; count < 10; count++) {
145 memset(buf, 0, CONTROL_BUFFER_SIZE); 154 memset(buf, 0, CONTROL_BUFFER_SIZE);
@@ -581,17 +590,33 @@ static const struct driver_info rndis_info = {
581 .tx_fixup = rndis_tx_fixup, 590 .tx_fixup = rndis_tx_fixup,
582}; 591};
583 592
593static const struct driver_info rndis_poll_status_info = {
594 .description = "RNDIS device (poll status before control)",
595 .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
596 .data = RNDIS_DRIVER_DATA_POLL_STATUS,
597 .bind = rndis_bind,
598 .unbind = rndis_unbind,
599 .status = rndis_status,
600 .rx_fixup = rndis_rx_fixup,
601 .tx_fixup = rndis_tx_fixup,
602};
603
584/*-------------------------------------------------------------------------*/ 604/*-------------------------------------------------------------------------*/
585 605
586static const struct usb_device_id products [] = { 606static const struct usb_device_id products [] = {
587{ 607{
608 /* 2Wire HomePortal 1000SW */
609 USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
610 USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
611 .driver_info = (unsigned long) &rndis_poll_status_info,
612}, {
588 /* RNDIS is MSFT's un-official variant of CDC ACM */ 613 /* RNDIS is MSFT's un-official variant of CDC ACM */
589 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), 614 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
590 .driver_info = (unsigned long) &rndis_info, 615 .driver_info = (unsigned long) &rndis_info,
591}, { 616}, {
592 /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ 617 /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
593 USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), 618 USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
594 .driver_info = (unsigned long) &rndis_info, 619 .driver_info = (unsigned long) &rndis_poll_status_info,
595}, { 620}, {
596 /* RNDIS for tethering */ 621 /* RNDIS for tethering */
597 USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), 622 USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
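For devices flagged with RNDIS_DRIVER_DATA_POLL_STATUS, the command path now reads one notification from the interrupt (status) endpoint before polling the control channel. A minimal sketch of that pattern, assuming a usbnet-style device whose dev->status points at a valid interrupt endpoint; the helper name is illustrative and not part of the driver:

#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>

/* Illustrative helper: drain one notification from the status endpoint. */
static int rndis_poll_status_once(struct usbnet *dev, int timeout_ms)
{
        struct usb_cdc_notification notification;
        int partial;

        return usb_interrupt_msg(dev->udev,
                                 usb_rcvintpipe(dev->udev,
                                        dev->status->desc.bEndpointAddress),
                                 &notification, sizeof(notification),
                                 &partial, timeout_ms);
}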
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index e85c89c6706d..041fb7d43c4f 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -843,10 +843,11 @@ static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *e
843 get_registers(dev, BMCR, 2, &bmcr); 843 get_registers(dev, BMCR, 2, &bmcr);
844 get_registers(dev, ANLP, 2, &lpa); 844 get_registers(dev, ANLP, 2, &lpa);
845 if (bmcr & BMCR_ANENABLE) { 845 if (bmcr & BMCR_ANENABLE) {
846 u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ?
847 SPEED_100 : SPEED_10);
848 ethtool_cmd_speed_set(ecmd, speed);
846 ecmd->autoneg = AUTONEG_ENABLE; 849 ecmd->autoneg = AUTONEG_ENABLE;
847 ecmd->speed = (lpa & (LPA_100HALF | LPA_100FULL)) ? 850 if (speed == SPEED_100)
848 SPEED_100 : SPEED_10;
849 if (ecmd->speed == SPEED_100)
850 ecmd->duplex = (lpa & LPA_100FULL) ? 851 ecmd->duplex = (lpa & LPA_100FULL) ?
851 DUPLEX_FULL : DUPLEX_HALF; 852 DUPLEX_FULL : DUPLEX_HALF;
852 else 853 else
@@ -854,8 +855,8 @@ static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *e
854 DUPLEX_FULL : DUPLEX_HALF; 855 DUPLEX_FULL : DUPLEX_HALF;
855 } else { 856 } else {
856 ecmd->autoneg = AUTONEG_DISABLE; 857 ecmd->autoneg = AUTONEG_DISABLE;
857 ecmd->speed = (bmcr & BMCR_SPEED100) ? 858 ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ?
858 SPEED_100 : SPEED_10; 859 SPEED_100 : SPEED_10));
859 ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 860 ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
860 DUPLEX_FULL : DUPLEX_HALF; 861 DUPLEX_FULL : DUPLEX_HALF;
861 } 862 }
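The rework above computes the speed once from the link-partner ability bits and reuses it for the duplex decision; with autonegotiation off, speed and duplex come straight from BMCR. A self-contained sketch of that decode using the standard bit definitions from linux/mii.h and the SPEED_/DUPLEX_ constants from linux/ethtool.h (the 10 Mb/s duplex case follows the driver's LPA_10FULL test):

#include <stdint.h>
#include <linux/mii.h>      /* BMCR_*, LPA_* bit definitions */
#include <linux/ethtool.h>  /* SPEED_10/100, DUPLEX_HALF/FULL */

/* Decode speed/duplex from raw BMCR/LPA register values. */
static void decode_link(uint16_t bmcr, uint16_t lpa,
                        uint32_t *speed, uint8_t *duplex)
{
        if (bmcr & BMCR_ANENABLE) {
                *speed = (lpa & (LPA_100HALF | LPA_100FULL)) ?
                         SPEED_100 : SPEED_10;
                if (*speed == SPEED_100)
                        *duplex = (lpa & LPA_100FULL) ?
                                  DUPLEX_FULL : DUPLEX_HALF;
                else
                        *duplex = (lpa & LPA_10FULL) ?
                                  DUPLEX_FULL : DUPLEX_HALF;
        } else {
                *speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
                *duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
        }
}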
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 753ee6eb7edd..15b3d6888ae9 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -65,7 +65,6 @@ struct smsc75xx_priv {
65 struct usbnet *dev; 65 struct usbnet *dev;
66 u32 rfe_ctl; 66 u32 rfe_ctl;
67 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; 67 u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
68 bool use_rx_csum;
69 struct mutex dataport_mutex; 68 struct mutex dataport_mutex;
70 spinlock_t rfe_ctl_lock; 69 spinlock_t rfe_ctl_lock;
71 struct work_struct set_multicast; 70 struct work_struct set_multicast;
@@ -504,7 +503,7 @@ static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex,
504static int smsc75xx_link_reset(struct usbnet *dev) 503static int smsc75xx_link_reset(struct usbnet *dev)
505{ 504{
506 struct mii_if_info *mii = &dev->mii; 505 struct mii_if_info *mii = &dev->mii;
507 struct ethtool_cmd ecmd; 506 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
508 u16 lcladv, rmtadv; 507 u16 lcladv, rmtadv;
509 int ret; 508 int ret;
510 509
@@ -520,8 +519,9 @@ static int smsc75xx_link_reset(struct usbnet *dev)
520 lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); 519 lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
521 rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); 520 rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
522 521
523 netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x" 522 netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x"
524 " rmtadv: %04x", ecmd.speed, ecmd.duplex, lcladv, rmtadv); 523 " rmtadv: %04x", ethtool_cmd_speed(&ecmd),
524 ecmd.duplex, lcladv, rmtadv);
525 525
526 return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); 526 return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
527} 527}
@@ -548,28 +548,6 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
548 "unexpected interrupt, intdata=0x%08X", intdata); 548 "unexpected interrupt, intdata=0x%08X", intdata);
549} 549}
550 550
551/* Enable or disable Rx checksum offload engine */
552static int smsc75xx_set_rx_csum_offload(struct usbnet *dev)
553{
554 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
555 unsigned long flags;
556 int ret;
557
558 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
559
560 if (pdata->use_rx_csum)
561 pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
562 else
563 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
564
565 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
566
567 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
568 check_warn_return(ret, "Error writing RFE_CTL");
569
570 return 0;
571}
572
573static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) 551static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
574{ 552{
575 return MAX_EEPROM_SIZE; 553 return MAX_EEPROM_SIZE;
@@ -599,34 +577,6 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
599 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); 577 return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
600} 578}
601 579
602static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev)
603{
604 struct usbnet *dev = netdev_priv(netdev);
605 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
606
607 return pdata->use_rx_csum;
608}
609
610static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
611{
612 struct usbnet *dev = netdev_priv(netdev);
613 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
614
615 pdata->use_rx_csum = !!val;
616
617 return smsc75xx_set_rx_csum_offload(dev);
618}
619
620static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data)
621{
622 if (data)
623 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
624 else
625 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
626
627 return 0;
628}
629
630static const struct ethtool_ops smsc75xx_ethtool_ops = { 580static const struct ethtool_ops smsc75xx_ethtool_ops = {
631 .get_link = usbnet_get_link, 581 .get_link = usbnet_get_link,
632 .nway_reset = usbnet_nway_reset, 582 .nway_reset = usbnet_nway_reset,
@@ -638,12 +588,6 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
638 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, 588 .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
639 .get_eeprom = smsc75xx_ethtool_get_eeprom, 589 .get_eeprom = smsc75xx_ethtool_get_eeprom,
640 .set_eeprom = smsc75xx_ethtool_set_eeprom, 590 .set_eeprom = smsc75xx_ethtool_set_eeprom,
641 .get_tx_csum = ethtool_op_get_tx_csum,
642 .set_tx_csum = ethtool_op_set_tx_hw_csum,
643 .get_rx_csum = smsc75xx_ethtool_get_rx_csum,
644 .set_rx_csum = smsc75xx_ethtool_set_rx_csum,
645 .get_tso = ethtool_op_get_tso,
646 .set_tso = smsc75xx_ethtool_set_tso,
647}; 591};
648 592
649static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 593static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -782,6 +726,30 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
782 return usbnet_change_mtu(netdev, new_mtu); 726 return usbnet_change_mtu(netdev, new_mtu);
783} 727}
784 728
729/* Enable or disable Rx checksum offload engine */
730static int smsc75xx_set_features(struct net_device *netdev, u32 features)
731{
732 struct usbnet *dev = netdev_priv(netdev);
733 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
734 unsigned long flags;
735 int ret;
736
737 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
738
739 if (features & NETIF_F_RXCSUM)
740 pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
741 else
742 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
743
744 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
745 /* it's racing here! */
746
747 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
748 check_warn_return(ret, "Error writing RFE_CTL");
749
750 return 0;
751}
752
785static int smsc75xx_reset(struct usbnet *dev) 753static int smsc75xx_reset(struct usbnet *dev)
786{ 754{
787 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 755 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -960,11 +928,7 @@ static int smsc75xx_reset(struct usbnet *dev)
960 netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl); 928 netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);
961 929
962 /* Enable or disable checksum offload engines */ 930 /* Enable or disable checksum offload engines */
963 ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE); 931 smsc75xx_set_features(dev->net, dev->net->features);
964 ret = smsc75xx_set_rx_csum_offload(dev);
965 check_warn_return(ret, "Failed to set rx csum offload: %d", ret);
966
967 smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE);
968 932
969 smsc75xx_set_multicast(dev->net); 933 smsc75xx_set_multicast(dev->net);
970 934
@@ -1037,6 +1001,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
1037 .ndo_validate_addr = eth_validate_addr, 1001 .ndo_validate_addr = eth_validate_addr,
1038 .ndo_do_ioctl = smsc75xx_ioctl, 1002 .ndo_do_ioctl = smsc75xx_ioctl,
1039 .ndo_set_multicast_list = smsc75xx_set_multicast, 1003 .ndo_set_multicast_list = smsc75xx_set_multicast,
1004 .ndo_set_features = smsc75xx_set_features,
1040}; 1005};
1041 1006
1042static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) 1007static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -1065,10 +1030,17 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1065 1030
1066 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); 1031 INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
1067 1032
1068 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; 1033 if (DEFAULT_TX_CSUM_ENABLE) {
1034 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1035 if (DEFAULT_TSO_ENABLE)
1036 dev->net->features |= NETIF_F_SG |
1037 NETIF_F_TSO | NETIF_F_TSO6;
1038 }
1039 if (DEFAULT_RX_CSUM_ENABLE)
1040 dev->net->features |= NETIF_F_RXCSUM;
1069 1041
1070 /* We have to advertise SG otherwise TSO cannot be enabled */ 1042 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1071 dev->net->features |= NETIF_F_SG; 1043 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
1072 1044
1073 /* Init all registers */ 1045 /* Init all registers */
1074 ret = smsc75xx_reset(dev); 1046 ret = smsc75xx_reset(dev);
@@ -1091,10 +1063,11 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1091 } 1063 }
1092} 1064}
1093 1065
1094static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a, 1066static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
1095 u32 rx_cmd_b) 1067 u32 rx_cmd_a, u32 rx_cmd_b)
1096{ 1068{
1097 if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { 1069 if (!(dev->net->features & NETIF_F_RXCSUM) ||
1070 unlikely(rx_cmd_a & RX_CMD_A_LCSM)) {
1098 skb->ip_summed = CHECKSUM_NONE; 1071 skb->ip_summed = CHECKSUM_NONE;
1099 } else { 1072 } else {
1100 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); 1073 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT));
@@ -1104,8 +1077,6 @@ static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a,
1104 1077
1105static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1078static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1106{ 1079{
1107 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1108
1109 while (skb->len > 0) { 1080 while (skb->len > 0) {
1110 u32 rx_cmd_a, rx_cmd_b, align_count, size; 1081 u32 rx_cmd_a, rx_cmd_b, align_count, size;
1111 struct sk_buff *ax_skb; 1082 struct sk_buff *ax_skb;
@@ -1145,11 +1116,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1145 1116
1146 /* last frame in this batch */ 1117 /* last frame in this batch */
1147 if (skb->len == size) { 1118 if (skb->len == size) {
1148 if (pdata->use_rx_csum) 1119 smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a,
1149 smsc75xx_rx_csum_offload(skb, rx_cmd_a, 1120 rx_cmd_b);
1150 rx_cmd_b);
1151 else
1152 skb->ip_summed = CHECKSUM_NONE;
1153 1121
1154 skb_trim(skb, skb->len - 4); /* remove fcs */ 1122 skb_trim(skb, skb->len - 4); /* remove fcs */
1155 skb->truesize = size + sizeof(struct sk_buff); 1123 skb->truesize = size + sizeof(struct sk_buff);
@@ -1167,11 +1135,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1167 ax_skb->data = packet; 1135 ax_skb->data = packet;
1168 skb_set_tail_pointer(ax_skb, size); 1136 skb_set_tail_pointer(ax_skb, size);
1169 1137
1170 if (pdata->use_rx_csum) 1138 smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a,
1171 smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a, 1139 rx_cmd_b);
1172 rx_cmd_b);
1173 else
1174 ax_skb->ip_summed = CHECKSUM_NONE;
1175 1140
1176 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ 1141 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
1177 ax_skb->truesize = size + sizeof(struct sk_buff); 1142 ax_skb->truesize = size + sizeof(struct sk_buff);
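The smsc75xx conversion drops the get/set_rx_csum ethtool hooks in favour of a hw_features mask plus a single ndo_set_features callback, which the core invokes whenever ethtool or the stack changes the enabled feature set. A generic sketch of that shape; the driver name, register bit and write helper below are hypothetical and do not come from smsc75xx, and the u32 features type matches this kernel generation:

#include <linux/netdevice.h>

/* Hypothetical driver state and register writer (names are illustrative). */
struct foo_priv {
        u32 csum_ctl;                   /* cached csum-control register value */
};

static int foo_write_csum_ctl(struct net_device *netdev, u32 val)
{
        /* Stub: a real driver would push 'val' to the hardware here. */
        return 0;
}

/* Called by the core whenever the enabled feature set changes. */
static int foo_set_features(struct net_device *netdev, u32 features)
{
        struct foo_priv *priv = netdev_priv(netdev);

        if (features & NETIF_F_RXCSUM)
                priv->csum_ctl |= 0x1;          /* hypothetical RX-csum bit */
        else
                priv->csum_ctl &= ~0x1;

        return foo_write_csum_ctl(netdev, priv->csum_ctl);
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_set_features = foo_set_features,
};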
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 48d4efdb4959..f74f3ce71526 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -52,8 +52,6 @@ struct smsc95xx_priv {
52 u32 hash_hi; 52 u32 hash_hi;
53 u32 hash_lo; 53 u32 hash_lo;
54 spinlock_t mac_cr_lock; 54 spinlock_t mac_cr_lock;
55 bool use_tx_csum;
56 bool use_rx_csum;
57}; 55};
58 56
59struct usb_context { 57struct usb_context {
@@ -459,7 +457,7 @@ static int smsc95xx_link_reset(struct usbnet *dev)
459{ 457{
460 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 458 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
461 struct mii_if_info *mii = &dev->mii; 459 struct mii_if_info *mii = &dev->mii;
462 struct ethtool_cmd ecmd; 460 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
463 unsigned long flags; 461 unsigned long flags;
464 u16 lcladv, rmtadv; 462 u16 lcladv, rmtadv;
465 u32 intdata; 463 u32 intdata;
@@ -474,8 +472,9 @@ static int smsc95xx_link_reset(struct usbnet *dev)
474 lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); 472 lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
475 rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA); 473 rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
476 474
477 netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x\n", 475 netif_dbg(dev, link, dev->net,
478 ecmd.speed, ecmd.duplex, lcladv, rmtadv); 476 "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
477 ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
479 478
480 spin_lock_irqsave(&pdata->mac_cr_lock, flags); 479 spin_lock_irqsave(&pdata->mac_cr_lock, flags);
481 if (ecmd.duplex != DUPLEX_FULL) { 480 if (ecmd.duplex != DUPLEX_FULL) {
@@ -517,22 +516,24 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
517} 516}
518 517
519/* Enable or disable Tx & Rx checksum offload engines */ 518/* Enable or disable Tx & Rx checksum offload engines */
520static int smsc95xx_set_csums(struct usbnet *dev) 519static int smsc95xx_set_features(struct net_device *netdev, u32 features)
521{ 520{
522 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 521 struct usbnet *dev = netdev_priv(netdev);
523 u32 read_buf; 522 u32 read_buf;
524 int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); 523 int ret;
524
525 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
525 if (ret < 0) { 526 if (ret < 0) {
526 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret); 527 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
527 return ret; 528 return ret;
528 } 529 }
529 530
530 if (pdata->use_tx_csum) 531 if (features & NETIF_F_HW_CSUM)
531 read_buf |= Tx_COE_EN_; 532 read_buf |= Tx_COE_EN_;
532 else 533 else
533 read_buf &= ~Tx_COE_EN_; 534 read_buf &= ~Tx_COE_EN_;
534 535
535 if (pdata->use_rx_csum) 536 if (features & NETIF_F_RXCSUM)
536 read_buf |= Rx_COE_EN_; 537 read_buf |= Rx_COE_EN_;
537 else 538 else
538 read_buf &= ~Rx_COE_EN_; 539 read_buf &= ~Rx_COE_EN_;
@@ -576,43 +577,6 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
576 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data); 577 return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
577} 578}
578 579
579static u32 smsc95xx_ethtool_get_rx_csum(struct net_device *netdev)
580{
581 struct usbnet *dev = netdev_priv(netdev);
582 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
583
584 return pdata->use_rx_csum;
585}
586
587static int smsc95xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
588{
589 struct usbnet *dev = netdev_priv(netdev);
590 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
591
592 pdata->use_rx_csum = !!val;
593
594 return smsc95xx_set_csums(dev);
595}
596
597static u32 smsc95xx_ethtool_get_tx_csum(struct net_device *netdev)
598{
599 struct usbnet *dev = netdev_priv(netdev);
600 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
601
602 return pdata->use_tx_csum;
603}
604
605static int smsc95xx_ethtool_set_tx_csum(struct net_device *netdev, u32 val)
606{
607 struct usbnet *dev = netdev_priv(netdev);
608 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
609
610 pdata->use_tx_csum = !!val;
611
612 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
613 return smsc95xx_set_csums(dev);
614}
615
616static const struct ethtool_ops smsc95xx_ethtool_ops = { 580static const struct ethtool_ops smsc95xx_ethtool_ops = {
617 .get_link = usbnet_get_link, 581 .get_link = usbnet_get_link,
618 .nway_reset = usbnet_nway_reset, 582 .nway_reset = usbnet_nway_reset,
@@ -624,10 +588,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
624 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len, 588 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
625 .get_eeprom = smsc95xx_ethtool_get_eeprom, 589 .get_eeprom = smsc95xx_ethtool_get_eeprom,
626 .set_eeprom = smsc95xx_ethtool_set_eeprom, 590 .set_eeprom = smsc95xx_ethtool_set_eeprom,
627 .get_tx_csum = smsc95xx_ethtool_get_tx_csum,
628 .set_tx_csum = smsc95xx_ethtool_set_tx_csum,
629 .get_rx_csum = smsc95xx_ethtool_get_rx_csum,
630 .set_rx_csum = smsc95xx_ethtool_set_rx_csum,
631}; 591};
632 592
633static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 593static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -755,7 +715,6 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
755static int smsc95xx_reset(struct usbnet *dev) 715static int smsc95xx_reset(struct usbnet *dev)
756{ 716{
757 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 717 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
758 struct net_device *netdev = dev->net;
759 u32 read_buf, write_buf, burst_cap; 718 u32 read_buf, write_buf, burst_cap;
760 int ret = 0, timeout; 719 int ret = 0, timeout;
761 720
@@ -975,12 +934,7 @@ static int smsc95xx_reset(struct usbnet *dev)
975 } 934 }
976 935
977 /* Enable or disable checksum offload engines */ 936 /* Enable or disable checksum offload engines */
978 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum); 937 smsc95xx_set_features(dev->net, dev->net->features);
979 ret = smsc95xx_set_csums(dev);
980 if (ret < 0) {
981 netdev_warn(dev->net, "Failed to set csum offload: %d\n", ret);
982 return ret;
983 }
984 938
985 smsc95xx_set_multicast(dev->net); 939 smsc95xx_set_multicast(dev->net);
986 940
@@ -1019,6 +973,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
1019 .ndo_validate_addr = eth_validate_addr, 973 .ndo_validate_addr = eth_validate_addr,
1020 .ndo_do_ioctl = smsc95xx_ioctl, 974 .ndo_do_ioctl = smsc95xx_ioctl,
1021 .ndo_set_multicast_list = smsc95xx_set_multicast, 975 .ndo_set_multicast_list = smsc95xx_set_multicast,
976 .ndo_set_features = smsc95xx_set_features,
1022}; 977};
1023 978
1024static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) 979static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -1045,8 +1000,12 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1045 1000
1046 spin_lock_init(&pdata->mac_cr_lock); 1001 spin_lock_init(&pdata->mac_cr_lock);
1047 1002
1048 pdata->use_tx_csum = DEFAULT_TX_CSUM_ENABLE; 1003 if (DEFAULT_TX_CSUM_ENABLE)
1049 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; 1004 dev->net->features |= NETIF_F_HW_CSUM;
1005 if (DEFAULT_RX_CSUM_ENABLE)
1006 dev->net->features |= NETIF_F_RXCSUM;
1007
1008 dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
1050 1009
1051 smsc95xx_init_mac_address(dev); 1010 smsc95xx_init_mac_address(dev);
1052 1011
@@ -1056,7 +1015,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1056 dev->net->netdev_ops = &smsc95xx_netdev_ops; 1015 dev->net->netdev_ops = &smsc95xx_netdev_ops;
1057 dev->net->ethtool_ops = &smsc95xx_ethtool_ops; 1016 dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
1058 dev->net->flags |= IFF_MULTICAST; 1017 dev->net->flags |= IFF_MULTICAST;
1059 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD; 1018 dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
1060 return 0; 1019 return 0;
1061} 1020}
1062 1021
@@ -1080,8 +1039,6 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1080 1039
1081static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1040static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1082{ 1041{
1083 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1084
1085 while (skb->len > 0) { 1042 while (skb->len > 0) {
1086 u32 header, align_count; 1043 u32 header, align_count;
1087 struct sk_buff *ax_skb; 1044 struct sk_buff *ax_skb;
@@ -1123,7 +1080,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1123 1080
1124 /* last frame in this batch */ 1081 /* last frame in this batch */
1125 if (skb->len == size) { 1082 if (skb->len == size) {
1126 if (pdata->use_rx_csum) 1083 if (dev->net->features & NETIF_F_RXCSUM)
1127 smsc95xx_rx_csum_offload(skb); 1084 smsc95xx_rx_csum_offload(skb);
1128 skb_trim(skb, skb->len - 4); /* remove fcs */ 1085 skb_trim(skb, skb->len - 4); /* remove fcs */
1129 skb->truesize = size + sizeof(struct sk_buff); 1086 skb->truesize = size + sizeof(struct sk_buff);
@@ -1141,7 +1098,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1141 ax_skb->data = packet; 1098 ax_skb->data = packet;
1142 skb_set_tail_pointer(ax_skb, size); 1099 skb_set_tail_pointer(ax_skb, size);
1143 1100
1144 if (pdata->use_rx_csum) 1101 if (dev->net->features & NETIF_F_RXCSUM)
1145 smsc95xx_rx_csum_offload(ax_skb); 1102 smsc95xx_rx_csum_offload(ax_skb);
1146 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ 1103 skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
1147 ax_skb->truesize = size + sizeof(struct sk_buff); 1104 ax_skb->truesize = size + sizeof(struct sk_buff);
@@ -1174,8 +1131,7 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
1174static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, 1131static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
1175 struct sk_buff *skb, gfp_t flags) 1132 struct sk_buff *skb, gfp_t flags)
1176{ 1133{
1177 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 1134 bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
1178 bool csum = pdata->use_tx_csum && (skb->ip_summed == CHECKSUM_PARTIAL);
1179 int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD; 1135 int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
1180 u32 tx_cmd_a, tx_cmd_b; 1136 u32 tx_cmd_a, tx_cmd_b;
1181 1137
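smsc95xx gets the same treatment; in addition, because the stack only hands the driver CHECKSUM_PARTIAL skbs while NETIF_F_HW_CSUM is enabled, tx_fixup can test skb->ip_summed directly instead of a cached use_tx_csum flag. The bind-time half of the conversion boils down to declaring what is on by default versus what the user may later toggle with ethtool -K. A hedged sketch, with FOO_DEFAULT_* standing in for the driver's compile-time defaults:

#include <linux/netdevice.h>
#include <linux/usb/usbnet.h>

#define FOO_DEFAULT_TX_CSUM_ENABLE  1   /* stand-ins for driver defaults */
#define FOO_DEFAULT_RX_CSUM_ENABLE  1

static void foo_bind_features(struct usbnet *dev)
{
        /* Features that are enabled right after probe. */
        if (FOO_DEFAULT_TX_CSUM_ENABLE)
                dev->net->features |= NETIF_F_HW_CSUM;
        if (FOO_DEFAULT_RX_CSUM_ENABLE)
                dev->net->features |= NETIF_F_RXCSUM;

        /* Features the user may toggle later; toggles call ndo_set_features. */
        dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
}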
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9ab439d144ed..e6dd24466965 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1541,9 +1541,9 @@ EXPORT_SYMBOL_GPL(usbnet_resume);
1541 1541
1542static int __init usbnet_init(void) 1542static int __init usbnet_init(void)
1543{ 1543{
1544 /* compiler should optimize this out */ 1544 /* Compiler should optimize this out. */
1545 BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb) 1545 BUILD_BUG_ON(
1546 < sizeof (struct skb_data)); 1546 FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
1547 1547
1548 random_ether_addr(node_id); 1548 random_ether_addr(node_id);
1549 return 0; 1549 return 0;
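The usbnet hunk rewrites the compile-time check with FIELD_SIZEOF(), which yields the size of a struct member without needing an instance; BUILD_BUG_ON() then fails the build if struct skb_data would no longer fit in skb->cb. A userspace-flavoured sketch of the same idea, using C11 _Static_assert and made-up struct names:

#include <stddef.h>

/* Size of a member without an object, like FIELD_SIZEOF() in the kernel. */
#define MEMBER_SIZEOF(type, member) (sizeof(((type *)0)->member))

/* Hypothetical stand-ins for sk_buff.cb and the driver's per-skb state. */
struct fake_skb  { char cb[48]; };
struct fake_data { void *owner; unsigned long length; int state; };

/* Fails at compile time if the private state no longer fits in cb[]. */
_Static_assert(MEMBER_SIZEOF(struct fake_skb, cb) >= sizeof(struct fake_data),
               "per-skb state must fit inside skb->cb");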
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 3b99f64104fd..8461576fa015 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -22,7 +22,6 @@
22 22
23#define MIN_MTU 68 /* Min L3 MTU */ 23#define MIN_MTU 68 /* Min L3 MTU */
24#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */ 24#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
25#define MTU_PAD (ETH_HLEN + 4) /* Max difference between L2 and L3 size MTU */
26 25
27struct veth_net_stats { 26struct veth_net_stats {
28 unsigned long rx_packets; 27 unsigned long rx_packets;
@@ -36,7 +35,6 @@ struct veth_net_stats {
36struct veth_priv { 35struct veth_priv {
37 struct net_device *peer; 36 struct net_device *peer;
38 struct veth_net_stats __percpu *stats; 37 struct veth_net_stats __percpu *stats;
39 unsigned ip_summed;
40}; 38};
41 39
42/* 40/*
@@ -53,7 +51,7 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
53{ 51{
54 cmd->supported = 0; 52 cmd->supported = 0;
55 cmd->advertising = 0; 53 cmd->advertising = 0;
56 cmd->speed = SPEED_10000; 54 ethtool_cmd_speed_set(cmd, SPEED_10000);
57 cmd->duplex = DUPLEX_FULL; 55 cmd->duplex = DUPLEX_FULL;
58 cmd->port = PORT_TP; 56 cmd->port = PORT_TP;
59 cmd->phy_address = 0; 57 cmd->phy_address = 0;
@@ -99,47 +97,10 @@ static void veth_get_ethtool_stats(struct net_device *dev,
99 data[0] = priv->peer->ifindex; 97 data[0] = priv->peer->ifindex;
100} 98}
101 99
102static u32 veth_get_rx_csum(struct net_device *dev)
103{
104 struct veth_priv *priv;
105
106 priv = netdev_priv(dev);
107 return priv->ip_summed == CHECKSUM_UNNECESSARY;
108}
109
110static int veth_set_rx_csum(struct net_device *dev, u32 data)
111{
112 struct veth_priv *priv;
113
114 priv = netdev_priv(dev);
115 priv->ip_summed = data ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
116 return 0;
117}
118
119static u32 veth_get_tx_csum(struct net_device *dev)
120{
121 return (dev->features & NETIF_F_NO_CSUM) != 0;
122}
123
124static int veth_set_tx_csum(struct net_device *dev, u32 data)
125{
126 if (data)
127 dev->features |= NETIF_F_NO_CSUM;
128 else
129 dev->features &= ~NETIF_F_NO_CSUM;
130 return 0;
131}
132
133static const struct ethtool_ops veth_ethtool_ops = { 100static const struct ethtool_ops veth_ethtool_ops = {
134 .get_settings = veth_get_settings, 101 .get_settings = veth_get_settings,
135 .get_drvinfo = veth_get_drvinfo, 102 .get_drvinfo = veth_get_drvinfo,
136 .get_link = ethtool_op_get_link, 103 .get_link = ethtool_op_get_link,
137 .get_rx_csum = veth_get_rx_csum,
138 .set_rx_csum = veth_set_rx_csum,
139 .get_tx_csum = veth_get_tx_csum,
140 .set_tx_csum = veth_set_tx_csum,
141 .get_sg = ethtool_op_get_sg,
142 .set_sg = ethtool_op_set_sg,
143 .get_strings = veth_get_strings, 104 .get_strings = veth_get_strings,
144 .get_sset_count = veth_get_sset_count, 105 .get_sset_count = veth_get_sset_count,
145 .get_ethtool_stats = veth_get_ethtool_stats, 106 .get_ethtool_stats = veth_get_ethtool_stats,
@@ -168,8 +129,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
168 129
169 /* don't change ip_summed == CHECKSUM_PARTIAL, as that 130 /* don't change ip_summed == CHECKSUM_PARTIAL, as that
170 will cause bad checksum on forwarded packets */ 131 will cause bad checksum on forwarded packets */
171 if (skb->ip_summed == CHECKSUM_NONE) 132 if (skb->ip_summed == CHECKSUM_NONE &&
172 skb->ip_summed = rcv_priv->ip_summed; 133 rcv->features & NETIF_F_RXCSUM)
134 skb->ip_summed = CHECKSUM_UNNECESSARY;
173 135
174 length = skb->len; 136 length = skb->len;
175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) 137 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
@@ -304,6 +266,8 @@ static void veth_setup(struct net_device *dev)
304 dev->ethtool_ops = &veth_ethtool_ops; 266 dev->ethtool_ops = &veth_ethtool_ops;
305 dev->features |= NETIF_F_LLTX; 267 dev->features |= NETIF_F_LLTX;
306 dev->destructor = veth_dev_free; 268 dev->destructor = veth_dev_free;
269
270 dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
307} 271}
308 272
309/* 273/*
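With the ethtool rx-csum knob gone, veth_xmit derives the behaviour from the peer's feature bits: CHECKSUM_PARTIAL is left untouched (forwarding it as-is would produce bad checksums on the far side), and CHECKSUM_NONE is only upgraded to CHECKSUM_UNNECESSARY when the receiving device advertises NETIF_F_RXCSUM. A minimal restatement of that rule; the helper name is illustrative:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Illustrative helper: adjust ip_summed before handing the skb to the peer. */
static void veth_fixup_csum(struct sk_buff *skb, struct net_device *rcv)
{
        /* Never touch CHECKSUM_PARTIAL: the checksum still has to be filled in. */
        if (skb->ip_summed == CHECKSUM_NONE &&
            (rcv->features & NETIF_F_RXCSUM))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}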
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index eb5d75df5d5d..7f23ab913fd9 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -29,6 +29,8 @@
29 29
30*/ 30*/
31 31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
32#define DRV_NAME "via-rhine" 34#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.5.0" 35#define DRV_VERSION "1.5.0"
34#define DRV_RELDATE "2010-10-09" 36#define DRV_RELDATE "2010-10-09"
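The rest of the via-rhine diff converts raw printk() calls to the pr_*/netdev_* helpers: pr_fmt() makes every pr_info()/pr_err() carry the module name automatically, and netdev_info()/netdev_dbg()/netdev_warn() prefix the driver, device and interface name, so the hand-written "%s: ..." with dev->name disappears. A small sketch of the pattern; the function and message are illustrative:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void example_logging(struct net_device *dev, int status)
{
        pr_info("loaded\n");                    /* prefixed with module name */
        netdev_info(dev, "link status %d\n",    /* prefixed with driver/device/ifname */
                    status);
        if (status < 0)
                netdev_warn(dev, "bad status %d\n", status);
}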
@@ -37,6 +39,7 @@
37/* A few user-configurable values. 39/* A few user-configurable values.
38 These may be modified when a driver module is loaded. */ 40 These may be modified when a driver module is loaded. */
39 41
42#define DEBUG
40static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ 43static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
41static int max_interrupt_work = 20; 44static int max_interrupt_work = 20;
42 45
@@ -111,8 +114,7 @@ static const int multicast_filter_limit = 32;
111 114
112/* These identify the driver base version and may not be removed. */ 115/* These identify the driver base version and may not be removed. */
113static const char version[] __devinitconst = 116static const char version[] __devinitconst =
114 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE 117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
115 " Written by Donald Becker\n";
116 118
117/* This driver was written to use PCI memory space. Some early versions 119/* This driver was written to use PCI memory space. Some early versions
118 of the Rhine may only work correctly with I/O space accesses. */ 120 of the Rhine may only work correctly with I/O space accesses. */
@@ -495,14 +497,15 @@ static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
495static void rhine_init_cam_filter(struct net_device *dev); 497static void rhine_init_cam_filter(struct net_device *dev);
496static void rhine_update_vcam(struct net_device *dev); 498static void rhine_update_vcam(struct net_device *dev);
497 499
498#define RHINE_WAIT_FOR(condition) do { \ 500#define RHINE_WAIT_FOR(condition) \
499 int i=1024; \ 501do { \
500 while (!(condition) && --i) \ 502 int i = 1024; \
501 ; \ 503 while (!(condition) && --i) \
502 if (debug > 1 && i < 512) \ 504 ; \
503 printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \ 505 if (debug > 1 && i < 512) \
504 DRV_NAME, 1024-i, __func__, __LINE__); \ 506 pr_info("%4d cycles used @ %s:%d\n", \
505} while(0) 507 1024 - i, __func__, __LINE__); \
508} while (0)
506 509
507static inline u32 get_intr_status(struct net_device *dev) 510static inline u32 get_intr_status(struct net_device *dev)
508{ 511{
@@ -571,8 +574,8 @@ static void rhine_power_init(struct net_device *dev)
571 default: 574 default:
572 reason = "Unknown"; 575 reason = "Unknown";
573 } 576 }
574 printk(KERN_INFO "%s: Woke system up. Reason: %s.\n", 577 netdev_info(dev, "Woke system up. Reason: %s\n",
575 DRV_NAME, reason); 578 reason);
576 } 579 }
577 } 580 }
578} 581}
@@ -586,8 +589,7 @@ static void rhine_chip_reset(struct net_device *dev)
586 IOSYNC; 589 IOSYNC;
587 590
588 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) { 591 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
589 printk(KERN_INFO "%s: Reset not complete yet. " 592 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
590 "Trying harder.\n", DRV_NAME);
591 593
592 /* Force reset */ 594 /* Force reset */
593 if (rp->quirks & rqForceReset) 595 if (rp->quirks & rqForceReset)
@@ -598,9 +600,9 @@ static void rhine_chip_reset(struct net_device *dev)
598 } 600 }
599 601
600 if (debug > 1) 602 if (debug > 1)
601 printk(KERN_INFO "%s: Reset %s.\n", dev->name, 603 netdev_info(dev, "Reset %s\n",
602 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ? 604 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
603 "failed" : "succeeded"); 605 "failed" : "succeeded");
604} 606}
605 607
606#ifdef USE_MMIO 608#ifdef USE_MMIO
@@ -728,9 +730,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
728 730
729/* when built into the kernel, we only print version if device is found */ 731/* when built into the kernel, we only print version if device is found */
730#ifndef MODULE 732#ifndef MODULE
731 static int printed_version; 733 pr_info_once("%s\n", version);
732 if (!printed_version++)
733 printk(version);
734#endif 734#endif
735 735
736 io_size = 256; 736 io_size = 256;
@@ -765,8 +765,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
765 /* this should always be supported */ 765 /* this should always be supported */
766 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 766 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
767 if (rc) { 767 if (rc) {
768 printk(KERN_ERR "32-bit PCI DMA addresses not supported by " 768 dev_err(&pdev->dev,
769 "the card!?\n"); 769 "32-bit PCI DMA addresses not supported by the card!?\n");
770 goto err_out; 770 goto err_out;
771 } 771 }
772 772
@@ -774,7 +774,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
774 if ((pci_resource_len(pdev, 0) < io_size) || 774 if ((pci_resource_len(pdev, 0) < io_size) ||
775 (pci_resource_len(pdev, 1) < io_size)) { 775 (pci_resource_len(pdev, 1) < io_size)) {
776 rc = -EIO; 776 rc = -EIO;
777 printk(KERN_ERR "Insufficient PCI resources, aborting\n"); 777 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
778 goto err_out; 778 goto err_out;
779 } 779 }
780 780
@@ -786,7 +786,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
786 dev = alloc_etherdev(sizeof(struct rhine_private)); 786 dev = alloc_etherdev(sizeof(struct rhine_private));
787 if (!dev) { 787 if (!dev) {
788 rc = -ENOMEM; 788 rc = -ENOMEM;
789 printk(KERN_ERR "alloc_etherdev failed\n"); 789 dev_err(&pdev->dev, "alloc_etherdev failed\n");
790 goto err_out; 790 goto err_out;
791 } 791 }
792 SET_NETDEV_DEV(dev, &pdev->dev); 792 SET_NETDEV_DEV(dev, &pdev->dev);
@@ -804,8 +804,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
804 ioaddr = pci_iomap(pdev, bar, io_size); 804 ioaddr = pci_iomap(pdev, bar, io_size);
805 if (!ioaddr) { 805 if (!ioaddr) {
806 rc = -EIO; 806 rc = -EIO;
807 printk(KERN_ERR "ioremap failed for device %s, region 0x%X " 807 dev_err(&pdev->dev,
808 "@ 0x%lX\n", pci_name(pdev), io_size, memaddr); 808 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
809 pci_name(pdev), io_size, memaddr);
809 goto err_out_free_res; 810 goto err_out_free_res;
810 } 811 }
811 812
@@ -820,8 +821,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
820 unsigned char b = readb(ioaddr+reg); 821 unsigned char b = readb(ioaddr+reg);
821 if (a != b) { 822 if (a != b) {
822 rc = -EIO; 823 rc = -EIO;
823 printk(KERN_ERR "MMIO do not match PIO [%02x] " 824 dev_err(&pdev->dev,
824 "(%02x != %02x)\n", reg, a, b); 825 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
826 reg, a, b);
825 goto err_out_unmap; 827 goto err_out_unmap;
826 } 828 }
827 } 829 }
@@ -836,13 +838,15 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
836 838
837 for (i = 0; i < 6; i++) 839 for (i = 0; i < 6; i++)
838 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); 840 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
839 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
840 841
841 if (!is_valid_ether_addr(dev->perm_addr)) { 842 if (!is_valid_ether_addr(dev->dev_addr)) {
842 rc = -EIO; 843 /* Report it and use a random ethernet address instead */
843 printk(KERN_ERR "Invalid MAC address\n"); 844 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
844 goto err_out_unmap; 845 random_ether_addr(dev->dev_addr);
846 netdev_info(dev, "Using random MAC address: %pM\n",
847 dev->dev_addr);
845 } 848 }
849 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
846 850
847 /* For Rhine-I/II, phy_id is loaded from EEPROM */ 851 /* For Rhine-I/II, phy_id is loaded from EEPROM */
848 if (!phy_id) 852 if (!phy_id)
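Instead of failing probe on an invalid station address, the driver now logs the bad value and falls back to a locally generated random MAC before copying the result into perm_addr. A compact sketch of that fallback using the helpers available in this kernel generation; the function name is illustrative:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Validate the EEPROM-provided address; fall back to a random one. */
static void fixup_mac_address(struct net_device *dev)
{
        if (!is_valid_ether_addr(dev->dev_addr)) {
                netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
                random_ether_addr(dev->dev_addr);
                netdev_info(dev, "Using random MAC address: %pM\n",
                            dev->dev_addr);
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}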
@@ -878,14 +882,14 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
878 if (rc) 882 if (rc)
879 goto err_out_unmap; 883 goto err_out_unmap;
880 884
881 printk(KERN_INFO "%s: VIA %s at 0x%lx, %pM, IRQ %d.\n", 885 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
882 dev->name, name, 886 name,
883#ifdef USE_MMIO 887#ifdef USE_MMIO
884 memaddr, 888 memaddr,
885#else 889#else
886 (long)ioaddr, 890 (long)ioaddr,
887#endif 891#endif
888 dev->dev_addr, pdev->irq); 892 dev->dev_addr, pdev->irq);
889 893
890 pci_set_drvdata(pdev, dev); 894 pci_set_drvdata(pdev, dev);
891 895
@@ -896,11 +900,11 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
896 mdio_write(dev, phy_id, MII_BMCR, mii_cmd); 900 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
897 if (mii_status != 0xffff && mii_status != 0x0000) { 901 if (mii_status != 0xffff && mii_status != 0x0000) {
898 rp->mii_if.advertising = mdio_read(dev, phy_id, 4); 902 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
899 printk(KERN_INFO "%s: MII PHY found at address " 903 netdev_info(dev,
900 "%d, status 0x%4.4x advertising %4.4x " 904 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
901 "Link %4.4x.\n", dev->name, phy_id, 905 phy_id,
902 mii_status, rp->mii_if.advertising, 906 mii_status, rp->mii_if.advertising,
903 mdio_read(dev, phy_id, 5)); 907 mdio_read(dev, phy_id, 5));
904 908
905 /* set IFF_RUNNING */ 909 /* set IFF_RUNNING */
906 if (mii_status & BMSR_LSTATUS) 910 if (mii_status & BMSR_LSTATUS)
@@ -912,8 +916,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
912 } 916 }
913 rp->mii_if.phy_id = phy_id; 917 rp->mii_if.phy_id = phy_id;
914 if (debug > 1 && avoid_D3) 918 if (debug > 1 && avoid_D3)
915 printk(KERN_INFO "%s: No D3 power state at shutdown.\n", 919 netdev_info(dev, "No D3 power state at shutdown\n");
916 dev->name);
917 920
918 return 0; 921 return 0;
919 922
@@ -938,7 +941,7 @@ static int alloc_ring(struct net_device* dev)
938 TX_RING_SIZE * sizeof(struct tx_desc), 941 TX_RING_SIZE * sizeof(struct tx_desc),
939 &ring_dma); 942 &ring_dma);
940 if (!ring) { 943 if (!ring) {
941 printk(KERN_ERR "Could not allocate DMA memory.\n"); 944 netdev_err(dev, "Could not allocate DMA memory\n");
942 return -ENOMEM; 945 return -ENOMEM;
943 } 946 }
944 if (rp->quirks & rqRhineI) { 947 if (rp->quirks & rqRhineI) {
@@ -1098,8 +1101,8 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1098 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, 1101 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1099 ioaddr + ChipCmd1); 1102 ioaddr + ChipCmd1);
1100 if (debug > 1) 1103 if (debug > 1)
1101 printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name, 1104 netdev_info(dev, "force_media %d, carrier %d\n",
1102 rp->mii_if.force_media, netif_carrier_ok(dev)); 1105 rp->mii_if.force_media, netif_carrier_ok(dev));
1103} 1106}
1104 1107
1105/* Called after status of force_media possibly changed */ 1108/* Called after status of force_media possibly changed */
@@ -1113,9 +1116,8 @@ static void rhine_set_carrier(struct mii_if_info *mii)
1113 else /* Let MMI library update carrier status */ 1116 else /* Let MMI library update carrier status */
1114 rhine_check_media(mii->dev, 0); 1117 rhine_check_media(mii->dev, 0);
1115 if (debug > 1) 1118 if (debug > 1)
1116 printk(KERN_INFO "%s: force_media %d, carrier %d\n", 1119 netdev_info(mii->dev, "force_media %d, carrier %d\n",
1117 mii->dev->name, mii->force_media, 1120 mii->force_media, netif_carrier_ok(mii->dev));
1118 netif_carrier_ok(mii->dev));
1119} 1121}
1120 1122
1121/** 1123/**
@@ -1402,8 +1404,7 @@ static int rhine_open(struct net_device *dev)
1402 return rc; 1404 return rc;
1403 1405
1404 if (debug > 1) 1406 if (debug > 1)
1405 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n", 1407 netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1406 dev->name, rp->pdev->irq);
1407 1408
1408 rc = alloc_ring(dev); 1409 rc = alloc_ring(dev);
1409 if (rc) { 1410 if (rc) {
@@ -1415,10 +1416,9 @@ static int rhine_open(struct net_device *dev)
1415 rhine_chip_reset(dev); 1416 rhine_chip_reset(dev);
1416 init_registers(dev); 1417 init_registers(dev);
1417 if (debug > 2) 1418 if (debug > 2)
1418 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x " 1419 netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
1419 "MII status: %4.4x.\n", 1420 __func__, ioread16(ioaddr + ChipCmd),
1420 dev->name, ioread16(ioaddr + ChipCmd), 1421 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1421 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1422 1422
1423 netif_start_queue(dev); 1423 netif_start_queue(dev);
1424 1424
@@ -1461,10 +1461,9 @@ static void rhine_tx_timeout(struct net_device *dev)
1461 struct rhine_private *rp = netdev_priv(dev); 1461 struct rhine_private *rp = netdev_priv(dev);
1462 void __iomem *ioaddr = rp->base; 1462 void __iomem *ioaddr = rp->base;
1463 1463
1464 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " 1464 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1465 "%4.4x, resetting...\n", 1465 ioread16(ioaddr + IntrStatus),
1466 dev->name, ioread16(ioaddr + IntrStatus), 1466 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1467 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1468 1467
1469 schedule_work(&rp->reset_task); 1468 schedule_work(&rp->reset_task);
1470} 1469}
@@ -1551,8 +1550,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1551 spin_unlock_irqrestore(&rp->lock, flags); 1550 spin_unlock_irqrestore(&rp->lock, flags);
1552 1551
1553 if (debug > 4) { 1552 if (debug > 4) {
1554 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", 1553 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1555 dev->name, rp->cur_tx-1, entry); 1554 rp->cur_tx-1, entry);
1556 } 1555 }
1557 return NETDEV_TX_OK; 1556 return NETDEV_TX_OK;
1558} 1557}
@@ -1578,8 +1577,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1578 IOSYNC; 1577 IOSYNC;
1579 1578
1580 if (debug > 4) 1579 if (debug > 4)
1581 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n", 1580 netdev_dbg(dev, "Interrupt, status %08x\n",
1582 dev->name, intr_status); 1581 intr_status);
1583 1582
1584 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | 1583 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1585 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { 1584 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
@@ -1597,9 +1596,9 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1597 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn)); 1596 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1598 if (debug > 2 && 1597 if (debug > 2 &&
1599 ioread8(ioaddr+ChipCmd) & CmdTxOn) 1598 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1600 printk(KERN_WARNING "%s: " 1599 netdev_warn(dev,
1601 "rhine_interrupt() Tx engine " 1600 "%s: Tx engine still on\n",
1602 "still on.\n", dev->name); 1601 __func__);
1603 } 1602 }
1604 rhine_tx(dev); 1603 rhine_tx(dev);
1605 } 1604 }
@@ -1611,16 +1610,15 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1611 rhine_error(dev, intr_status); 1610 rhine_error(dev, intr_status);
1612 1611
1613 if (--boguscnt < 0) { 1612 if (--boguscnt < 0) {
1614 printk(KERN_WARNING "%s: Too much work at interrupt, " 1613 netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
1615 "status=%#8.8x.\n", 1614 intr_status);
1616 dev->name, intr_status);
1617 break; 1615 break;
1618 } 1616 }
1619 } 1617 }
1620 1618
1621 if (debug > 3) 1619 if (debug > 3)
1622 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n", 1620 netdev_dbg(dev, "exiting interrupt, status=%08x\n",
1623 dev->name, ioread16(ioaddr + IntrStatus)); 1621 ioread16(ioaddr + IntrStatus));
1624 return IRQ_RETVAL(handled); 1622 return IRQ_RETVAL(handled);
1625} 1623}
1626 1624
@@ -1637,15 +1635,14 @@ static void rhine_tx(struct net_device *dev)
1637 while (rp->dirty_tx != rp->cur_tx) { 1635 while (rp->dirty_tx != rp->cur_tx) {
1638 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); 1636 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1639 if (debug > 6) 1637 if (debug > 6)
1640 printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n", 1638 netdev_dbg(dev, "Tx scavenge %d status %08x\n",
1641 entry, txstatus); 1639 entry, txstatus);
1642 if (txstatus & DescOwn) 1640 if (txstatus & DescOwn)
1643 break; 1641 break;
1644 if (txstatus & 0x8000) { 1642 if (txstatus & 0x8000) {
1645 if (debug > 1) 1643 if (debug > 1)
1646 printk(KERN_DEBUG "%s: Transmit error, " 1644 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1647 "Tx status %8.8x.\n", 1645 txstatus);
1648 dev->name, txstatus);
1649 dev->stats.tx_errors++; 1646 dev->stats.tx_errors++;
1650 if (txstatus & 0x0400) 1647 if (txstatus & 0x0400)
1651 dev->stats.tx_carrier_errors++; 1648 dev->stats.tx_carrier_errors++;
@@ -1668,9 +1665,9 @@ static void rhine_tx(struct net_device *dev)
1668 else 1665 else
1669 dev->stats.collisions += txstatus & 0x0F; 1666 dev->stats.collisions += txstatus & 0x0F;
1670 if (debug > 6) 1667 if (debug > 6)
1671 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n", 1668 netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
1672 (txstatus >> 3) & 0xF, 1669 (txstatus >> 3) & 0xF,
1673 txstatus & 0xF); 1670 txstatus & 0xF);
1674 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; 1671 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1675 dev->stats.tx_packets++; 1672 dev->stats.tx_packets++;
1676 } 1673 }
@@ -1703,7 +1700,7 @@ static void rhine_tx(struct net_device *dev)
1703static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size) 1700static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1704{ 1701{
1705 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; 1702 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1706 return ntohs(*(u16 *)trailer); 1703 return be16_to_cpup((__be16 *)trailer);
1707} 1704}
1708 1705
1709/* Process up to limit frames from receive ring */ 1706/* Process up to limit frames from receive ring */
@@ -1714,9 +1711,9 @@ static int rhine_rx(struct net_device *dev, int limit)
1714 int entry = rp->cur_rx % RX_RING_SIZE; 1711 int entry = rp->cur_rx % RX_RING_SIZE;
1715 1712
1716 if (debug > 4) { 1713 if (debug > 4) {
1717 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", 1714 netdev_dbg(dev, "%s(), entry %d status %08x\n",
1718 dev->name, entry, 1715 __func__, entry,
1719 le32_to_cpu(rp->rx_head_desc->rx_status)); 1716 le32_to_cpu(rp->rx_head_desc->rx_status));
1720 } 1717 }
1721 1718
1722 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1719 /* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1730,26 +1727,26 @@ static int rhine_rx(struct net_device *dev, int limit)
1730 break; 1727 break;
1731 1728
1732 if (debug > 4) 1729 if (debug > 4)
1733 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", 1730 netdev_dbg(dev, "%s() status is %08x\n",
1734 desc_status); 1731 __func__, desc_status);
1735 1732
1736 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { 1733 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1737 if ((desc_status & RxWholePkt) != RxWholePkt) { 1734 if ((desc_status & RxWholePkt) != RxWholePkt) {
1738 printk(KERN_WARNING "%s: Oversized Ethernet " 1735 netdev_warn(dev,
1739 "frame spanned multiple buffers, entry " 1736 "Oversized Ethernet frame spanned multiple buffers, "
1740 "%#x length %d status %8.8x!\n", 1737 "entry %#x length %d status %08x!\n",
1741 dev->name, entry, data_size, 1738 entry, data_size,
1742 desc_status); 1739 desc_status);
1743 printk(KERN_WARNING "%s: Oversized Ethernet " 1740 netdev_warn(dev,
1744 "frame %p vs %p.\n", dev->name, 1741 "Oversized Ethernet frame %p vs %p\n",
1745 rp->rx_head_desc, &rp->rx_ring[entry]); 1742 rp->rx_head_desc,
1743 &rp->rx_ring[entry]);
1746 dev->stats.rx_length_errors++; 1744 dev->stats.rx_length_errors++;
1747 } else if (desc_status & RxErr) { 1745 } else if (desc_status & RxErr) {
1748 /* There was a error. */ 1746 /* There was a error. */
1749 if (debug > 2) 1747 if (debug > 2)
1750 printk(KERN_DEBUG "rhine_rx() Rx " 1748 netdev_dbg(dev, "%s() Rx error was %08x\n",
1751 "error was %8.8x.\n", 1749 __func__, desc_status);
1752 desc_status);
1753 dev->stats.rx_errors++; 1750 dev->stats.rx_errors++;
1754 if (desc_status & 0x0030) 1751 if (desc_status & 0x0030)
1755 dev->stats.rx_length_errors++; 1752 dev->stats.rx_length_errors++;
@@ -1791,9 +1788,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1791 } else { 1788 } else {
1792 skb = rp->rx_skbuff[entry]; 1789 skb = rp->rx_skbuff[entry];
1793 if (skb == NULL) { 1790 if (skb == NULL) {
1794 printk(KERN_ERR "%s: Inconsistent Rx " 1791 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1795 "descriptor chain.\n",
1796 dev->name);
1797 break; 1792 break;
1798 } 1793 }
1799 rp->rx_skbuff[entry] = NULL; 1794 rp->rx_skbuff[entry] = NULL;
@@ -1886,9 +1881,8 @@ static void rhine_restart_tx(struct net_device *dev) {
1886 else { 1881 else {
1887 /* This should never happen */ 1882 /* This should never happen */
1888 if (debug > 1) 1883 if (debug > 1)
1889 printk(KERN_WARNING "%s: rhine_restart_tx() " 1884 netdev_warn(dev, "%s() Another error occurred %08x\n",
1890 "Another error occurred %8.8x.\n", 1885 __func__, intr_status);
1891 dev->name, intr_status);
1892 } 1886 }
1893 1887
1894} 1888}
@@ -1909,21 +1903,19 @@ static void rhine_error(struct net_device *dev, int intr_status)
1909 } 1903 }
1910 if (intr_status & IntrTxAborted) { 1904 if (intr_status & IntrTxAborted) {
1911 if (debug > 1) 1905 if (debug > 1)
1912 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n", 1906 netdev_info(dev, "Abort %08x, frame dropped\n",
1913 dev->name, intr_status); 1907 intr_status);
1914 } 1908 }
1915 if (intr_status & IntrTxUnderrun) { 1909 if (intr_status & IntrTxUnderrun) {
1916 if (rp->tx_thresh < 0xE0) 1910 if (rp->tx_thresh < 0xE0)
1917 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig); 1911 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
1918 if (debug > 1) 1912 if (debug > 1)
1919 printk(KERN_INFO "%s: Transmitter underrun, Tx " 1913 netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
1920 "threshold now %2.2x.\n", 1914 rp->tx_thresh);
1921 dev->name, rp->tx_thresh);
1922 } 1915 }
1923 if (intr_status & IntrTxDescRace) { 1916 if (intr_status & IntrTxDescRace) {
1924 if (debug > 2) 1917 if (debug > 2)
1925 printk(KERN_INFO "%s: Tx descriptor write-back race.\n", 1918 netdev_info(dev, "Tx descriptor write-back race\n");
1926 dev->name);
1927 } 1919 }
1928 if ((intr_status & IntrTxError) && 1920 if ((intr_status & IntrTxError) &&
1929 (intr_status & (IntrTxAborted | 1921 (intr_status & (IntrTxAborted |
@@ -1932,9 +1924,8 @@ static void rhine_error(struct net_device *dev, int intr_status)
1932 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig); 1924 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
1933 } 1925 }
1934 if (debug > 1) 1926 if (debug > 1)
1935 printk(KERN_INFO "%s: Unspecified error. Tx " 1927 netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
1936 "threshold now %2.2x.\n", 1928 rp->tx_thresh);
1937 dev->name, rp->tx_thresh);
1938 } 1929 }
1939 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace | 1930 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1940 IntrTxError)) 1931 IntrTxError))
@@ -1944,8 +1935,8 @@ static void rhine_error(struct net_device *dev, int intr_status)
1944 IntrTxError | IntrTxAborted | IntrNormalSummary | 1935 IntrTxError | IntrTxAborted | IntrNormalSummary |
1945 IntrTxDescRace)) { 1936 IntrTxDescRace)) {
1946 if (debug > 1) 1937 if (debug > 1)
1947 printk(KERN_ERR "%s: Something Wicked happened! " 1938 netdev_err(dev, "Something Wicked happened! %08x\n",
1948 "%8.8x.\n", dev->name, intr_status); 1939 intr_status);
1949 } 1940 }
1950 1941
1951 spin_unlock(&rp->lock); 1942 spin_unlock(&rp->lock);
@@ -2145,9 +2136,8 @@ static int rhine_close(struct net_device *dev)
2145 spin_lock_irq(&rp->lock); 2136 spin_lock_irq(&rp->lock);
2146 2137
2147 if (debug > 1) 2138 if (debug > 1)
2148 printk(KERN_DEBUG "%s: Shutting down ethercard, " 2139 netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
2149 "status was %4.4x.\n", 2140 ioread16(ioaddr + ChipCmd));
2150 dev->name, ioread16(ioaddr + ChipCmd));
2151 2141
2152 /* Switch to loopback mode to avoid hardware races. */ 2142 /* Switch to loopback mode to avoid hardware races. */
2153 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); 2143 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
@@ -2265,12 +2255,12 @@ static int rhine_resume(struct pci_dev *pdev)
2265 return 0; 2255 return 0;
2266 2256
2267 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev)) 2257 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
2268 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name); 2258 netdev_err(dev, "request_irq failed\n");
2269 2259
2270 ret = pci_set_power_state(pdev, PCI_D0); 2260 ret = pci_set_power_state(pdev, PCI_D0);
2271 if (debug > 1) 2261 if (debug > 1)
2272 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n", 2262 netdev_info(dev, "Entering power state D0 %s (%d)\n",
2273 dev->name, ret ? "failed" : "succeeded", ret); 2263 ret ? "failed" : "succeeded", ret);
2274 2264
2275 pci_restore_state(pdev); 2265 pci_restore_state(pdev);
2276 2266
@@ -2326,17 +2316,15 @@ static int __init rhine_init(void)
2326{ 2316{
2327/* when a module, this is printed whether or not devices are found in probe */ 2317/* when a module, this is printed whether or not devices are found in probe */
2328#ifdef MODULE 2318#ifdef MODULE
2329 printk(version); 2319 pr_info("%s\n", version);
2330#endif 2320#endif
2331 if (dmi_check_system(rhine_dmi_table)) { 2321 if (dmi_check_system(rhine_dmi_table)) {
2332 /* these BIOSes fail at PXE boot if chip is in D3 */ 2322 /* these BIOSes fail at PXE boot if chip is in D3 */
2333 avoid_D3 = 1; 2323 avoid_D3 = 1;
2334 printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 " 2324 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2335 "enabled.\n",
2336 DRV_NAME);
2337 } 2325 }
2338 else if (avoid_D3) 2326 else if (avoid_D3)
2339 printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME); 2327 pr_info("avoid_D3 set\n");
2340 2328
2341 return pci_register_driver(&rhine_driver); 2329 return pci_register_driver(&rhine_driver);
2342} 2330}
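
For reference, the via-rhine hunks above convert raw printk(KERN_*) calls to the netdev_*() logging helpers, which prefix the driver and interface name automatically so dev->name drops out of the format string. A minimal sketch of the resulting pattern, using a hypothetical helper rather than code from this patch:

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hypothetical TX-status logger in the post-conversion style:
 * netdev_warn()/netdev_dbg() add the "driver ethX: " prefix themselves.
 */
static void example_log_tx_status(struct net_device *dev, u32 txstatus)
{
	if (txstatus & 0x0400)
		netdev_warn(dev, "carrier error, status %08x\n", txstatus);
	else
		netdev_dbg(dev, "%s() status %08x\n", __func__, txstatus);
}
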
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 4fe051753842..06daa9d6fee8 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2600,8 +2600,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2600 /* 2600 /*
2601 * Handle hardware checksum 2601 * Handle hardware checksum
2602 */ 2602 */
2603 if ((dev->features & NETIF_F_IP_CSUM) && 2603 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2604 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2605 const struct iphdr *ip = ip_hdr(skb); 2604 const struct iphdr *ip = ip_hdr(skb);
2606 if (ip->protocol == IPPROTO_TCP) 2605 if (ip->protocol == IPPROTO_TCP)
2607 td_ptr->tdesc1.TCR |= TCR0_TCPCK; 2606 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
@@ -2841,6 +2840,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2841 dev->ethtool_ops = &velocity_ethtool_ops; 2840 dev->ethtool_ops = &velocity_ethtool_ops;
2842 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2841 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2843 2842
2843 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
2844 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2844 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2845 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM; 2845 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2846 2846
@@ -3182,7 +3182,8 @@ static void velocity_ethtool_down(struct net_device *dev)
3182 pci_set_power_state(vptr->pdev, PCI_D3hot); 3182 pci_set_power_state(vptr->pdev, PCI_D3hot);
3183} 3183}
3184 3184
3185static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 3185static int velocity_get_settings(struct net_device *dev,
3186 struct ethtool_cmd *cmd)
3186{ 3187{
3187 struct velocity_info *vptr = netdev_priv(dev); 3188 struct velocity_info *vptr = netdev_priv(dev);
3188 struct mac_regs __iomem *regs = vptr->mac_regs; 3189 struct mac_regs __iomem *regs = vptr->mac_regs;
@@ -3228,12 +3229,14 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
3228 break; 3229 break;
3229 } 3230 }
3230 } 3231 }
3232
3231 if (status & VELOCITY_SPEED_1000) 3233 if (status & VELOCITY_SPEED_1000)
3232 cmd->speed = SPEED_1000; 3234 ethtool_cmd_speed_set(cmd, SPEED_1000);
3233 else if (status & VELOCITY_SPEED_100) 3235 else if (status & VELOCITY_SPEED_100)
3234 cmd->speed = SPEED_100; 3236 ethtool_cmd_speed_set(cmd, SPEED_100);
3235 else 3237 else
3236 cmd->speed = SPEED_10; 3238 ethtool_cmd_speed_set(cmd, SPEED_10);
3239
3237 cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 3240 cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3238 cmd->port = PORT_TP; 3241 cmd->port = PORT_TP;
3239 cmd->transceiver = XCVR_INTERNAL; 3242 cmd->transceiver = XCVR_INTERNAL;
@@ -3247,9 +3250,11 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
3247 return 0; 3250 return 0;
3248} 3251}
3249 3252
3250static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 3253static int velocity_set_settings(struct net_device *dev,
3254 struct ethtool_cmd *cmd)
3251{ 3255{
3252 struct velocity_info *vptr = netdev_priv(dev); 3256 struct velocity_info *vptr = netdev_priv(dev);
3257 u32 speed = ethtool_cmd_speed(cmd);
3253 u32 curr_status; 3258 u32 curr_status;
3254 u32 new_status = 0; 3259 u32 new_status = 0;
3255 int ret = 0; 3260 int ret = 0;
@@ -3258,9 +3263,9 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd
3258 curr_status &= (~VELOCITY_LINK_FAIL); 3263 curr_status &= (~VELOCITY_LINK_FAIL);
3259 3264
3260 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); 3265 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3261 new_status |= ((cmd->speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0); 3266 new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3262 new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); 3267 new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3263 new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); 3268 new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3264 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); 3269 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3265 3270
3266 if ((new_status & VELOCITY_AUTONEG_ENABLE) && 3271 if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
@@ -3457,13 +3462,10 @@ static const struct ethtool_ops velocity_ethtool_ops = {
3457 .get_settings = velocity_get_settings, 3462 .get_settings = velocity_get_settings,
3458 .set_settings = velocity_set_settings, 3463 .set_settings = velocity_set_settings,
3459 .get_drvinfo = velocity_get_drvinfo, 3464 .get_drvinfo = velocity_get_drvinfo,
3460 .set_tx_csum = ethtool_op_set_tx_csum,
3461 .get_tx_csum = ethtool_op_get_tx_csum,
3462 .get_wol = velocity_ethtool_get_wol, 3465 .get_wol = velocity_ethtool_get_wol,
3463 .set_wol = velocity_ethtool_set_wol, 3466 .set_wol = velocity_ethtool_set_wol,
3464 .get_msglevel = velocity_get_msglevel, 3467 .get_msglevel = velocity_get_msglevel,
3465 .set_msglevel = velocity_set_msglevel, 3468 .set_msglevel = velocity_set_msglevel,
3466 .set_sg = ethtool_op_set_sg,
3467 .get_link = velocity_get_link, 3469 .get_link = velocity_get_link,
3468 .get_coalesce = velocity_get_coalesce, 3470 .get_coalesce = velocity_get_coalesce,
3469 .set_coalesce = velocity_set_coalesce, 3471 .set_coalesce = velocity_set_coalesce,
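
The velocity ethtool hunks above switch from writing cmd->speed directly to the ethtool_cmd_speed()/ethtool_cmd_speed_set() accessors, which take care of the split speed/speed_hi encoding. A minimal sketch of the get_settings side under that assumption (the function name is hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Report a fixed 1 Gb/s full-duplex link when the carrier is up, and
 * the conventional -1 "unknown" values otherwise.
 */
static int example_get_settings(struct net_device *dev,
				struct ethtool_cmd *cmd)
{
	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, SPEED_1000);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	cmd->port = PORT_TP;
	cmd->autoneg = AUTONEG_DISABLE;
	return 0;
}
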
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 82dba5aaf423..0cb0b0632672 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -710,17 +710,6 @@ static int virtnet_close(struct net_device *dev)
710 return 0; 710 return 0;
711} 711}
712 712
713static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
714{
715 struct virtnet_info *vi = netdev_priv(dev);
716 struct virtio_device *vdev = vi->vdev;
717
718 if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
719 return -ENOSYS;
720
721 return ethtool_op_set_tx_hw_csum(dev, data);
722}
723
724static void virtnet_set_rx_mode(struct net_device *dev) 713static void virtnet_set_rx_mode(struct net_device *dev)
725{ 714{
726 struct virtnet_info *vi = netdev_priv(dev); 715 struct virtnet_info *vi = netdev_priv(dev);
@@ -822,10 +811,6 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
822} 811}
823 812
824static const struct ethtool_ops virtnet_ethtool_ops = { 813static const struct ethtool_ops virtnet_ethtool_ops = {
825 .set_tx_csum = virtnet_set_tx_csum,
826 .set_sg = ethtool_op_set_sg,
827 .set_tso = ethtool_op_set_tso,
828 .set_ufo = ethtool_op_set_ufo,
829 .get_link = ethtool_op_get_link, 814 .get_link = ethtool_op_get_link,
830}; 815};
831 816
@@ -912,22 +897,29 @@ static int virtnet_probe(struct virtio_device *vdev)
912 SET_NETDEV_DEV(dev, &vdev->dev); 897 SET_NETDEV_DEV(dev, &vdev->dev);
913 898
914 /* Do we support "hardware" checksums? */ 899 /* Do we support "hardware" checksums? */
915 if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 900 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
916 /* This opens up the world of extra features. */ 901 /* This opens up the world of extra features. */
917 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 902 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
918 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 903 if (csum)
919 dev->features |= NETIF_F_TSO | NETIF_F_UFO 904 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
905
906 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
907 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
920 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 908 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
921 } 909 }
922 /* Individual feature bits: what can host handle? */ 910 /* Individual feature bits: what can host handle? */
923 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) 911 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
924 dev->features |= NETIF_F_TSO; 912 dev->hw_features |= NETIF_F_TSO;
925 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) 913 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
926 dev->features |= NETIF_F_TSO6; 914 dev->hw_features |= NETIF_F_TSO6;
927 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 915 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
928 dev->features |= NETIF_F_TSO_ECN; 916 dev->hw_features |= NETIF_F_TSO_ECN;
929 if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) 917 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
930 dev->features |= NETIF_F_UFO; 918 dev->hw_features |= NETIF_F_UFO;
919
920 if (gso)
921 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
922 /* (!csum && gso) case will be fixed by register_netdev() */
931 } 923 }
932 924
933 /* Configuration may specify what MAC to use. Otherwise random. */ 925 /* Configuration may specify what MAC to use. Otherwise random. */
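
The virtio_net hunk above is an instance of the hw_features split: hw_features advertises what the user may toggle via ethtool, features is what is currently enabled, and register_netdev() resolves invalid combinations (the "(!csum && gso)" comment in the patch). A reduced probe-time sketch with hypothetical names and a simplified feature set:

#include <linux/netdevice.h>

/* Advertise checksum/SG/TSO as user-toggleable; enable a subset now. */
static void example_declare_features(struct net_device *dev, bool enable_tso)
{
	dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6;

	/* Checksum and scatter/gather on by default. */
	dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

	/* TSO only if requested, and only bits we actually advertised. */
	if (enable_tso)
		dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
}
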
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index c16ed961153a..fa6e2ac7475a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1083,7 +1083,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1083 struct sk_buff *skb, 1083 struct sk_buff *skb,
1084 union Vmxnet3_GenericDesc *gdesc) 1084 union Vmxnet3_GenericDesc *gdesc)
1085{ 1085{
1086 if (!gdesc->rcd.cnc && adapter->rxcsum) { 1086 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1087 /* typical case: TCP/UDP over IP and both csums are correct */ 1087 /* typical case: TCP/UDP over IP and both csums are correct */
1088 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1088 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
1089 VMXNET3_RCD_CSUM_OK) { 1089 VMXNET3_RCD_CSUM_OK) {
@@ -2082,10 +2082,10 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2082 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); 2082 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2083 2083
2084 /* set up feature flags */ 2084 /* set up feature flags */
2085 if (adapter->rxcsum) 2085 if (adapter->netdev->features & NETIF_F_RXCSUM)
2086 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 2086 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2087 2087
2088 if (adapter->lro) { 2088 if (adapter->netdev->features & NETIF_F_LRO) {
2089 devRead->misc.uptFeatures |= UPT1_F_LRO; 2089 devRead->misc.uptFeatures |= UPT1_F_LRO;
2090 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2090 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2091 } 2091 }
@@ -2594,9 +2594,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2594 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) 2594 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2595 return -EINVAL; 2595 return -EINVAL;
2596 2596
2597 if (new_mtu > 1500 && !adapter->jumbo_frame)
2598 return -EINVAL;
2599
2600 netdev->mtu = new_mtu; 2597 netdev->mtu = new_mtu;
2601 2598
2602 /* 2599 /*
@@ -2642,28 +2639,18 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2642{ 2639{
2643 struct net_device *netdev = adapter->netdev; 2640 struct net_device *netdev = adapter->netdev;
2644 2641
2645 netdev->features = NETIF_F_SG | 2642 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2646 NETIF_F_HW_CSUM | 2643 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
2647 NETIF_F_HW_VLAN_TX | 2644 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO;
2648 NETIF_F_HW_VLAN_RX | 2645 if (dma64)
2649 NETIF_F_HW_VLAN_FILTER |
2650 NETIF_F_TSO |
2651 NETIF_F_TSO6 |
2652 NETIF_F_LRO;
2653
2654 printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
2655
2656 adapter->rxcsum = true;
2657 adapter->jumbo_frame = true;
2658 adapter->lro = true;
2659
2660 if (dma64) {
2661 netdev->features |= NETIF_F_HIGHDMA; 2646 netdev->features |= NETIF_F_HIGHDMA;
2662 printk(" highDMA"); 2647 netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_TX;
2663 } 2648 netdev->features = netdev->hw_features |
2649 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2664 2650
2665 netdev->vlan_features = netdev->features; 2651 netdev_info(adapter->netdev,
2666 printk("\n"); 2652 "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
2653 dma64 ? " highDMA" : "");
2667} 2654}
2668 2655
2669 2656
@@ -2876,6 +2863,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2876 .ndo_start_xmit = vmxnet3_xmit_frame, 2863 .ndo_start_xmit = vmxnet3_xmit_frame,
2877 .ndo_set_mac_address = vmxnet3_set_mac_addr, 2864 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2878 .ndo_change_mtu = vmxnet3_change_mtu, 2865 .ndo_change_mtu = vmxnet3_change_mtu,
2866 .ndo_set_features = vmxnet3_set_features,
2879 .ndo_get_stats = vmxnet3_get_stats, 2867 .ndo_get_stats = vmxnet3_get_stats,
2880 .ndo_tx_timeout = vmxnet3_tx_timeout, 2868 .ndo_tx_timeout = vmxnet3_tx_timeout,
2881 .ndo_set_multicast_list = vmxnet3_set_mc, 2869 .ndo_set_multicast_list = vmxnet3_set_mc,
@@ -2896,6 +2884,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2896 int num_tx_queues; 2884 int num_tx_queues;
2897 int num_rx_queues; 2885 int num_rx_queues;
2898 2886
2887 if (!pci_msi_enabled())
2888 enable_mq = 0;
2889
2899#ifdef VMXNET3_RSS 2890#ifdef VMXNET3_RSS
2900 if (enable_mq) 2891 if (enable_mq)
2901 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, 2892 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 976467253d20..dc959fe27aa5 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -33,40 +33,6 @@ struct vmxnet3_stat_desc {
33}; 33};
34 34
35 35
36static u32
37vmxnet3_get_rx_csum(struct net_device *netdev)
38{
39 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
40 return adapter->rxcsum;
41}
42
43
44static int
45vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
46{
47 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
48 unsigned long flags;
49
50 if (adapter->rxcsum != val) {
51 adapter->rxcsum = val;
52 if (netif_running(netdev)) {
53 if (val)
54 adapter->shared->devRead.misc.uptFeatures |=
55 UPT1_F_RXCSUM;
56 else
57 adapter->shared->devRead.misc.uptFeatures &=
58 ~UPT1_F_RXCSUM;
59
60 spin_lock_irqsave(&adapter->cmd_lock, flags);
61 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
62 VMXNET3_CMD_UPDATE_FEATURE);
63 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
64 }
65 }
66 return 0;
67}
68
69
70/* per tq stats maintained by the device */ 36/* per tq stats maintained by the device */
71static const struct vmxnet3_stat_desc 37static const struct vmxnet3_stat_desc
72vmxnet3_tq_dev_stats[] = { 38vmxnet3_tq_dev_stats[] = {
@@ -296,31 +262,28 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
296 } 262 }
297} 263}
298 264
299static int 265int vmxnet3_set_features(struct net_device *netdev, u32 features)
300vmxnet3_set_flags(struct net_device *netdev, u32 data)
301{ 266{
302 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 267 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
303 u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
304 u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
305 unsigned long flags; 268 unsigned long flags;
269 u32 changed = features ^ netdev->features;
306 270
307 if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) 271 if (changed & (NETIF_F_RXCSUM|NETIF_F_LRO)) {
308 return -EINVAL; 272 if (features & NETIF_F_RXCSUM)
309 273 adapter->shared->devRead.misc.uptFeatures |=
310 if (lro_requested ^ lro_present) { 274 UPT1_F_RXCSUM;
311 /* toggle the LRO feature*/ 275 else
312 netdev->features ^= NETIF_F_LRO; 276 adapter->shared->devRead.misc.uptFeatures &=
313 277 ~UPT1_F_RXCSUM;
314 /* Update private LRO flag */
315 adapter->lro = lro_requested;
316 278
317 /* update harware LRO capability accordingly */ 279 /* update harware LRO capability accordingly */
318 if (lro_requested) 280 if (features & NETIF_F_LRO)
319 adapter->shared->devRead.misc.uptFeatures |= 281 adapter->shared->devRead.misc.uptFeatures |=
320 UPT1_F_LRO; 282 UPT1_F_LRO;
321 else 283 else
322 adapter->shared->devRead.misc.uptFeatures &= 284 adapter->shared->devRead.misc.uptFeatures &=
323 ~UPT1_F_LRO; 285 ~UPT1_F_LRO;
286
324 spin_lock_irqsave(&adapter->cmd_lock, flags); 287 spin_lock_irqsave(&adapter->cmd_lock, flags);
325 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 288 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
326 VMXNET3_CMD_UPDATE_FEATURE); 289 VMXNET3_CMD_UPDATE_FEATURE);
@@ -462,10 +425,10 @@ vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
462 ecmd->transceiver = XCVR_INTERNAL; 425 ecmd->transceiver = XCVR_INTERNAL;
463 426
464 if (adapter->link_speed) { 427 if (adapter->link_speed) {
465 ecmd->speed = adapter->link_speed; 428 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
466 ecmd->duplex = DUPLEX_FULL; 429 ecmd->duplex = DUPLEX_FULL;
467 } else { 430 } else {
468 ecmd->speed = -1; 431 ethtool_cmd_speed_set(ecmd, -1);
469 ecmd->duplex = -1; 432 ecmd->duplex = -1;
470 } 433 }
471 return 0; 434 return 0;
@@ -657,17 +620,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
657 .get_wol = vmxnet3_get_wol, 620 .get_wol = vmxnet3_get_wol,
658 .set_wol = vmxnet3_set_wol, 621 .set_wol = vmxnet3_set_wol,
659 .get_link = ethtool_op_get_link, 622 .get_link = ethtool_op_get_link,
660 .get_rx_csum = vmxnet3_get_rx_csum,
661 .set_rx_csum = vmxnet3_set_rx_csum,
662 .get_tx_csum = ethtool_op_get_tx_csum,
663 .set_tx_csum = ethtool_op_set_tx_hw_csum,
664 .get_sg = ethtool_op_get_sg,
665 .set_sg = ethtool_op_set_sg,
666 .get_tso = ethtool_op_get_tso,
667 .set_tso = ethtool_op_set_tso,
668 .get_strings = vmxnet3_get_strings, 623 .get_strings = vmxnet3_get_strings,
669 .get_flags = ethtool_op_get_flags,
670 .set_flags = vmxnet3_set_flags,
671 .get_sset_count = vmxnet3_get_sset_count, 624 .get_sset_count = vmxnet3_get_sset_count,
672 .get_ethtool_stats = vmxnet3_get_ethtool_stats, 625 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
673 .get_ringparam = vmxnet3_get_ringparam, 626 .get_ringparam = vmxnet3_get_ringparam,
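
vmxnet3_set_features() above replaces the per-knob get/set_rx_csum and set_flags callbacks with a single handler keyed off the bits that actually changed. A generic sketch of that shape; the example_hw_* helpers are hypothetical stand-ins for whatever programs the device:

#include <linux/netdevice.h>

/* Hypothetical hardware hooks, stubbed out for the sketch. */
static void example_hw_set_rxcsum(struct net_device *netdev, bool on) { }
static void example_hw_set_lro(struct net_device *netdev, bool on) { }

/* ndo_set_features: act only on feature bits that differ from the
 * currently enabled set, then report success.
 */
static int example_set_features(struct net_device *netdev, u32 features)
{
	u32 changed = features ^ netdev->features;

	if (changed & NETIF_F_RXCSUM)
		example_hw_set_rxcsum(netdev, features & NETIF_F_RXCSUM);

	if (changed & NETIF_F_LRO)
		example_hw_set_lro(netdev, features & NETIF_F_LRO);

	return 0;
}
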
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index fb5d245ac878..f50d36fdf405 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
72 72
73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01001900 74#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
75 75
76#if defined(CONFIG_PCI_MSI) 76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */ 77 /* RSS only makes sense if MSI-X is supported. */
@@ -329,10 +329,6 @@ struct vmxnet3_adapter {
329 u8 __iomem *hw_addr0; /* for BAR 0 */ 329 u8 __iomem *hw_addr0; /* for BAR 0 */
330 u8 __iomem *hw_addr1; /* for BAR 1 */ 330 u8 __iomem *hw_addr1; /* for BAR 1 */
331 331
332 /* feature control */
333 bool rxcsum;
334 bool lro;
335 bool jumbo_frame;
336#ifdef VMXNET3_RSS 332#ifdef VMXNET3_RSS
337 struct UPT1_RSSConf *rss_conf; 333 struct UPT1_RSSConf *rss_conf;
338 bool rss; 334 bool rss;
@@ -404,6 +400,9 @@ void
404vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter); 400vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
405 401
406int 402int
403vmxnet3_set_features(struct net_device *netdev, u32 features);
404
405int
407vmxnet3_create_queues(struct vmxnet3_adapter *adapter, 406vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
408 u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size); 407 u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
409 408
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 401bebf59502..32763b2dd73f 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -159,16 +159,15 @@ vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1, 159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
160 u64 *steer_ctrl) 160 u64 *steer_ctrl)
161{ 161{
162 struct vxge_hw_vpath_reg __iomem *vp_reg; 162 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
163 enum vxge_hw_status status; 163 enum vxge_hw_status status;
164 u64 val64; 164 u64 val64;
165 u32 retry = 0, max_retry = 100; 165 u32 retry = 0, max_retry = 3;
166
167 vp_reg = vpath->vp_reg;
168 166
169 if (vpath->vp_open) { 167 spin_lock(&vpath->lock);
170 max_retry = 3; 168 if (!vpath->vp_open) {
171 spin_lock(&vpath->lock); 169 spin_unlock(&vpath->lock);
170 max_retry = 100;
172 } 171 }
173 172
174 writeq(*data0, &vp_reg->rts_access_steer_data0); 173 writeq(*data0, &vp_reg->rts_access_steer_data0);
@@ -1000,7 +999,7 @@ exit:
1000/** 999/**
1001 * vxge_hw_device_hw_info_get - Get the hw information 1000 * vxge_hw_device_hw_info_get - Get the hw information
1002 * Returns the vpath mask that has the bits set for each vpath allocated 1001 * Returns the vpath mask that has the bits set for each vpath allocated
1003 * for the driver, FW version information and the first mac addresse for 1002 * for the driver, FW version information, and the first mac address for
1004 * each vpath 1003 * each vpath
1005 */ 1004 */
1006enum vxge_hw_status __devinit 1005enum vxge_hw_status __devinit
@@ -1064,9 +1063,10 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
1064 1063
1065 val64 = readq(&toc->toc_vpath_pointer[i]); 1064 val64 = readq(&toc->toc_vpath_pointer[i]);
1066 1065
1066 spin_lock_init(&vpath.lock);
1067 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) 1067 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1068 (bar0 + val64); 1068 (bar0 + val64);
1069 vpath.vp_open = 0; 1069 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1070 1070
1071 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info); 1071 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
1072 if (status != VXGE_HW_OK) 1072 if (status != VXGE_HW_OK)
@@ -1090,7 +1090,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
1090 val64 = readq(&toc->toc_vpath_pointer[i]); 1090 val64 = readq(&toc->toc_vpath_pointer[i]);
1091 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *) 1091 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1092 (bar0 + val64); 1092 (bar0 + val64);
1093 vpath.vp_open = 0; 1093 vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
1094 1094
1095 status = __vxge_hw_vpath_addr_get(&vpath, 1095 status = __vxge_hw_vpath_addr_get(&vpath,
1096 hw_info->mac_addrs[i], 1096 hw_info->mac_addrs[i],
@@ -4646,7 +4646,27 @@ static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4646 vpath->hldev->tim_int_mask1, vpath->vp_id); 4646 vpath->hldev->tim_int_mask1, vpath->vp_id);
4647 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; 4647 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4648 4648
4649 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4649 /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
4650 * work after the interface is brought down.
4651 */
4652 spin_lock(&vpath->lock);
4653 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4654 spin_unlock(&vpath->lock);
4655
4656 vpath->vpmgmt_reg = NULL;
4657 vpath->nofl_db = NULL;
4658 vpath->max_mtu = 0;
4659 vpath->vsport_number = 0;
4660 vpath->max_kdfc_db = 0;
4661 vpath->max_nofl_db = 0;
4662 vpath->ringh = NULL;
4663 vpath->fifoh = NULL;
4664 memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
4665 vpath->stats_block = 0;
4666 vpath->hw_stats = NULL;
4667 vpath->hw_stats_sav = NULL;
4668 vpath->sw_stats = NULL;
4669
4650exit: 4670exit:
4651 return; 4671 return;
4652} 4672}
@@ -4670,7 +4690,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4670 4690
4671 vpath = &hldev->virtual_paths[vp_id]; 4691 vpath = &hldev->virtual_paths[vp_id];
4672 4692
4673 spin_lock_init(&hldev->virtual_paths[vp_id].lock); 4693 spin_lock_init(&vpath->lock);
4674 vpath->vp_id = vp_id; 4694 vpath->vp_id = vp_id;
4675 vpath->vp_open = VXGE_HW_VP_OPEN; 4695 vpath->vp_open = VXGE_HW_VP_OPEN;
4676 vpath->hldev = hldev; 4696 vpath->hldev = hldev;
@@ -5019,10 +5039,6 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
5019 5039
5020 __vxge_hw_vp_terminate(devh, vp_id); 5040 __vxge_hw_vp_terminate(devh, vp_id);
5021 5041
5022 spin_lock(&vpath->lock);
5023 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
5024 spin_unlock(&vpath->lock);
5025
5026vpath_close_exit: 5042vpath_close_exit:
5027 return status; 5043 return status;
5028} 5044}
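
The vxge-config.c changes above stop memset()ing the whole virtualpath on terminate, so the spinlock and vp_open state remain usable across an ifdown/ifup cycle; individual fields are cleared instead. A reduced sketch of the state transition being protected (structure and names simplified, not the driver's real layout):

#include <linux/spinlock.h>

struct example_vpath {
	spinlock_t lock;
	int vp_open;		/* nonzero while the path is open */
};

/* Terminate path after the change: flip the flag under the lock rather
 * than zeroing the structure, so the lock itself is never wiped out.
 */
static void example_vp_terminate(struct example_vpath *vpath)
{
	spin_lock(&vpath->lock);
	vpath->vp_open = 0;
	spin_unlock(&vpath->lock);
}
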
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 3c53aa732c9d..359b9b9f8041 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -412,44 +412,48 @@ struct vxge_hw_vp_config {
412 * See also: struct vxge_hw_tim_intr_config{}. 412 * See also: struct vxge_hw_tim_intr_config{}.
413 */ 413 */
414struct vxge_hw_device_config { 414struct vxge_hw_device_config {
415 u32 dma_blockpool_initial; 415 u32 device_poll_millis;
416 u32 dma_blockpool_max; 416#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
417#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0 417#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
418#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0 418#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
419#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4 419
420#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096 420 u32 dma_blockpool_initial;
421 421 u32 dma_blockpool_max;
422#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2 422#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
423 423#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
424 u32 intr_mode; 424#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
425#define VXGE_HW_INTR_MODE_IRQLINE 0 425#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
426#define VXGE_HW_INTR_MODE_MSIX 1 426
427#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2 427#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
428 428
429#define VXGE_HW_INTR_MODE_DEF 0 429 u32 intr_mode:2,
430 430#define VXGE_HW_INTR_MODE_IRQLINE 0
431 u32 rth_en; 431#define VXGE_HW_INTR_MODE_MSIX 1
432#define VXGE_HW_RTH_DISABLE 0 432#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
433#define VXGE_HW_RTH_ENABLE 1 433
434#define VXGE_HW_RTH_DEFAULT 0 434#define VXGE_HW_INTR_MODE_DEF 0
435 435
436 u32 rth_it_type; 436 rth_en:1,
437#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0 437#define VXGE_HW_RTH_DISABLE 0
438#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1 438#define VXGE_HW_RTH_ENABLE 1
439#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0 439#define VXGE_HW_RTH_DEFAULT 0
440 440
441 u32 rts_mac_en; 441 rth_it_type:1,
442#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
443#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
444#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
445
446 rts_mac_en:1,
442#define VXGE_HW_RTS_MAC_DISABLE 0 447#define VXGE_HW_RTS_MAC_DISABLE 0
443#define VXGE_HW_RTS_MAC_ENABLE 1 448#define VXGE_HW_RTS_MAC_ENABLE 1
444#define VXGE_HW_RTS_MAC_DEFAULT 0 449#define VXGE_HW_RTS_MAC_DEFAULT 0
445 450
446 struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS]; 451 hwts_en:1;
447 452#define VXGE_HW_HWTS_DISABLE 0
448 u32 device_poll_millis; 453#define VXGE_HW_HWTS_ENABLE 1
449#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1 454#define VXGE_HW_HWTS_DEFAULT 1
450#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
451#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
452 455
456 struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
453}; 457};
454 458
455/** 459/**
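
The vxge_hw_device_config rework above packs the enable knobs into one u32 using bitfields sized to their value ranges. A reduced sketch of the idiom (names shortened, not the complete structure):

#include <linux/types.h>

/* Packed device-config flags: two bits for the interrupt mode, one bit
 * for each on/off option.
 */
struct example_device_config {
	u32 intr_mode:2,	/* IRQ line, MSI-X, or MSI-X one-shot */
	    rth_en:1,		/* receive-side steering */
	    rts_mac_en:1,	/* MAC-based steering */
	    hwts_en:1;		/* hardware timestamping */
};
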
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index c5eb034107fd..92dd72d3f9de 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -33,7 +33,8 @@ static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
33{ 33{
34 /* We currently only support 10Gb/FULL */ 34 /* We currently only support 10Gb/FULL */
35 if ((info->autoneg == AUTONEG_ENABLE) || 35 if ((info->autoneg == AUTONEG_ENABLE) ||
36 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL)) 36 (ethtool_cmd_speed(info) != SPEED_10000) ||
37 (info->duplex != DUPLEX_FULL))
37 return -EINVAL; 38 return -EINVAL;
38 39
39 return 0; 40 return 0;
@@ -58,10 +59,10 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
58 info->transceiver = XCVR_EXTERNAL; 59 info->transceiver = XCVR_EXTERNAL;
59 60
60 if (netif_carrier_ok(dev)) { 61 if (netif_carrier_ok(dev)) {
61 info->speed = SPEED_10000; 62 ethtool_cmd_speed_set(info, SPEED_10000);
62 info->duplex = DUPLEX_FULL; 63 info->duplex = DUPLEX_FULL;
63 } else { 64 } else {
64 info->speed = -1; 65 ethtool_cmd_speed_set(info, -1);
65 info->duplex = -1; 66 info->duplex = -1;
66 } 67 }
67 68
@@ -134,22 +135,29 @@ static void vxge_ethtool_gregs(struct net_device *dev,
134/** 135/**
135 * vxge_ethtool_idnic - To physically identify the nic on the system. 136 * vxge_ethtool_idnic - To physically identify the nic on the system.
136 * @dev : device pointer. 137 * @dev : device pointer.
137 * @id : pointer to the structure with identification parameters given by 138 * @state : requested LED state
138 * ethtool.
139 * 139 *
140 * Used to physically identify the NIC on the system. 140 * Used to physically identify the NIC on the system.
141 * The Link LED will blink for a time specified by the user.
142 * Return value:
143 * 0 on success 141 * 0 on success
144 */ 142 */
145static int vxge_ethtool_idnic(struct net_device *dev, u32 data) 143static int vxge_ethtool_idnic(struct net_device *dev,
144 enum ethtool_phys_id_state state)
146{ 145{
147 struct vxgedev *vdev = netdev_priv(dev); 146 struct vxgedev *vdev = netdev_priv(dev);
148 struct __vxge_hw_device *hldev = vdev->devh; 147 struct __vxge_hw_device *hldev = vdev->devh;
149 148
150 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); 149 switch (state) {
151 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME); 150 case ETHTOOL_ID_ACTIVE:
152 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF); 151 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
152 break;
153
154 case ETHTOOL_ID_INACTIVE:
155 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
156 break;
157
158 default:
159 return -EINVAL;
160 }
153 161
154 return 0; 162 return 0;
155} 163}
@@ -1064,35 +1072,6 @@ static int vxge_ethtool_get_regs_len(struct net_device *dev)
1064 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 1072 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1065} 1073}
1066 1074
1067static u32 vxge_get_rx_csum(struct net_device *dev)
1068{
1069 struct vxgedev *vdev = netdev_priv(dev);
1070
1071 return vdev->rx_csum;
1072}
1073
1074static int vxge_set_rx_csum(struct net_device *dev, u32 data)
1075{
1076 struct vxgedev *vdev = netdev_priv(dev);
1077
1078 if (data)
1079 vdev->rx_csum = 1;
1080 else
1081 vdev->rx_csum = 0;
1082
1083 return 0;
1084}
1085
1086static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
1087{
1088 if (data)
1089 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1090 else
1091 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1092
1093 return 0;
1094}
1095
1096static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) 1075static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1097{ 1076{
1098 struct vxgedev *vdev = netdev_priv(dev); 1077 struct vxgedev *vdev = netdev_priv(dev);
@@ -1112,40 +1091,6 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1112 } 1091 }
1113} 1092}
1114 1093
1115static int vxge_set_flags(struct net_device *dev, u32 data)
1116{
1117 struct vxgedev *vdev = netdev_priv(dev);
1118 enum vxge_hw_status status;
1119
1120 if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
1121 return -EINVAL;
1122
1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1124 return 0;
1125
1126 if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
1127 return -EINVAL;
1128
1129 vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
1130
1131 /* Enabling RTH requires some of the logic in vxge_device_register and a
1132 * vpath reset. Due to these restrictions, only allow modification
1133 * while the interface is down.
1134 */
1135 status = vxge_reset_all_vpaths(vdev);
1136 if (status != VXGE_HW_OK) {
1137 vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
1138 return -EFAULT;
1139 }
1140
1141 if (vdev->devh->config.rth_en)
1142 dev->features |= NETIF_F_RXHASH;
1143 else
1144 dev->features &= ~NETIF_F_RXHASH;
1145
1146 return 0;
1147}
1148
1149static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms) 1094static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1150{ 1095{
1151 struct vxgedev *vdev = netdev_priv(dev); 1096 struct vxgedev *vdev = netdev_priv(dev);
@@ -1174,19 +1119,10 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1174 .get_link = ethtool_op_get_link, 1119 .get_link = ethtool_op_get_link,
1175 .get_pauseparam = vxge_ethtool_getpause_data, 1120 .get_pauseparam = vxge_ethtool_getpause_data,
1176 .set_pauseparam = vxge_ethtool_setpause_data, 1121 .set_pauseparam = vxge_ethtool_setpause_data,
1177 .get_rx_csum = vxge_get_rx_csum,
1178 .set_rx_csum = vxge_set_rx_csum,
1179 .get_tx_csum = ethtool_op_get_tx_csum,
1180 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1181 .get_sg = ethtool_op_get_sg,
1182 .set_sg = ethtool_op_set_sg,
1183 .get_tso = ethtool_op_get_tso,
1184 .set_tso = vxge_ethtool_op_set_tso,
1185 .get_strings = vxge_ethtool_get_strings, 1122 .get_strings = vxge_ethtool_get_strings,
1186 .phys_id = vxge_ethtool_idnic, 1123 .set_phys_id = vxge_ethtool_idnic,
1187 .get_sset_count = vxge_ethtool_get_sset_count, 1124 .get_sset_count = vxge_ethtool_get_sset_count,
1188 .get_ethtool_stats = vxge_get_ethtool_stats, 1125 .get_ethtool_stats = vxge_get_ethtool_stats,
1189 .set_flags = vxge_set_flags,
1190 .flash_device = vxge_fw_flash, 1126 .flash_device = vxge_fw_flash,
1191}; 1127};
1192 1128
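
vxge_ethtool_idnic() above moves from the old blocking phys_id callback to the set_phys_id contract, where the ethtool core drives the start/stop states and the driver no longer sleeps. A sketch of that shape; the LED helpers are hypothetical stand-ins for the real device hooks:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical LED controls. */
static void example_led_blink_on(struct net_device *dev) { }
static void example_led_blink_off(struct net_device *dev) { }

static int example_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		example_led_blink_on(dev);	/* hardware blinks on its own */
		return 0;
	case ETHTOOL_ID_INACTIVE:
		example_led_blink_off(dev);
		return 0;
	default:
		return -EINVAL;
	}
}
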
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index aff68c1118d4..fc837cf6bd4d 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -304,22 +304,14 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
304 "%s: %s:%d skb protocol = %d", 304 "%s: %s:%d skb protocol = %d",
305 ring->ndev->name, __func__, __LINE__, skb->protocol); 305 ring->ndev->name, __func__, __LINE__, skb->protocol);
306 306
307 if (ring->gro_enable) { 307 if (ring->vlgrp && ext_info->vlan &&
308 if (ring->vlgrp && ext_info->vlan && 308 (ring->vlan_tag_strip ==
309 (ring->vlan_tag_strip == 309 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
310 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) 310 vlan_gro_receive(ring->napi_p, ring->vlgrp,
311 vlan_gro_receive(ring->napi_p, ring->vlgrp, 311 ext_info->vlan, skb);
312 ext_info->vlan, skb); 312 else
313 else 313 napi_gro_receive(ring->napi_p, skb);
314 napi_gro_receive(ring->napi_p, skb); 314
315 } else {
316 if (ring->vlgrp && vlan &&
317 (ring->vlan_tag_strip ==
318 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
319 vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
320 else
321 netif_receive_skb(skb);
322 }
323 vxge_debug_entryexit(VXGE_TRACE, 315 vxge_debug_entryexit(VXGE_TRACE,
324 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); 316 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
325} 317}
@@ -490,7 +482,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
490 482
491 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) && 483 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
492 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) && 484 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
493 ring->rx_csum && /* Offload Rx side CSUM */ 485 (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
494 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK && 486 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
495 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) 487 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
496 skb->ip_summed = CHECKSUM_UNNECESSARY; 488 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2094,11 +2086,9 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2094 vdev->config.fifo_indicate_max_pkts; 2086 vdev->config.fifo_indicate_max_pkts;
2095 vpath->fifo.tx_vector_no = 0; 2087 vpath->fifo.tx_vector_no = 0;
2096 vpath->ring.rx_vector_no = 0; 2088 vpath->ring.rx_vector_no = 0;
2097 vpath->ring.rx_csum = vdev->rx_csum;
2098 vpath->ring.rx_hwts = vdev->rx_hwts; 2089 vpath->ring.rx_hwts = vdev->rx_hwts;
2099 vpath->is_open = 1; 2090 vpath->is_open = 1;
2100 vdev->vp_handles[i] = vpath->handle; 2091 vdev->vp_handles[i] = vpath->handle;
2101 vpath->ring.gro_enable = vdev->config.gro_enable;
2102 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; 2092 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2103 vdev->stats.vpaths_open++; 2093 vdev->stats.vpaths_open++;
2104 } else { 2094 } else {
@@ -2670,6 +2660,40 @@ static void vxge_poll_vp_lockup(unsigned long data)
2670 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); 2660 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2671} 2661}
2672 2662
2663static u32 vxge_fix_features(struct net_device *dev, u32 features)
2664{
2665 u32 changed = dev->features ^ features;
2666
2667 /* Enabling RTH requires some of the logic in vxge_device_register and a
2668 * vpath reset. Due to these restrictions, only allow modification
2669 * while the interface is down.
2670 */
2671 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2672 features ^= NETIF_F_RXHASH;
2673
2674 return features;
2675}
2676
2677static int vxge_set_features(struct net_device *dev, u32 features)
2678{
2679 struct vxgedev *vdev = netdev_priv(dev);
2680 u32 changed = dev->features ^ features;
2681
2682 if (!(changed & NETIF_F_RXHASH))
2683 return 0;
2684
2685 /* !netif_running() ensured by vxge_fix_features() */
2686
2687 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2688 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2689 dev->features = features ^ NETIF_F_RXHASH;
2690 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2691 return -EIO;
2692 }
2693
2694 return 0;
2695}
2696
2673/** 2697/**
2674 * vxge_open 2698 * vxge_open
2675 * @dev: pointer to the device structure. 2699 * @dev: pointer to the device structure.
@@ -3112,8 +3136,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3112 return net_stats; 3136 return net_stats;
3113} 3137}
3114 3138
3115static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev, 3139static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3116 int enable)
3117{ 3140{
3118 enum vxge_hw_status status; 3141 enum vxge_hw_status status;
3119 u64 val64; 3142 u64 val64;
@@ -3123,27 +3146,24 @@ static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
3123 * required for the driver to load (due to a hardware bug), 3146 * required for the driver to load (due to a hardware bug),
3124 * there is no need to do anything special here. 3147 * there is no need to do anything special here.
3125 */ 3148 */
3126 if (enable) 3149 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3127 val64 = VXGE_HW_XMAC_TIMESTAMP_EN | 3150 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3128 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) | 3151 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3129 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3130 else
3131 val64 = 0;
3132 3152
3133 status = vxge_hw_mgmt_reg_write(vdev->devh, 3153 status = vxge_hw_mgmt_reg_write(devh,
3134 vxge_hw_mgmt_reg_type_mrpcim, 3154 vxge_hw_mgmt_reg_type_mrpcim,
3135 0, 3155 0,
3136 offsetof(struct vxge_hw_mrpcim_reg, 3156 offsetof(struct vxge_hw_mrpcim_reg,
3137 xmac_timestamp), 3157 xmac_timestamp),
3138 val64); 3158 val64);
3139 vxge_hw_device_flush_io(vdev->devh); 3159 vxge_hw_device_flush_io(devh);
3160 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3140 return status; 3161 return status;
3141} 3162}
3142 3163
3143static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data) 3164static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3144{ 3165{
3145 struct hwtstamp_config config; 3166 struct hwtstamp_config config;
3146 enum vxge_hw_status status;
3147 int i; 3167 int i;
3148 3168
3149 if (copy_from_user(&config, data, sizeof(config))) 3169 if (copy_from_user(&config, data, sizeof(config)))
@@ -3164,10 +3184,6 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3164 3184
3165 switch (config.rx_filter) { 3185 switch (config.rx_filter) {
3166 case HWTSTAMP_FILTER_NONE: 3186 case HWTSTAMP_FILTER_NONE:
3167 status = vxge_timestamp_config(vdev, 0);
3168 if (status != VXGE_HW_OK)
3169 return -EFAULT;
3170
3171 vdev->rx_hwts = 0; 3187 vdev->rx_hwts = 0;
3172 config.rx_filter = HWTSTAMP_FILTER_NONE; 3188 config.rx_filter = HWTSTAMP_FILTER_NONE;
3173 break; 3189 break;
@@ -3186,8 +3202,7 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3186 case HWTSTAMP_FILTER_PTP_V2_EVENT: 3202 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3187 case HWTSTAMP_FILTER_PTP_V2_SYNC: 3203 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3188 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 3204 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3189 status = vxge_timestamp_config(vdev, 1); 3205 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3190 if (status != VXGE_HW_OK)
3191 return -EFAULT; 3206 return -EFAULT;
3192 3207
3193 vdev->rx_hwts = 1; 3208 vdev->rx_hwts = 1;
@@ -3378,6 +3393,8 @@ static const struct net_device_ops vxge_netdev_ops = {
3378 .ndo_do_ioctl = vxge_ioctl, 3393 .ndo_do_ioctl = vxge_ioctl,
3379 .ndo_set_mac_address = vxge_set_mac_addr, 3394 .ndo_set_mac_address = vxge_set_mac_addr,
3380 .ndo_change_mtu = vxge_change_mtu, 3395 .ndo_change_mtu = vxge_change_mtu,
3396 .ndo_fix_features = vxge_fix_features,
3397 .ndo_set_features = vxge_set_features,
3381 .ndo_vlan_rx_register = vxge_vlan_rx_register, 3398 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3382 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, 3399 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3383 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, 3400 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
@@ -3424,14 +3441,21 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3424 vdev->devh = hldev; 3441 vdev->devh = hldev;
3425 vdev->pdev = hldev->pdev; 3442 vdev->pdev = hldev->pdev;
3426 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3443 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3427 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3428 vdev->rx_hwts = 0; 3444 vdev->rx_hwts = 0;
3429 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION); 3445 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3430 3446
3431 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3447 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3432 3448
3433 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 3449 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3434 NETIF_F_HW_VLAN_FILTER; 3450 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3451 NETIF_F_TSO | NETIF_F_TSO6 |
3452 NETIF_F_HW_VLAN_TX;
3453 if (vdev->config.rth_steering != NO_STEERING)
3454 ndev->hw_features |= NETIF_F_RXHASH;
3455
3456 ndev->features |= ndev->hw_features |
3457 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3458
3435 /* Driver entry points */ 3459 /* Driver entry points */
3436 ndev->irq = vdev->pdev->irq; 3460 ndev->irq = vdev->pdev->irq;
3437 ndev->base_addr = (unsigned long) hldev->bar0; 3461 ndev->base_addr = (unsigned long) hldev->bar0;
@@ -3443,11 +3467,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3443 3467
3444 vxge_initialize_ethtool_ops(ndev); 3468 vxge_initialize_ethtool_ops(ndev);
3445 3469
3446 if (vdev->config.rth_steering != NO_STEERING) {
3447 ndev->features |= NETIF_F_RXHASH;
3448 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3449 }
3450
3451 /* Allocate memory for vpath */ 3470 /* Allocate memory for vpath */
3452 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3471 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3453 no_of_vpath, GFP_KERNEL); 3472 no_of_vpath, GFP_KERNEL);
@@ -3459,9 +3478,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3459 goto _out1; 3478 goto _out1;
3460 } 3479 }
3461 3480
3462 ndev->features |= NETIF_F_SG;
3463
3464 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3465 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3481 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3466 "%s : checksuming enabled", __func__); 3482 "%s : checksuming enabled", __func__);
3467 3483
@@ -3471,11 +3487,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3471 "%s : using High DMA", __func__); 3487 "%s : using High DMA", __func__);
3472 } 3488 }
3473 3489
3474 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
3475
3476 if (vdev->config.gro_enable)
3477 ndev->features |= NETIF_F_GRO;
3478
3479 ret = register_netdev(ndev); 3490 ret = register_netdev(ndev);
3480 if (ret) { 3491 if (ret) {
3481 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3492 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
@@ -4005,15 +4016,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
4005 vdev->config.tx_steering_type = 0; 4016 vdev->config.tx_steering_type = 0;
4006 } 4017 }
4007 4018
4008 if (vdev->config.gro_enable) {
4009 vxge_debug_init(VXGE_ERR,
4010 "%s: Generic receive offload enabled",
4011 vdev->ndev->name);
4012 } else
4013 vxge_debug_init(VXGE_TRACE,
4014 "%s: Generic receive offload disabled",
4015 vdev->ndev->name);
4016
4017 if (vdev->config.addr_learn_en) 4019 if (vdev->config.addr_learn_en)
4018 vxge_debug_init(VXGE_TRACE, 4020 vxge_debug_init(VXGE_TRACE,
4019 "%s: MAC Address learning enabled", vdev->ndev->name); 4021 "%s: MAC Address learning enabled", vdev->ndev->name);
@@ -4575,12 +4577,29 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4575 goto _exit4; 4577 goto _exit4;
4576 } 4578 }
4577 4579
4580 /* Always enable HWTS. This will always cause the FCS to be invalid,
4581 * due to the fact that HWTS is using the FCS as the location of the
4582 * timestamp. The HW FCS checking will still correctly determine if
4583 * there is a valid checksum, and the FCS is being removed by the driver
4584 * anyway. So no functionality is being lost. Since it is always
4585 * enabled, we now simply use the ioctl call to set whether or not the
4586 * driver should be paying attention to the HWTS.
4587 */
4588 if (is_privileged == VXGE_HW_OK) {
4589 status = vxge_timestamp_config(hldev);
4590 if (status != VXGE_HW_OK) {
4591 vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4592 VXGE_DRIVER_NAME);
4593 ret = -EFAULT;
4594 goto _exit4;
4595 }
4596 }
4597
4578 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); 4598 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4579 4599
4580 /* set private device info */ 4600 /* set private device info */
4581 pci_set_drvdata(pdev, hldev); 4601 pci_set_drvdata(pdev, hldev);
4582 4602
4583 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
4584 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4603 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4585 ll_config->addr_learn_en = addr_learn_en; 4604 ll_config->addr_learn_en = addr_learn_en;
4586 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4605 ll_config->rth_algorithm = RTH_ALG_JENKINS;
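
The new vxge_fix_features()/vxge_set_features() pair above splits the work: ndo_fix_features masks out changes that are not allowed in the current state, ndo_set_features applies whatever survives. A generic sketch of the fix_features side, mirroring the RXHASH constraint from the patch:

#include <linux/netdevice.h>

/* Refuse to toggle receive hashing while the interface is running by
 * reverting that one bit; all other requested changes pass through.
 */
static u32 example_fix_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if ((changed & NETIF_F_RXHASH) && netif_running(dev))
		features ^= NETIF_F_RXHASH;

	return features;
}
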
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 40474f0da576..ed120aba443d 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -168,9 +168,6 @@ struct vxge_config {
168 168
169#define NEW_NAPI_WEIGHT 64 169#define NEW_NAPI_WEIGHT 64
170 int napi_weight; 170 int napi_weight;
171#define VXGE_GRO_DONOT_AGGREGATE 0
172#define VXGE_GRO_ALWAYS_AGGREGATE 1
173 int gro_enable;
174 int intr_type; 171 int intr_type;
175#define INTA 0 172#define INTA 0
176#define MSI 1 173#define MSI 1
@@ -290,13 +287,11 @@ struct vxge_ring {
290 unsigned long interrupt_count; 287 unsigned long interrupt_count;
291 unsigned long jiffies; 288 unsigned long jiffies;
292 289
293 /* copy of the flag indicating whether rx_csum is to be used */ 290 /* copy of the flag indicating whether rx_hwts is to be used */
294 u32 rx_csum:1, 291 u32 rx_hwts:1;
295 rx_hwts:1;
296 292
297 int pkts_processed; 293 int pkts_processed;
298 int budget; 294 int budget;
299 int gro_enable;
300 295
301 struct napi_struct napi; 296 struct napi_struct napi;
302 struct napi_struct *napi_p; 297 struct napi_struct *napi_p;
@@ -369,9 +364,8 @@ struct vxgedev {
369 */ 364 */
370 u16 all_multi_flg; 365 u16 all_multi_flg;
371 366
372 /* A flag indicating whether rx_csum is to be used or not. */ 367 /* A flag indicating whether rx_hwts is to be used or not. */
373 u32 rx_csum:1, 368 u32 rx_hwts:1,
374 rx_hwts:1,
375 titan1:1; 369 titan1:1;
376 370
377 struct vxge_msix_entry *vxge_entries; 371 struct vxge_msix_entry *vxge_entries;
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6c2fc0b72af5..4a518a3b131c 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -240,7 +240,7 @@ struct vxge_hw_tim_intr_config {
240 u32 btimer_val; 240 u32 btimer_val;
241#define VXGE_HW_MIN_TIM_BTIMER_VAL 0 241#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
242#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864 242#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
243#define VXGE_HW_USE_FLASH_DEFAULT 0xffffffff 243#define VXGE_HW_USE_FLASH_DEFAULT (~0)
244 244
245 u32 timer_ac_en; 245 u32 timer_ac_en;
246#define VXGE_HW_TIM_TIMER_AC_ENABLE 1 246#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 581e21525e85..b9efa28bab3e 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "5" 18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "2" 19#define VXGE_VERSION_FIX "3"
20#define VXGE_VERSION_BUILD "22259" 20#define VXGE_VERSION_BUILD "22640"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22 22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) 23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 1481a446fefb..21b104db5a90 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -341,10 +341,6 @@ static int dlci_add(struct dlci_add *dlci)
 		}
 	}
 
-	err = dev_alloc_name(master, master->name);
-	if (err < 0)
-		goto err2;
-
 	*(short *)(master->dev_addr) = dlci->dlci;
 
 	dlp = netdev_priv(master);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0edb535bb2b5..fc433f28c047 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1070,7 +1070,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
 	hdlc_device *hdlc = dev_to_hdlc(frad);
 	pvc_device *pvc;
 	struct net_device *dev;
-	int result, used;
+	int used;
 
 	if ((pvc = add_pvc(frad, dlci)) == NULL) {
 		printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
@@ -1106,13 +1106,6 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
 	dev->tx_queue_len = 0;
 	dev->ml_priv = pvc;
 
-	result = dev_alloc_name(dev, dev->name);
-	if (result < 0) {
-		free_netdev(dev);
-		delete_unused_pvcs(hdlc);
-		return result;
-	}
-
 	if (register_netdevice(dev) != 0) {
 		free_netdev(dev);
 		delete_unused_pvcs(hdlc);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 7f5bb913c8b9..eec463f99c09 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -338,10 +338,6 @@ static int lapbeth_new_device(struct net_device *dev)
 	dev_hold(dev);
 	lapbeth->ethdev = dev;
 
-	rc = dev_alloc_name(ndev, ndev->name);
-	if (rc < 0)
-		goto fail;
-
 	rc = -EIO;
 	if (register_netdevice(ndev))
 		goto fail;
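The dlci.c, hdlc_fr.c and lapbether.c hunks above all drop the same pattern: an explicit dev_alloc_name() call right before register_netdevice(). By this kernel version register_netdevice() resolves a "%d"-style interface name on its own, which is presumably why the driver-side calls became redundant. A userspace sketch of what that name templating amounts to (pick_unit() and the sample names are illustrative, not kernel code):

#include <stdio.h>
#include <string.h>

/* Fill a "%d" template (e.g. "dlci%d") with the lowest unit number not
 * already taken -- roughly the behaviour dev_alloc_name() provides for
 * a net_device name. */
static int pick_unit(const char *taken[], int ntaken, const char *tmpl,
		     char *out, size_t outlen)
{
	for (int unit = 0; unit < 100; unit++) {
		int busy = 0;

		snprintf(out, outlen, tmpl, unit);
		for (int i = 0; i < ntaken; i++)
			if (strcmp(out, taken[i]) == 0)
				busy = 1;
		if (!busy)
			return unit;
	}
	return -1;
}

int main(void)
{
	const char *taken[] = { "dlci0", "dlci1" };
	char name[16];

	pick_unit(taken, 2, "dlci%d", name, sizeof(name));
	printf("allocated %s\n", name);		/* prints "allocated dlci2" */
	return 0;
}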
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 7aeb113cbb90..f354bd4e121e 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -284,5 +284,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig"
 source "drivers/net/wireless/wl1251/Kconfig"
 source "drivers/net/wireless/wl12xx/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
+source "drivers/net/wireless/mwifiex/Kconfig"
 
 endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index ddd3fb6ba1d3..7bba6a82b875 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -56,3 +56,5 @@ obj-$(CONFIG_WL12XX) += wl12xx/
 obj-$(CONFIG_WL12XX_PLATFORM_DATA)	+= wl12xx/
 
 obj-$(CONFIG_IWM)	+= iwmc3200wifi/
+
+obj-$(CONFIG_MWIFIEX)	+= mwifiex/
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 92c216263ee9..d1b23067619f 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -24,7 +24,6 @@ config ATH_DEBUG
 
 source "drivers/net/wireless/ath/ath5k/Kconfig"
 source "drivers/net/wireless/ath/ath9k/Kconfig"
-source "drivers/net/wireless/ath/ar9170/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
 
 endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 6d711ec97ec2..0e8f528c81c0 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,5 @@
 obj-$(CONFIG_ATH5K)		+= ath5k/
 obj-$(CONFIG_ATH9K_HW)		+= ath9k/
-obj-$(CONFIG_AR9170_USB)	+= ar9170/
 obj-$(CONFIG_CARL9170)		+= carl9170/
 
 obj-$(CONFIG_ATH_COMMON)	+= ath.o
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
deleted file mode 100644
index 7b9672b0d090..000000000000
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
1config AR9170_USB
2 tristate "Atheros AR9170 802.11n USB support (OBSOLETE)"
3 depends on USB && MAC80211
4 select FW_LOADER
5 help
6 This driver is going to get replaced by carl9170.
7
8 This is a driver for the Atheros "otus" 802.11n USB devices.
9
10 These devices require additional firmware (2 files).
11 For now, these files can be downloaded from here:
12
13 http://wireless.kernel.org/en/users/Drivers/ar9170
14
15 If you choose to build a module, it'll be called ar9170usb.
16
17config AR9170_LEDS
18 bool
19 depends on AR9170_USB && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_USB)
20 default y
diff --git a/drivers/net/wireless/ath/ar9170/Makefile b/drivers/net/wireless/ath/ar9170/Makefile
deleted file mode 100644
index 8d91c7ee3215..000000000000
--- a/drivers/net/wireless/ath/ar9170/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1ar9170usb-objs := usb.o main.o cmd.o mac.o phy.o led.o
2
3obj-$(CONFIG_AR9170_USB) += ar9170usb.o
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
deleted file mode 100644
index 371e4ce49528..000000000000
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_H
39#define __AR9170_H
40
41#include <linux/completion.h>
42#include <linux/spinlock.h>
43#include <net/cfg80211.h>
44#include <net/mac80211.h>
45#ifdef CONFIG_AR9170_LEDS
46#include <linux/leds.h>
47#endif /* CONFIG_AR9170_LEDS */
48#include "eeprom.h"
49#include "hw.h"
50
51#include "../regd.h"
52
53#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1)
54
55enum ar9170_bw {
56 AR9170_BW_20,
57 AR9170_BW_40_BELOW,
58 AR9170_BW_40_ABOVE,
59
60 __AR9170_NUM_BW,
61};
62
63static inline enum ar9170_bw nl80211_to_ar9170(enum nl80211_channel_type type)
64{
65 switch (type) {
66 case NL80211_CHAN_NO_HT:
67 case NL80211_CHAN_HT20:
68 return AR9170_BW_20;
69 case NL80211_CHAN_HT40MINUS:
70 return AR9170_BW_40_BELOW;
71 case NL80211_CHAN_HT40PLUS:
72 return AR9170_BW_40_ABOVE;
73 default:
74 BUG();
75 }
76}
77
78enum ar9170_rf_init_mode {
79 AR9170_RFI_NONE,
80 AR9170_RFI_WARM,
81 AR9170_RFI_COLD,
82};
83
84#define AR9170_MAX_RX_BUFFER_SIZE 8192
85
86#ifdef CONFIG_AR9170_LEDS
87struct ar9170;
88
89struct ar9170_led {
90 struct ar9170 *ar;
91 struct led_classdev l;
92 char name[32];
93 unsigned int toggled;
94 bool last_state;
95 bool registered;
96};
97
98#endif /* CONFIG_AR9170_LEDS */
99
100enum ar9170_device_state {
101 AR9170_UNKNOWN_STATE,
102 AR9170_STOPPED,
103 AR9170_IDLE,
104 AR9170_STARTED,
105};
106
107struct ar9170_rxstream_mpdu_merge {
108 struct ar9170_rx_head plcp;
109 bool has_plcp;
110};
111
112struct ar9170_tx_queue_stats {
113 unsigned int len;
114 unsigned int limit;
115 unsigned int count;
116};
117
118#define AR9170_QUEUE_TIMEOUT 64
119#define AR9170_TX_TIMEOUT 8
120#define AR9170_JANITOR_DELAY 128
121#define AR9170_TX_INVALID_RATE 0xffffffff
122
123#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
124#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
125
126struct ar9170 {
127 struct ieee80211_hw *hw;
128 struct ath_common common;
129 struct mutex mutex;
130 enum ar9170_device_state state;
131 bool registered;
132 unsigned long bad_hw_nagger;
133
134 int (*open)(struct ar9170 *);
135 void (*stop)(struct ar9170 *);
136 int (*tx)(struct ar9170 *, struct sk_buff *);
137 int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32 ,
138 void *, u32 , void *);
139 void (*callback_cmd)(struct ar9170 *, u32 , void *);
140 int (*flush)(struct ar9170 *);
141
142 /* interface mode settings */
143 struct ieee80211_vif *vif;
144
145 /* beaconing */
146 struct sk_buff *beacon;
147 struct work_struct beacon_work;
148 bool enable_beacon;
149
150 /* cryptographic engine */
151 u64 usedkeys;
152 bool rx_software_decryption;
153 bool disable_offload;
154
155 /* filter settings */
156 u64 cur_mc_hash;
157 u32 cur_filter;
158 unsigned int filter_state;
159 bool sniffer_enabled;
160
161 /* PHY */
162 struct ieee80211_channel *channel;
163 int noise[4];
164
165 /* power calibration data */
166 u8 power_5G_leg[4];
167 u8 power_2G_cck[4];
168 u8 power_2G_ofdm[4];
169 u8 power_5G_ht20[8];
170 u8 power_5G_ht40[8];
171 u8 power_2G_ht20[8];
172 u8 power_2G_ht40[8];
173
174 u8 phy_heavy_clip;
175
176#ifdef CONFIG_AR9170_LEDS
177 struct delayed_work led_work;
178 struct ar9170_led leds[AR9170_NUM_LEDS];
179#endif /* CONFIG_AR9170_LEDS */
180
181 /* qos queue settings */
182 spinlock_t tx_stats_lock;
183 struct ar9170_tx_queue_stats tx_stats[5];
184 struct ieee80211_tx_queue_params edcf[5];
185
186 spinlock_t cmdlock;
187 __le32 cmdbuf[PAYLOAD_MAX + 1];
188
189 /* MAC statistics */
190 struct ieee80211_low_level_stats stats;
191
192 /* EEPROM */
193 struct ar9170_eeprom eeprom;
194
195 /* tx queues - as seen by hw - */
196 struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
197 struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
198 struct delayed_work tx_janitor;
199
200 /* rxstream mpdu merge */
201 struct ar9170_rxstream_mpdu_merge rx_mpdu;
202 struct sk_buff *rx_failover;
203 int rx_failover_missing;
204
205 /* (cached) HW A-MPDU settings */
206 u8 global_ampdu_density;
207 u8 global_ampdu_factor;
208};
209
210struct ar9170_tx_info {
211 unsigned long timeout;
212};
213
214#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
215#define IS_ACCEPTING_CMD(a) (((struct ar9170 *)a)->state >= AR9170_IDLE)
216
217/* exported interface */
218void *ar9170_alloc(size_t priv_size);
219int ar9170_register(struct ar9170 *ar, struct device *pdev);
220void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
221void ar9170_unregister(struct ar9170 *ar);
222void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
223void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
224int ar9170_nag_limiter(struct ar9170 *ar);
225
226/* MAC */
227void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
228int ar9170_init_mac(struct ar9170 *ar);
229int ar9170_set_qos(struct ar9170 *ar);
230int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
231int ar9170_update_frame_filter(struct ar9170 *ar, const u32 filter);
232int ar9170_set_operating_mode(struct ar9170 *ar);
233int ar9170_set_beacon_timers(struct ar9170 *ar);
234int ar9170_set_dyn_sifs_ack(struct ar9170 *ar);
235int ar9170_set_slot_time(struct ar9170 *ar);
236int ar9170_set_basic_rates(struct ar9170 *ar);
237int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry);
238int ar9170_update_beacon(struct ar9170 *ar);
239void ar9170_new_beacon(struct work_struct *work);
240int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
241 u8 keyidx, u8 *keydata, int keylen);
242int ar9170_disable_key(struct ar9170 *ar, u8 id);
243
244/* LEDs */
245#ifdef CONFIG_AR9170_LEDS
246int ar9170_register_leds(struct ar9170 *ar);
247void ar9170_unregister_leds(struct ar9170 *ar);
248#endif /* CONFIG_AR9170_LEDS */
249int ar9170_init_leds(struct ar9170 *ar);
250int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state);
251
252/* PHY / RF */
253int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band);
254int ar9170_init_rf(struct ar9170 *ar);
255int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
256 enum ar9170_rf_init_mode rfi, enum ar9170_bw bw);
257
258#endif /* __AR9170_H */
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
deleted file mode 100644
index 6452c5055a63..000000000000
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "ar9170.h"
40#include "cmd.h"
41
42int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
43{
44 int err;
45
46 if (unlikely(!IS_ACCEPTING_CMD(ar)))
47 return 0;
48
49 err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL);
50 if (err)
51 wiphy_debug(ar->hw->wiphy, "writing memory failed\n");
52 return err;
53}
54
55int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
56{
57 const __le32 buf[2] = {
58 cpu_to_le32(reg),
59 cpu_to_le32(val),
60 };
61 int err;
62
63 if (unlikely(!IS_ACCEPTING_CMD(ar)))
64 return 0;
65
66 err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf),
67 (u8 *) buf, 0, NULL);
68 if (err)
69 wiphy_debug(ar->hw->wiphy, "writing reg %#x (val %#x) failed\n",
70 reg, val);
71 return err;
72}
73
74int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out)
75{
76 int i, err;
77 __le32 *offs, *res;
78
79 if (unlikely(!IS_ACCEPTING_CMD(ar)))
80 return 0;
81
82 /* abuse "out" for the register offsets, must be same length */
83 offs = (__le32 *)out;
84 for (i = 0; i < nregs; i++)
85 offs[i] = cpu_to_le32(regs[i]);
86
87 /* also use the same buffer for the input */
88 res = (__le32 *)out;
89
90 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
91 4 * nregs, (u8 *)offs,
92 4 * nregs, (u8 *)res);
93 if (err)
94 return err;
95
96 /* convert result to cpu endian */
97 for (i = 0; i < nregs; i++)
98 out[i] = le32_to_cpu(res[i]);
99
100 return 0;
101}
102
103int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
104{
105 return ar9170_read_mreg(ar, 1, &reg, val);
106}
107
108int ar9170_echo_test(struct ar9170 *ar, u32 v)
109{
110 __le32 echobuf = cpu_to_le32(v);
111 __le32 echores;
112 int err;
113
114 if (unlikely(!IS_ACCEPTING_CMD(ar)))
115 return -ENODEV;
116
117 err = ar->exec_cmd(ar, AR9170_CMD_ECHO,
118 4, (u8 *)&echobuf,
119 4, (u8 *)&echores);
120 if (err)
121 return err;
122
123 if (echobuf != echores)
124 return -EINVAL;
125
126 return 0;
127}
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
deleted file mode 100644
index ec8134b4b949..000000000000
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __CMD_H
39#define __CMD_H
40
41#include "ar9170.h"
42
43/* basic HW access */
44int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
45int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
47int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out);
48int ar9170_echo_test(struct ar9170 *ar, u32 v);
49
50/*
51 * Macros to facilitate writing multiple registers in a single
52 * write-combining USB command. Note that when the first group
53 * fails the whole thing will fail without any others attempted,
54 * but you won't know which write in the group failed.
55 */
56#define ar9170_regwrite_begin(ar) \
57do { \
58 int __nreg = 0, __err = 0; \
59 struct ar9170 *__ar = ar;
60
61#define ar9170_regwrite(r, v) do { \
62 __ar->cmdbuf[2 * __nreg + 1] = cpu_to_le32(r); \
63 __ar->cmdbuf[2 * __nreg + 2] = cpu_to_le32(v); \
64 __nreg++; \
65 if ((__nreg >= PAYLOAD_MAX/2)) { \
66 if (IS_ACCEPTING_CMD(__ar)) \
67 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
68 8 * __nreg, \
69 (u8 *) &__ar->cmdbuf[1], \
70 0, NULL); \
71 __nreg = 0; \
72 if (__err) \
73 goto __regwrite_out; \
74 } \
75} while (0)
76
77#define ar9170_regwrite_finish() \
78__regwrite_out : \
79 if (__nreg) { \
80 if (IS_ACCEPTING_CMD(__ar)) \
81 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
82 8 * __nreg, \
83 (u8 *) &__ar->cmdbuf[1], \
84 0, NULL); \
85 __nreg = 0; \
86 }
87
88#define ar9170_regwrite_result() \
89 __err; \
90} while (0);
91
92#endif /* __CMD_H */
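The comment block in the header above describes what these macros are for: queue many register writes and push them to the firmware as one write-combining command. A self-contained userspace sketch of the same batching idea (reg_batch and flush_batch() are illustrative stand-ins, not the driver's API):

#include <stdint.h>
#include <stdio.h>

#define BATCH_MAX 8	/* pairs per command, in the spirit of PAYLOAD_MAX/2 */

struct reg_batch {
	uint32_t buf[2 * BATCH_MAX];	/* interleaved reg,val pairs */
	int n;
};

/* Stand-in for the single exec_cmd(AR9170_CMD_WREG, ...) call: send
 * everything queued so far as one command. */
static void flush_batch(struct reg_batch *b)
{
	if (!b->n)
		return;
	printf("flushing %d register writes in one command\n", b->n);
	b->n = 0;
}

static void queue_write(struct reg_batch *b, uint32_t reg, uint32_t val)
{
	b->buf[2 * b->n] = reg;
	b->buf[2 * b->n + 1] = val;
	if (++b->n == BATCH_MAX)
		flush_batch(b);		/* buffer full: auto-flush */
}

int main(void)
{
	struct reg_batch b = { .n = 0 };

	for (uint32_t i = 0; i < 10; i++)
		queue_write(&b, 0x1c3000 + 4 * i, i);
	flush_batch(&b);	/* final partial batch, like _regwrite_finish() */
	return 0;
}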
diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
deleted file mode 100644
index 6c4663883423..000000000000
--- a/drivers/net/wireless/ath/ar9170/eeprom.h
+++ /dev/null
@@ -1,179 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * EEPROM layout
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_EEPROM_H
39#define __AR9170_EEPROM_H
40
41#define AR5416_MAX_CHAINS 2
42#define AR5416_MODAL_SPURS 5
43
44struct ar9170_eeprom_modal {
45 __le32 antCtrlChain[AR5416_MAX_CHAINS];
46 __le32 antCtrlCommon;
47 s8 antennaGainCh[AR5416_MAX_CHAINS];
48 u8 switchSettling;
49 u8 txRxAttenCh[AR5416_MAX_CHAINS];
50 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
51 s8 adcDesiredSize;
52 s8 pgaDesiredSize;
53 u8 xlnaGainCh[AR5416_MAX_CHAINS];
54 u8 txEndToXpaOff;
55 u8 txEndToRxOn;
56 u8 txFrameToXpaOn;
57 u8 thresh62;
58 s8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
59 u8 xpdGain;
60 u8 xpd;
61 s8 iqCalICh[AR5416_MAX_CHAINS];
62 s8 iqCalQCh[AR5416_MAX_CHAINS];
63 u8 pdGainOverlap;
64 u8 ob;
65 u8 db;
66 u8 xpaBiasLvl;
67 u8 pwrDecreaseFor2Chain;
68 u8 pwrDecreaseFor3Chain;
69 u8 txFrameToDataStart;
70 u8 txFrameToPaOn;
71 u8 ht40PowerIncForPdadc;
72 u8 bswAtten[AR5416_MAX_CHAINS];
73 u8 bswMargin[AR5416_MAX_CHAINS];
74 u8 swSettleHt40;
75 u8 reserved[22];
76 struct spur_channel {
77 __le16 spurChan;
78 u8 spurRangeLow;
79 u8 spurRangeHigh;
80 } __packed spur_channels[AR5416_MODAL_SPURS];
81} __packed;
82
83#define AR5416_NUM_PD_GAINS 4
84#define AR5416_PD_GAIN_ICEPTS 5
85
86struct ar9170_calibration_data_per_freq {
87 u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
88 u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
89} __packed;
90
91#define AR5416_NUM_5G_CAL_PIERS 8
92#define AR5416_NUM_2G_CAL_PIERS 4
93
94#define AR5416_NUM_5G_TARGET_PWRS 8
95#define AR5416_NUM_2G_CCK_TARGET_PWRS 3
96#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4
97#define AR5416_MAX_NUM_TGT_PWRS 8
98
99struct ar9170_calibration_target_power_legacy {
100 u8 freq;
101 u8 power[4];
102} __packed;
103
104struct ar9170_calibration_target_power_ht {
105 u8 freq;
106 u8 power[8];
107} __packed;
108
109#define AR5416_NUM_CTLS 24
110
111struct ar9170_calctl_edges {
112 u8 channel;
113#define AR9170_CALCTL_EDGE_FLAGS 0xC0
114 u8 power_flags;
115} __packed;
116
117#define AR5416_NUM_BAND_EDGES 8
118
119struct ar9170_calctl_data {
120 struct ar9170_calctl_edges
121 control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
122} __packed;
123
124
125struct ar9170_eeprom {
126 __le16 length;
127 __le16 checksum;
128 __le16 version;
129 u8 operating_flags;
130#define AR9170_OPFLAG_5GHZ 1
131#define AR9170_OPFLAG_2GHZ 2
132 u8 misc;
133 __le16 reg_domain[2];
134 u8 mac_address[6];
135 u8 rx_mask;
136 u8 tx_mask;
137 __le16 rf_silent;
138 __le16 bluetooth_options;
139 __le16 device_capabilities;
140 __le32 build_number;
141 u8 deviceType;
142 u8 reserved[33];
143
144 u8 customer_data[64];
145
146 struct ar9170_eeprom_modal
147 modal_header[2];
148
149 u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS];
150 u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS];
151
152 struct ar9170_calibration_data_per_freq
153 cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS],
154 cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
155
156 /* power calibration data */
157 struct ar9170_calibration_target_power_legacy
158 cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS];
159 struct ar9170_calibration_target_power_ht
160 cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS],
161 cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS];
162
163 struct ar9170_calibration_target_power_legacy
164 cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS],
165 cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS];
166 struct ar9170_calibration_target_power_ht
167 cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS],
168 cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS];
169
170 /* conformance testing limits */
171 u8 ctl_index[AR5416_NUM_CTLS];
172 struct ar9170_calctl_data
173 ctl_data[AR5416_NUM_CTLS];
174
175 u8 pad;
176 __le16 subsystem_id;
177} __packed;
178
179#endif /* __AR9170_EEPROM_H */
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
deleted file mode 100644
index 06f1f3c951a4..000000000000
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ /dev/null
@@ -1,430 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * Hardware-specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __AR9170_HW_H
39#define __AR9170_HW_H
40
41#define AR9170_MAX_CMD_LEN 64
42
43enum ar9170_cmd {
44 AR9170_CMD_RREG = 0x00,
45 AR9170_CMD_WREG = 0x01,
46 AR9170_CMD_RMEM = 0x02,
47 AR9170_CMD_WMEM = 0x03,
48 AR9170_CMD_BITAND = 0x04,
49 AR9170_CMD_BITOR = 0x05,
50 AR9170_CMD_EKEY = 0x28,
51 AR9170_CMD_DKEY = 0x29,
52 AR9170_CMD_FREQUENCY = 0x30,
53 AR9170_CMD_RF_INIT = 0x31,
54 AR9170_CMD_SYNTH = 0x32,
55 AR9170_CMD_FREQ_START = 0x33,
56 AR9170_CMD_ECHO = 0x80,
57 AR9170_CMD_TALLY = 0x81,
58 AR9170_CMD_TALLY_APD = 0x82,
59 AR9170_CMD_CONFIG = 0x83,
60 AR9170_CMD_RESET = 0x90,
61 AR9170_CMD_DKRESET = 0x91,
62 AR9170_CMD_DKTX_STATUS = 0x92,
63 AR9170_CMD_FDC = 0xA0,
64 AR9170_CMD_WREEPROM = 0xB0,
65 AR9170_CMD_WFLASH = 0xB0,
66 AR9170_CMD_FLASH_ERASE = 0xB1,
67 AR9170_CMD_FLASH_PROG = 0xB2,
68 AR9170_CMD_FLASH_CHKSUM = 0xB3,
69 AR9170_CMD_FLASH_READ = 0xB4,
70 AR9170_CMD_FW_DL_INIT = 0xB5,
71 AR9170_CMD_MEM_WREEPROM = 0xBB,
72};
73
74/* endpoints */
75#define AR9170_EP_TX 1
76#define AR9170_EP_RX 2
77#define AR9170_EP_IRQ 3
78#define AR9170_EP_CMD 4
79
80#define AR9170_EEPROM_START 0x1600
81
82#define AR9170_GPIO_REG_BASE 0x1d0100
83#define AR9170_GPIO_REG_PORT_TYPE AR9170_GPIO_REG_BASE
84#define AR9170_GPIO_REG_DATA (AR9170_GPIO_REG_BASE + 4)
85#define AR9170_NUM_LEDS 2
86
87
88#define AR9170_USB_REG_BASE 0x1e1000
89#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
90#define AR9170_DMA_CTL_ENABLE_TO_DEVICE 0x1
91#define AR9170_DMA_CTL_ENABLE_FROM_DEVICE 0x2
92#define AR9170_DMA_CTL_HIGH_SPEED 0x4
93#define AR9170_DMA_CTL_PACKET_MODE 0x8
94
95#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
96#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
97
98
99
100#define AR9170_MAC_REG_BASE 0x1c3000
101
102#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
103#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
104
105#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51C)
106#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
107#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
108
109#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
110#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
111#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
112#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
113
114#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
115#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
116
117#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62C)
118
119#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
120#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
121#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
122#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
123#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
124#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64C)
125
126#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
127#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
128#define AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC BIT(0)
129#define AR9170_MAC_REG_SNIFFER_DEFAULTS 0x02000000
130#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
131#define AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE BIT(3)
132#define AR9170_MAC_REG_ENCRYPTION_DEFAULTS 0x70
133
134#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
135#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
136
137#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
138#define AR9170_MAC_REG_FTF_ASSOC_REQ BIT(0)
139#define AR9170_MAC_REG_FTF_ASSOC_RESP BIT(1)
140#define AR9170_MAC_REG_FTF_REASSOC_REQ BIT(2)
141#define AR9170_MAC_REG_FTF_REASSOC_RESP BIT(3)
142#define AR9170_MAC_REG_FTF_PRB_REQ BIT(4)
143#define AR9170_MAC_REG_FTF_PRB_RESP BIT(5)
144#define AR9170_MAC_REG_FTF_BIT6 BIT(6)
145#define AR9170_MAC_REG_FTF_BIT7 BIT(7)
146#define AR9170_MAC_REG_FTF_BEACON BIT(8)
147#define AR9170_MAC_REG_FTF_ATIM BIT(9)
148#define AR9170_MAC_REG_FTF_DEASSOC BIT(10)
149#define AR9170_MAC_REG_FTF_AUTH BIT(11)
150#define AR9170_MAC_REG_FTF_DEAUTH BIT(12)
151#define AR9170_MAC_REG_FTF_BIT13 BIT(13)
152#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
153#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
154#define AR9170_MAC_REG_FTF_BAR BIT(24)
155#define AR9170_MAC_REG_FTF_BA BIT(25)
156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
157#define AR9170_MAC_REG_FTF_RTS BIT(27)
158#define AR9170_MAC_REG_FTF_CTS BIT(28)
159#define AR9170_MAC_REG_FTF_ACK BIT(29)
160#define AR9170_MAC_REG_FTF_CFE BIT(30)
161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0700ffff
163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
164
165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
166#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6A4)
167#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6A8)
168#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6AC)
169#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6B0)
170#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6BC)
171#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6CC)
172#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6F4)
173
174
175#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
176#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
177
178#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6F0)
179
180#define AR9170_MAC_REG_POWERMANAGEMENT (AR9170_MAC_REG_BASE + 0x700)
181#define AR9170_MAC_REG_POWERMGT_IBSS 0xe0
182#define AR9170_MAC_REG_POWERMGT_AP 0xa1
183#define AR9170_MAC_REG_POWERMGT_STA 0x2
184#define AR9170_MAC_REG_POWERMGT_AP_WDS 0x3
185#define AR9170_MAC_REG_POWERMGT_DEFAULTS (0xf << 24)
186
187#define AR9170_MAC_REG_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
188#define AR9170_MAC_REG_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
189
190#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xB00)
191#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xB04)
192#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xB08)
193#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xB0C)
194#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xB10)
195#define AR9170_MAC_REG_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xB14)
196#define AR9170_MAC_REG_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xB18)
197
198#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xB28)
199
200#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xBB0)
201#define AR9170_MAC_FCS_SWFCS 0x1
202#define AR9170_MAC_FCS_FIFO_PROT 0x4
203
204
205#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xB30)
206
207#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44)
208#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48)
209
210#define AR9170_MAC_REG_AMPDU_FACTOR (AR9170_MAC_REG_BASE + 0xB9C)
211#define AR9170_MAC_REG_AMPDU_DENSITY (AR9170_MAC_REG_BASE + 0xBA0)
212
213#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00)
214#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50)
215
216#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xD7C)
217#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
218#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
219#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
220#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
221
222#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xD84)
223#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xD88)
224#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xD90)
225#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xD94)
226#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xDA0)
227#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xDA4)
228
229
230#define AR9170_PWR_REG_BASE 0x1D4000
231
232#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
233#define AR9170_PWR_CLK_AHB_40MHZ 0
234#define AR9170_PWR_CLK_AHB_20_22MHZ 1
235#define AR9170_PWR_CLK_AHB_40_44MHZ 2
236#define AR9170_PWR_CLK_AHB_80_88MHZ 3
237#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
238
239
240/* put beacon here in memory */
241#define AR9170_BEACON_BUFFER_ADDRESS 0x117900
242
243
244struct ar9170_tx_control {
245 __le16 length;
246 __le16 mac_control;
247 __le32 phy_control;
248 u8 frame_data[0];
249} __packed;
250
251/* these are either-or */
252#define AR9170_TX_MAC_PROT_RTS 0x0001
253#define AR9170_TX_MAC_PROT_CTS 0x0002
254
255#define AR9170_TX_MAC_NO_ACK 0x0004
256/* if unset, MAC will only do SIFS space before frame */
257#define AR9170_TX_MAC_BACKOFF 0x0008
258#define AR9170_TX_MAC_BURST 0x0010
259#define AR9170_TX_MAC_AGGR 0x0020
260
261/* encryption is a two-bit field */
262#define AR9170_TX_MAC_ENCR_NONE 0x0000
263#define AR9170_TX_MAC_ENCR_RC4 0x0040
264#define AR9170_TX_MAC_ENCR_CENC 0x0080
265#define AR9170_TX_MAC_ENCR_AES 0x00c0
266
267#define AR9170_TX_MAC_MMIC 0x0100
268#define AR9170_TX_MAC_HW_DURATION 0x0200
269#define AR9170_TX_MAC_QOS_SHIFT 10
270#define AR9170_TX_MAC_QOS_MASK (3 << AR9170_TX_MAC_QOS_SHIFT)
271#define AR9170_TX_MAC_AGGR_QOS_BIT1 0x0400
272#define AR9170_TX_MAC_AGGR_QOS_BIT2 0x0800
273#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
274#define AR9170_TX_MAC_TXOP_RIFS 0x2000
275#define AR9170_TX_MAC_IMM_AMPDU 0x4000
276#define AR9170_TX_MAC_RATE_PROBE 0x8000
277
278/* either-or */
279#define AR9170_TX_PHY_MOD_MASK 0x00000003
280#define AR9170_TX_PHY_MOD_CCK 0x00000000
281#define AR9170_TX_PHY_MOD_OFDM 0x00000001
282#define AR9170_TX_PHY_MOD_HT 0x00000002
283
284/* depends on modulation */
285#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
286#define AR9170_TX_PHY_GREENFIELD 0x00000004
287
288#define AR9170_TX_PHY_BW_SHIFT 3
289#define AR9170_TX_PHY_BW_MASK (3 << AR9170_TX_PHY_BW_SHIFT)
290#define AR9170_TX_PHY_BW_20MHZ 0
291#define AR9170_TX_PHY_BW_40MHZ 2
292#define AR9170_TX_PHY_BW_40MHZ_DUP 3
293
294#define AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT 6
295#define AR9170_TX_PHY_TX_HEAVY_CLIP_MASK (7 << AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT)
296
297#define AR9170_TX_PHY_TX_PWR_SHIFT 9
298#define AR9170_TX_PHY_TX_PWR_MASK (0x3f << AR9170_TX_PHY_TX_PWR_SHIFT)
299
300/* not part of the hw-spec */
301#define AR9170_TX_PHY_QOS_SHIFT 25
302#define AR9170_TX_PHY_QOS_MASK (3 << AR9170_TX_PHY_QOS_SHIFT)
303
304#define AR9170_TX_PHY_TXCHAIN_SHIFT 15
305#define AR9170_TX_PHY_TXCHAIN_MASK (7 << AR9170_TX_PHY_TXCHAIN_SHIFT)
306#define AR9170_TX_PHY_TXCHAIN_1 1
307/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
308#define AR9170_TX_PHY_TXCHAIN_2 5
309
310#define AR9170_TX_PHY_MCS_SHIFT 18
311#define AR9170_TX_PHY_MCS_MASK (0x7f << AR9170_TX_PHY_MCS_SHIFT)
312
313#define AR9170_TX_PHY_SHORT_GI 0x80000000
314
315#define AR5416_MAX_RATE_POWER 63
316
317struct ar9170_rx_head {
318 u8 plcp[12];
319} __packed;
320
321struct ar9170_rx_phystatus {
322 union {
323 struct {
324 u8 rssi_ant0, rssi_ant1, rssi_ant2,
325 rssi_ant0x, rssi_ant1x, rssi_ant2x,
326 rssi_combined;
327 } __packed;
328 u8 rssi[7];
329 } __packed;
330
331 u8 evm_stream0[6], evm_stream1[6];
332 u8 phy_err;
333} __packed;
334
335struct ar9170_rx_macstatus {
336 u8 SAidx, DAidx;
337 u8 error;
338 u8 status;
339} __packed;
340
341#define AR9170_ENC_ALG_NONE 0x0
342#define AR9170_ENC_ALG_WEP64 0x1
343#define AR9170_ENC_ALG_TKIP 0x2
344#define AR9170_ENC_ALG_AESCCMP 0x4
345#define AR9170_ENC_ALG_WEP128 0x5
346#define AR9170_ENC_ALG_WEP256 0x6
347#define AR9170_ENC_ALG_CENC 0x7
348
349#define AR9170_RX_ENC_SOFTWARE 0x8
350
351static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
352{
353 return (t->SAidx & 0xc0) >> 4 |
354 (t->DAidx & 0xc0) >> 6;
355}
356
357#define AR9170_RX_STATUS_MODULATION_MASK 0x03
358#define AR9170_RX_STATUS_MODULATION_CCK 0x00
359#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
360#define AR9170_RX_STATUS_MODULATION_HT 0x02
361#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
362
363/* depends on modulation */
364#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
365#define AR9170_RX_STATUS_GREENFIELD 0x08
366
367#define AR9170_RX_STATUS_MPDU_MASK 0x30
368#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
369#define AR9170_RX_STATUS_MPDU_FIRST 0x20
370#define AR9170_RX_STATUS_MPDU_MIDDLE 0x30
371#define AR9170_RX_STATUS_MPDU_LAST 0x10
372
373#define AR9170_RX_ERROR_RXTO 0x01
374#define AR9170_RX_ERROR_OVERRUN 0x02
375#define AR9170_RX_ERROR_DECRYPT 0x04
376#define AR9170_RX_ERROR_FCS 0x08
377#define AR9170_RX_ERROR_WRONG_RA 0x10
378#define AR9170_RX_ERROR_PLCP 0x20
379#define AR9170_RX_ERROR_MMIC 0x40
380#define AR9170_RX_ERROR_FATAL 0x80
381
382struct ar9170_cmd_tx_status {
383 u8 dst[ETH_ALEN];
384 __le32 rate;
385 __le16 status;
386} __packed;
387
388#define AR9170_TX_STATUS_COMPLETE 0x00
389#define AR9170_TX_STATUS_RETRY 0x01
390#define AR9170_TX_STATUS_FAILED 0x02
391
392struct ar9170_cmd_ba_failed_count {
393 __le16 failed;
394 __le16 rate;
395} __packed;
396
397struct ar9170_cmd_response {
398 u8 flag;
399 u8 type;
400 __le16 padding;
401
402 union {
403 struct ar9170_cmd_tx_status tx_status;
404 struct ar9170_cmd_ba_failed_count ba_fail_cnt;
405 u8 data[0];
406 };
407} __packed;
408
409/* QoS */
410
411/* mac80211 queue to HW/FW map */
412static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 };
413
414/* HW/FW queue to mac80211 map */
415static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 };
416
417enum ar9170_txq {
418 AR9170_TXQ_BE,
419 AR9170_TXQ_BK,
420 AR9170_TXQ_VI,
421 AR9170_TXQ_VO,
422
423 __AR9170_NUM_TXQ,
424};
425
426#define AR9170_TXQ_DEPTH 32
427#define AR9170_TX_MAX_PENDING 128
428#define AR9170_RX_STREAM_MAX_SIZE 65535
429
430#endif /* __AR9170_HW_H */
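ar9170_get_decrypt_type() in the header above rebuilds a 4-bit cipher code from the top two bits of the SAidx and DAidx fields of the RX MAC status. A standalone check of that bit arithmetic (the SAidx/DAidx sample values are made up):

#include <stdint.h>
#include <stdio.h>

#define AR9170_ENC_ALG_AESCCMP	0x4	/* from the header above */

/* Same shift/mask logic as ar9170_get_decrypt_type(). */
static uint8_t decrypt_type(uint8_t sa_idx, uint8_t da_idx)
{
	return (uint8_t)(((sa_idx & 0xc0) >> 4) | ((da_idx & 0xc0) >> 6));
}

int main(void)
{
	/* SAidx[7:6] = 01, DAidx[7:6] = 00  ->  type 0x4 (AES-CCMP) */
	uint8_t type = decrypt_type(0x40, 0x00);

	printf("decrypt type %#x, aes_ccmp=%d\n",
	       type, type == AR9170_ENC_ALG_AESCCMP);
	return 0;
}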
diff --git a/drivers/net/wireless/ath/ar9170/led.c b/drivers/net/wireless/ath/ar9170/led.c
deleted file mode 100644
index 832d90087f8a..000000000000
--- a/drivers/net/wireless/ath/ar9170/led.c
+++ /dev/null
@@ -1,181 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * LED handling
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "ar9170.h"
40#include "cmd.h"
41
42int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state)
43{
44 return ar9170_write_reg(ar, AR9170_GPIO_REG_DATA, led_state);
45}
46
47int ar9170_init_leds(struct ar9170 *ar)
48{
49 int err;
50
51 /* disable LEDs */
52 /* GPIO [0/1 mode: output, 2/3: input] */
53 err = ar9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
54 if (err)
55 goto out;
56
57 /* GPIO 0/1 value: off */
58 err = ar9170_set_leds_state(ar, 0);
59
60out:
61 return err;
62}
63
64#ifdef CONFIG_AR9170_LEDS
65static void ar9170_update_leds(struct work_struct *work)
66{
67 struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
68 int i, tmp, blink_delay = 1000;
69 u32 led_val = 0;
70 bool rerun = false;
71
72 if (unlikely(!IS_ACCEPTING_CMD(ar)))
73 return ;
74
75 mutex_lock(&ar->mutex);
76 for (i = 0; i < AR9170_NUM_LEDS; i++)
77 if (ar->leds[i].registered && ar->leds[i].toggled) {
78 led_val |= 1 << i;
79
80 tmp = 70 + 200 / (ar->leds[i].toggled);
81 if (tmp < blink_delay)
82 blink_delay = tmp;
83
84 if (ar->leds[i].toggled > 1)
85 ar->leds[i].toggled = 0;
86
87 rerun = true;
88 }
89
90 ar9170_set_leds_state(ar, led_val);
91 mutex_unlock(&ar->mutex);
92
93 if (!rerun)
94 return;
95
96 ieee80211_queue_delayed_work(ar->hw,
97 &ar->led_work,
98 msecs_to_jiffies(blink_delay));
99}
100
101static void ar9170_led_brightness_set(struct led_classdev *led,
102 enum led_brightness brightness)
103{
104 struct ar9170_led *arl = container_of(led, struct ar9170_led, l);
105 struct ar9170 *ar = arl->ar;
106
107 if (unlikely(!arl->registered))
108 return ;
109
110 if (arl->last_state != !!brightness) {
111 arl->toggled++;
112 arl->last_state = !!brightness;
113 }
114
115 if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
116 ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ/10);
117}
118
119static int ar9170_register_led(struct ar9170 *ar, int i, char *name,
120 char *trigger)
121{
122 int err;
123
124 snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
125 "ar9170-%s::%s", wiphy_name(ar->hw->wiphy), name);
126
127 ar->leds[i].ar = ar;
128 ar->leds[i].l.name = ar->leds[i].name;
129 ar->leds[i].l.brightness_set = ar9170_led_brightness_set;
130 ar->leds[i].l.brightness = 0;
131 ar->leds[i].l.default_trigger = trigger;
132
133 err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
134 &ar->leds[i].l);
135 if (err)
136 wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n",
137 ar->leds[i].name, err);
138 else
139 ar->leds[i].registered = true;
140
141 return err;
142}
143
144void ar9170_unregister_leds(struct ar9170 *ar)
145{
146 int i;
147
148 for (i = 0; i < AR9170_NUM_LEDS; i++)
149 if (ar->leds[i].registered) {
150 led_classdev_unregister(&ar->leds[i].l);
151 ar->leds[i].registered = false;
152 ar->leds[i].toggled = 0;
153 }
154
155 cancel_delayed_work_sync(&ar->led_work);
156}
157
158int ar9170_register_leds(struct ar9170 *ar)
159{
160 int err;
161
162 INIT_DELAYED_WORK(&ar->led_work, ar9170_update_leds);
163
164 err = ar9170_register_led(ar, 0, "tx",
165 ieee80211_get_tx_led_name(ar->hw));
166 if (err)
167 goto fail;
168
169 err = ar9170_register_led(ar, 1, "assoc",
170 ieee80211_get_assoc_led_name(ar->hw));
171 if (err)
172 goto fail;
173
174 return 0;
175
176fail:
177 ar9170_unregister_leds(ar);
178 return err;
179}
180
181#endif /* CONFIG_AR9170_LEDS */
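ar9170_update_leds() above derives its own rescheduling interval: each toggled LED contributes 70 + 200/toggled milliseconds, the smallest value wins, and an idle default of 1000 ms caps the delay. A small standalone calculation of that delay (the toggle counts are example inputs, not values from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned int toggled[] = { 1, 4, 12 };	/* per-LED toggle counts */
	int blink_delay = 1000;			/* idle default, in ms */

	for (int i = 0; i < 3; i++) {
		int tmp;

		if (!toggled[i])
			continue;
		tmp = 70 + 200 / toggled[i];	/* busier LED -> shorter period */
		if (tmp < blink_delay)
			blink_delay = tmp;
	}
	printf("reschedule LED work in %d ms\n", blink_delay);	/* 86 ms */
	return 0;
}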
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
deleted file mode 100644
index 857e86104295..000000000000
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ /dev/null
@@ -1,519 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * MAC programming
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <asm/unaligned.h>
40
41#include "ar9170.h"
42#include "cmd.h"
43
44int ar9170_set_dyn_sifs_ack(struct ar9170 *ar)
45{
46 u32 val;
47
48 if (conf_is_ht40(&ar->hw->conf))
49 val = 0x010a;
50 else {
51 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
52 val = 0x105;
53 else
54 val = 0x104;
55 }
56
57 return ar9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val);
58}
59
60int ar9170_set_slot_time(struct ar9170 *ar)
61{
62 u32 slottime = 20;
63
64 if (!ar->vif)
65 return 0;
66
67 if ((ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) ||
68 ar->vif->bss_conf.use_short_slot)
69 slottime = 9;
70
71 return ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME, slottime << 10);
72}
73
74int ar9170_set_basic_rates(struct ar9170 *ar)
75{
76 u8 cck, ofdm;
77
78 if (!ar->vif)
79 return 0;
80
81 ofdm = ar->vif->bss_conf.basic_rates >> 4;
82
83 /* FIXME: is still necessary? */
84 if (ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
85 cck = 0;
86 else
87 cck = ar->vif->bss_conf.basic_rates & 0xf;
88
89 return ar9170_write_reg(ar, AR9170_MAC_REG_BASIC_RATE,
90 ofdm << 8 | cck);
91}
92
93int ar9170_set_qos(struct ar9170 *ar)
94{
95 ar9170_regwrite_begin(ar);
96
97 ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
98 (ar->edcf[0].cw_max << 16));
99 ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
100 (ar->edcf[1].cw_max << 16));
101 ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
102 (ar->edcf[2].cw_max << 16));
103 ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
104 (ar->edcf[3].cw_max << 16));
105 ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
106 (ar->edcf[4].cw_max << 16));
107
108 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS,
109 ((ar->edcf[0].aifs * 9 + 10)) |
110 ((ar->edcf[1].aifs * 9 + 10) << 12) |
111 ((ar->edcf[2].aifs * 9 + 10) << 24));
112 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS,
113 ((ar->edcf[2].aifs * 9 + 10) >> 8) |
114 ((ar->edcf[3].aifs * 9 + 10) << 4) |
115 ((ar->edcf[4].aifs * 9 + 10) << 16));
116
117 ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
118 ar->edcf[0].txop | ar->edcf[1].txop << 16);
119 ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
120 ar->edcf[2].txop | ar->edcf[3].txop << 16);
121
122 ar9170_regwrite_finish();
123
124 return ar9170_regwrite_result();
125}
126
127static int ar9170_set_ampdu_density(struct ar9170 *ar, u8 mpdudensity)
128{
129 u32 val;
130
131 /* don't allow AMPDU density > 8us */
132 if (mpdudensity > 6)
133 return -EINVAL;
134
135 /* Watch out! Otus uses slightly different density values. */
136 val = 0x140a00 | (mpdudensity ? (mpdudensity + 1) : 0);
137
138 ar9170_regwrite_begin(ar);
139 ar9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, val);
140 ar9170_regwrite_finish();
141
142 return ar9170_regwrite_result();
143}
144
145int ar9170_init_mac(struct ar9170 *ar)
146{
147 ar9170_regwrite_begin(ar);
148
149 ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);
150
151 ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0);
152
153 /* enable MMIC */
154 ar9170_regwrite(AR9170_MAC_REG_SNIFFER,
155 AR9170_MAC_REG_SNIFFER_DEFAULTS);
156
157 ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);
158
159 ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
160 ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
161 ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);
162
163 /* CF-END mode */
164 ar9170_regwrite(0x1c3b2c, 0x19000000);
165
166 /* NAV protects ACK only (in TXOP) */
167 ar9170_regwrite(0x1c3b38, 0x201);
168
169 /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
170 /* OTUS set AM to 0x1 */
171 ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);
172
173 ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
174
175 /* AGG test code*/
176 /* Aggregation MAX number and timeout */
177 ar9170_regwrite(0x1c3b9c, 0x10000a);
178
179 ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
180 AR9170_MAC_REG_FTF_DEFAULTS);
181
182 /* Enable deaggregator, response in sniffer mode */
183 ar9170_regwrite(0x1c3c40, 0x1 | 1<<30);
184
185 /* rate sets */
186 ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
187 ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
188 ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb);
189
190 /* MIMO response control */
191 ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */
192
193 /* switch MAC to OTUS interface */
194 ar9170_regwrite(0x1c3600, 0x3);
195
196 ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);
197
198 /* set PHY register read timeout (??) */
199 ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);
200
201 /* Disable Rx TimeOut, workaround for BB. */
202 ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);
203
204 /* Set CPU clock frequency to 88/80MHz */
205 ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL,
206 AR9170_PWR_CLK_AHB_80_88MHZ |
207 AR9170_PWR_CLK_DAC_160_INV_DLY);
208
209 /* Set WLAN DMA interrupt mode: generate int per packet */
210 ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);
211
212 ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
213 AR9170_MAC_FCS_FIFO_PROT);
214
215 /* Disables the CF_END frame, undocumented register */
216 ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
217 0x141E0F48);
218
219 ar9170_regwrite_finish();
220
221 return ar9170_regwrite_result();
222}
223
224static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
225{
226 static const u8 zero[ETH_ALEN] = { 0 };
227
228 if (!mac)
229 mac = zero;
230
231 ar9170_regwrite_begin(ar);
232
233 ar9170_regwrite(reg, get_unaligned_le32(mac));
234 ar9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));
235
236 ar9170_regwrite_finish();
237
238 return ar9170_regwrite_result();
239}
240
241int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hash)
242{
243 int err;
244
245 ar9170_regwrite_begin(ar);
246 ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, mc_hash >> 32);
247 ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, mc_hash);
248 ar9170_regwrite_finish();
249 err = ar9170_regwrite_result();
250 if (err)
251 return err;
252
253 ar->cur_mc_hash = mc_hash;
254 return 0;
255}
256
257int ar9170_update_frame_filter(struct ar9170 *ar, const u32 filter)
258{
259 int err;
260
261 err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER, filter);
262 if (err)
263 return err;
264
265 ar->cur_filter = filter;
266 return 0;
267}
268
269static int ar9170_set_promiscouous(struct ar9170 *ar)
270{
271 u32 encr_mode, sniffer;
272 int err;
273
274 err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer);
275 if (err)
276 return err;
277
278 err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode);
279 if (err)
280 return err;
281
282 if (ar->sniffer_enabled) {
283 sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
284
285 /*
286 * Rx decryption works in place.
287 *
288 * If we don't disable it, the hardware will render all
289 * encrypted frames which are encrypted with an unknown
290 * key useless.
291 */
292
293 encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
294 ar->sniffer_enabled = true;
295 } else {
296 sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
297
298 if (ar->rx_software_decryption)
299 encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
300 else
301 encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
302 }
303
304 ar9170_regwrite_begin(ar);
305 ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode);
306 ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
307 ar9170_regwrite_finish();
308
309 return ar9170_regwrite_result();
310}
311
312int ar9170_set_operating_mode(struct ar9170 *ar)
313{
314 struct ath_common *common = &ar->common;
315 u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
316 u8 *mac_addr, *bssid;
317 int err;
318
319 if (ar->vif) {
320 mac_addr = common->macaddr;
321 bssid = common->curbssid;
322
323 switch (ar->vif->type) {
324 case NL80211_IFTYPE_MESH_POINT:
325 case NL80211_IFTYPE_ADHOC:
326 pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS;
327 break;
328 case NL80211_IFTYPE_AP:
329 pm_mode |= AR9170_MAC_REG_POWERMGT_AP;
330 break;
331 case NL80211_IFTYPE_WDS:
332 pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS;
333 break;
334 case NL80211_IFTYPE_MONITOR:
335 ar->sniffer_enabled = true;
336 ar->rx_software_decryption = true;
337 break;
338 default:
339 pm_mode |= AR9170_MAC_REG_POWERMGT_STA;
340 break;
341 }
342 } else {
343 mac_addr = NULL;
344 bssid = NULL;
345 }
346
347 err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
348 if (err)
349 return err;
350
351 err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
352 if (err)
353 return err;
354
355 err = ar9170_set_promiscouous(ar);
356 if (err)
357 return err;
358
359 /* set AMPDU density to 8us. */
360 err = ar9170_set_ampdu_density(ar, 6);
361 if (err)
362 return err;
363
364 ar9170_regwrite_begin(ar);
365
366 ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode);
367 ar9170_regwrite_finish();
368
369 return ar9170_regwrite_result();
370}
371
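/*
 * The retry limit register appears to pack several 4-bit retry
 * counters: multiplying by 0x11111 replicates max_retry into five
 * nibbles, and the 0x33333 cap limits each of them to 3 retries.
 */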
372int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry)
373{
374 u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
375
376 return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
377}
378
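/*
 * The beacon period register packs the beacon interval into its low
 * 16 bits and the DTIM period into bits 16-23; bit 24 (AP) or bit 25
 * (IBSS/mesh) appears to enable beacon transmission. The pre-TBTT
 * register is only armed in AP mode, presumably to fire 6 TU before
 * the target beacon time.
 */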
379int ar9170_set_beacon_timers(struct ar9170 *ar)
380{
381 u32 v = 0;
382 u32 pretbtt = 0;
383
384 if (ar->vif) {
385 v |= ar->vif->bss_conf.beacon_int;
386
387 if (ar->enable_beacon) {
388 switch (ar->vif->type) {
389 case NL80211_IFTYPE_MESH_POINT:
390 case NL80211_IFTYPE_ADHOC:
391 v |= BIT(25);
392 break;
393 case NL80211_IFTYPE_AP:
394 v |= BIT(24);
395 pretbtt = (ar->vif->bss_conf.beacon_int - 6) <<
396 16;
397 break;
398 default:
399 break;
400 }
401 }
402
403 v |= ar->vif->bss_conf.dtim_period << 16;
404 }
405
406 ar9170_regwrite_begin(ar);
407 ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
408 ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
409 ar9170_regwrite_finish();
410 return ar9170_regwrite_result();
411}
412
413int ar9170_update_beacon(struct ar9170 *ar)
414{
415 struct sk_buff *skb;
416 __le32 *data, *old = NULL;
417 u32 word;
418 int i;
419
420 skb = ieee80211_beacon_get(ar->hw, ar->vif);
421 if (!skb)
422 return -ENOMEM;
423
424 data = (__le32 *)skb->data;
425 if (ar->beacon)
426 old = (__le32 *)ar->beacon->data;
427
428 ar9170_regwrite_begin(ar);
429 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
430 /*
431		 * XXX: When skb->len is not a multiple of 4, this reads
432		 * up to 3 bytes past the end of the skb data!
433 */
434
435 if (old && (data[i] == old[i]))
436 continue;
437
438 word = le32_to_cpu(data[i]);
439 ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word);
440 }
441
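	/*
	 * Build the beacon PLCP word: the on-air length (frame + FCS)
	 * goes into the upper half; on 2.4 GHz the extra << 3 presumably
	 * converts bytes into microseconds for the 1 Mbit/s CCK preamble.
	 * The 0x0400/0x001b constants are undocumented rate/service bits.
	 */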
442 /* XXX: use skb->cb info */
443 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
444 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
445 ((skb->len + 4) << (3 + 16)) + 0x0400);
446 else
447 ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
448 ((skb->len + 4) << 16) + 0x001b);
449
450 ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4);
451 ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS);
452 ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1);
453
454 ar9170_regwrite_finish();
455
456 dev_kfree_skb(ar->beacon);
457 ar->beacon = skb;
458
459 return ar9170_regwrite_result();
460}
461
462void ar9170_new_beacon(struct work_struct *work)
463{
464 struct ar9170 *ar = container_of(work, struct ar9170,
465 beacon_work);
466 struct sk_buff *skb;
467
468 if (unlikely(!IS_STARTED(ar)))
469 return ;
470
471 mutex_lock(&ar->mutex);
472
473 if (!ar->vif)
474 goto out;
475
476 ar9170_update_beacon(ar);
477
478 rcu_read_lock();
479 while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif)))
480 ar9170_op_tx(ar->hw, skb);
481
482 rcu_read_unlock();
483
484 out:
485 mutex_unlock(&ar->mutex);
486}
487
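/*
 * Layout of the key upload command assembled below: word 0 carries
 * the key slot id and key index, words 1-2 the peer MAC address plus
 * the cipher type, and words 3-6 up to 16 bytes of key material.
 * A NULL mac selects the broadcast (group key) entry.
 */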
488int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
489 u8 keyidx, u8 *keydata, int keylen)
490{
491 __le32 vals[7];
492 static const u8 bcast[ETH_ALEN] =
493 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
494 u8 dummy;
495
496 mac = mac ? : bcast;
497
498 vals[0] = cpu_to_le32((keyidx << 16) + id);
499 vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype);
500 vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 |
501 mac[3] << 8 | mac[2]);
502 memset(&vals[3], 0, 16);
503 if (keydata)
504 memcpy(&vals[3], keydata, keylen);
505
506 return ar->exec_cmd(ar, AR9170_CMD_EKEY,
507 sizeof(vals), (u8 *)vals,
508 1, &dummy);
509}
510
511int ar9170_disable_key(struct ar9170 *ar, u8 id)
512{
513 __le32 val = cpu_to_le32(id);
514 u8 dummy;
515
516 return ar->exec_cmd(ar, AR9170_CMD_EKEY,
517 sizeof(val), (u8 *)&val,
518 1, &dummy);
519}
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
deleted file mode 100644
index ccc2edaaeda0..000000000000
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ /dev/null
@@ -1,2190 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <net/mac80211.h>
45#include "ar9170.h"
46#include "hw.h"
47#include "cmd.h"
48
49static int modparam_nohwcrypt;
50module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
51MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
52
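/*
 * The low nibble of hw_value is the PHY rate code the firmware
 * expects; the next nibble indexes the per-band tx power tables
 * (ar9170_tx_prepare_phy splits it with & 0xF and (& 0x30) >> 4).
 */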
53#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
54 .bitrate = (_bitrate), \
55 .flags = (_flags), \
56 .hw_value = (_hw_rate) | (_txpidx) << 4, \
57}
58
59static struct ieee80211_rate __ar9170_ratetable[] = {
60 RATE(10, 0, 0, 0),
61 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
62 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
63 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
64 RATE(60, 0xb, 0, 0),
65 RATE(90, 0xf, 0, 0),
66 RATE(120, 0xa, 0, 0),
67 RATE(180, 0xe, 0, 0),
68 RATE(240, 0x9, 0, 0),
69 RATE(360, 0xd, 1, 0),
70 RATE(480, 0x8, 2, 0),
71 RATE(540, 0xc, 3, 0),
72};
73#undef RATE
74
75#define ar9170_g_ratetable (__ar9170_ratetable + 0)
76#define ar9170_g_ratetable_size 12
77#define ar9170_a_ratetable (__ar9170_ratetable + 4)
78#define ar9170_a_ratetable_size 8
79
80/*
81 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
82 * array in phy.c so that we don't have to do frequency lookups!
83 */
84#define CHAN(_freq, _idx) { \
85 .center_freq = (_freq), \
86 .hw_value = (_idx), \
87 .max_power = 18, /* XXX */ \
88}
89
90static struct ieee80211_channel ar9170_2ghz_chantable[] = {
91 CHAN(2412, 0),
92 CHAN(2417, 1),
93 CHAN(2422, 2),
94 CHAN(2427, 3),
95 CHAN(2432, 4),
96 CHAN(2437, 5),
97 CHAN(2442, 6),
98 CHAN(2447, 7),
99 CHAN(2452, 8),
100 CHAN(2457, 9),
101 CHAN(2462, 10),
102 CHAN(2467, 11),
103 CHAN(2472, 12),
104 CHAN(2484, 13),
105};
106
107static struct ieee80211_channel ar9170_5ghz_chantable[] = {
108 CHAN(4920, 14),
109 CHAN(4940, 15),
110 CHAN(4960, 16),
111 CHAN(4980, 17),
112 CHAN(5040, 18),
113 CHAN(5060, 19),
114 CHAN(5080, 20),
115 CHAN(5180, 21),
116 CHAN(5200, 22),
117 CHAN(5220, 23),
118 CHAN(5240, 24),
119 CHAN(5260, 25),
120 CHAN(5280, 26),
121 CHAN(5300, 27),
122 CHAN(5320, 28),
123 CHAN(5500, 29),
124 CHAN(5520, 30),
125 CHAN(5540, 31),
126 CHAN(5560, 32),
127 CHAN(5580, 33),
128 CHAN(5600, 34),
129 CHAN(5620, 35),
130 CHAN(5640, 36),
131 CHAN(5660, 37),
132 CHAN(5680, 38),
133 CHAN(5700, 39),
134 CHAN(5745, 40),
135 CHAN(5765, 41),
136 CHAN(5785, 42),
137 CHAN(5805, 43),
138 CHAN(5825, 44),
139 CHAN(5170, 45),
140 CHAN(5190, 46),
141 CHAN(5210, 47),
142 CHAN(5230, 48),
143};
144#undef CHAN
145
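/*
 * The MCS rx_mask advertises two spatial streams (MCS 0-15) plus
 * MCS 32, the 40 MHz duplicate rate; rx_highest matches MCS 15 with
 * short GI on a 40 MHz channel (300 Mbit/s).
 */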
146#define AR9170_HT_CAP \
147{ \
148 .ht_supported = true, \
149 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
150 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
151 IEEE80211_HT_CAP_SGI_40 | \
152 IEEE80211_HT_CAP_GRN_FLD | \
153 IEEE80211_HT_CAP_DSSSCCK40 | \
154 IEEE80211_HT_CAP_SM_PS, \
155 .ampdu_factor = 3, \
156 .ampdu_density = 6, \
157 .mcs = { \
158 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
159 .rx_highest = cpu_to_le16(300), \
160 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
161 }, \
162}
163
164static struct ieee80211_supported_band ar9170_band_2GHz = {
165 .channels = ar9170_2ghz_chantable,
166 .n_channels = ARRAY_SIZE(ar9170_2ghz_chantable),
167 .bitrates = ar9170_g_ratetable,
168 .n_bitrates = ar9170_g_ratetable_size,
169 .ht_cap = AR9170_HT_CAP,
170};
171
172static struct ieee80211_supported_band ar9170_band_5GHz = {
173 .channels = ar9170_5ghz_chantable,
174 .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
175 .bitrates = ar9170_a_ratetable,
176 .n_bitrates = ar9170_a_ratetable_size,
177 .ht_cap = AR9170_HT_CAP,
178};
179
180static void ar9170_tx(struct ar9170 *ar);
181
182static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
183{
184 return le16_to_cpu(hdr->seq_ctrl) >> 4;
185}
186
187static inline u16 ar9170_get_seq(struct sk_buff *skb)
188{
189 struct ar9170_tx_control *txc = (void *) skb->data;
190 return ar9170_get_seq_h((void *) txc->frame_data);
191}
192
193#ifdef AR9170_QUEUE_DEBUG
194static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
195{
196 struct ar9170_tx_control *txc = (void *) skb->data;
197 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
198 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
199 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
200
201 wiphy_debug(ar->hw->wiphy,
202 "=> FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
203 "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
204 skb, skb_get_queue_mapping(skb),
205 ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
206 le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
207 jiffies_to_msecs(arinfo->timeout - jiffies));
208}
209
210static void __ar9170_dump_txqueue(struct ar9170 *ar,
211 struct sk_buff_head *queue)
212{
213 struct sk_buff *skb;
214 int i = 0;
215
216 printk(KERN_DEBUG "---[ cut here ]---\n");
217 wiphy_debug(ar->hw->wiphy, "%d entries in queue.\n",
218 skb_queue_len(queue));
219
220 skb_queue_walk(queue, skb) {
221 printk(KERN_DEBUG "index:%d =>\n", i++);
222 ar9170_print_txheader(ar, skb);
223 }
224 if (i != skb_queue_len(queue))
225 printk(KERN_DEBUG "WARNING: queue frame counter "
226 "mismatch %d != %d\n", skb_queue_len(queue), i);
227 printk(KERN_DEBUG "---[ end ]---\n");
228}
229#endif /* AR9170_QUEUE_DEBUG */
230
231#ifdef AR9170_QUEUE_DEBUG
232static void ar9170_dump_txqueue(struct ar9170 *ar,
233 struct sk_buff_head *queue)
234{
235 unsigned long flags;
236
237 spin_lock_irqsave(&queue->lock, flags);
238 __ar9170_dump_txqueue(ar, queue);
239 spin_unlock_irqrestore(&queue->lock, flags);
240}
241#endif /* AR9170_QUEUE_DEBUG */
242
243#ifdef AR9170_QUEUE_STOP_DEBUG
244static void __ar9170_dump_txstats(struct ar9170 *ar)
245{
246 int i;
247
248 wiphy_debug(ar->hw->wiphy, "QoS queue stats\n");
249
250 for (i = 0; i < __AR9170_NUM_TXQ; i++)
251 wiphy_debug(ar->hw->wiphy,
252 "queue:%d limit:%d len:%d waitack:%d stopped:%d\n",
253 i, ar->tx_stats[i].limit, ar->tx_stats[i].len,
254 skb_queue_len(&ar->tx_status[i]),
255 ieee80211_queue_stopped(ar->hw, i));
256}
257#endif /* AR9170_QUEUE_STOP_DEBUG */
258
259/* the caller must guarantee exclusive access to the _bin_ queue. */
260static void ar9170_recycle_expired(struct ar9170 *ar,
261 struct sk_buff_head *queue,
262 struct sk_buff_head *bin)
263{
264 struct sk_buff *skb, *old = NULL;
265 unsigned long flags;
266
267 spin_lock_irqsave(&queue->lock, flags);
268 while ((skb = skb_peek(queue))) {
269 struct ieee80211_tx_info *txinfo;
270 struct ar9170_tx_info *arinfo;
271
272 txinfo = IEEE80211_SKB_CB(skb);
273 arinfo = (void *) txinfo->rate_driver_data;
274
275 if (time_is_before_jiffies(arinfo->timeout)) {
276#ifdef AR9170_QUEUE_DEBUG
277 wiphy_debug(ar->hw->wiphy,
278 "[%ld > %ld] frame expired => recycle\n",
279 jiffies, arinfo->timeout);
280 ar9170_print_txheader(ar, skb);
281#endif /* AR9170_QUEUE_DEBUG */
282 __skb_unlink(skb, queue);
283 __skb_queue_tail(bin, skb);
284 } else {
285 break;
286 }
287
288 if (unlikely(old == skb)) {
289 /* bail out - queue is shot. */
290
291 WARN_ON(1);
292 break;
293 }
294 old = skb;
295 }
296 spin_unlock_irqrestore(&queue->lock, flags);
297}
298
299static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
300 u16 tx_status)
301{
302 struct ieee80211_tx_info *txinfo;
303 unsigned int retries = 0;
304
305 txinfo = IEEE80211_SKB_CB(skb);
306 ieee80211_tx_info_clear_status(txinfo);
307
308 switch (tx_status) {
309 case AR9170_TX_STATUS_RETRY:
310 retries = 2;
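		/* fall through */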
311 case AR9170_TX_STATUS_COMPLETE:
312 txinfo->flags |= IEEE80211_TX_STAT_ACK;
313 break;
314
315 case AR9170_TX_STATUS_FAILED:
316 retries = ar->hw->conf.long_frame_max_tx_count;
317 break;
318
319 default:
320 wiphy_err(ar->hw->wiphy,
321 "invalid tx_status response (%x)\n", tx_status);
322 break;
323 }
324
325 txinfo->status.rates[0].count = retries + 1;
326 skb_pull(skb, sizeof(struct ar9170_tx_control));
327 ieee80211_tx_status_irqsafe(ar->hw, skb);
328}
329
330void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
331{
332 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
333 struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
334 unsigned int queue = skb_get_queue_mapping(skb);
335 unsigned long flags;
336
337 spin_lock_irqsave(&ar->tx_stats_lock, flags);
338 ar->tx_stats[queue].len--;
339
340 if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
341#ifdef AR9170_QUEUE_STOP_DEBUG
342 wiphy_debug(ar->hw->wiphy, "wake queue %d\n", queue);
343 __ar9170_dump_txstats(ar);
344#endif /* AR9170_QUEUE_STOP_DEBUG */
345 ieee80211_wake_queue(ar->hw, queue);
346 }
347 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
348
349 if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
350 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
351 } else {
352 arinfo->timeout = jiffies +
353 msecs_to_jiffies(AR9170_TX_TIMEOUT);
354
355 skb_queue_tail(&ar->tx_status[queue], skb);
356 }
357
358 if (!ar->tx_stats[queue].len &&
359 !skb_queue_empty(&ar->tx_pending[queue])) {
360 ar9170_tx(ar);
361 }
362}
363
364static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
365 const u8 *mac,
366 struct sk_buff_head *queue,
367 const u32 rate)
368{
369 unsigned long flags;
370 struct sk_buff *skb;
371
372 /*
373	 * Unfortunately, the firmware does not tell us which (queued) frame
374 * this transmission status report belongs to.
375 *
376 * So we have to make risky guesses - with the scarce information
377 * the firmware provided (-> destination MAC, and phy_control) -
378 * and hope that we picked the right one...
379 */
380
381 spin_lock_irqsave(&queue->lock, flags);
382 skb_queue_walk(queue, skb) {
383 struct ar9170_tx_control *txc = (void *) skb->data;
384 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
385 u32 r;
386
387 if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
388#ifdef AR9170_QUEUE_DEBUG
389 wiphy_debug(ar->hw->wiphy,
390 "skip frame => DA %pM != %pM\n",
391 mac, ieee80211_get_DA(hdr));
392 ar9170_print_txheader(ar, skb);
393#endif /* AR9170_QUEUE_DEBUG */
394 continue;
395 }
396
397 r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
398 AR9170_TX_PHY_MCS_SHIFT;
399
400 if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
401#ifdef AR9170_QUEUE_DEBUG
402 wiphy_debug(ar->hw->wiphy,
403 "skip frame => rate %d != %d\n", rate, r);
404 ar9170_print_txheader(ar, skb);
405#endif /* AR9170_QUEUE_DEBUG */
406 continue;
407 }
408
409 __skb_unlink(skb, queue);
410 spin_unlock_irqrestore(&queue->lock, flags);
411 return skb;
412 }
413
414#ifdef AR9170_QUEUE_DEBUG
415 wiphy_err(ar->hw->wiphy,
416 "ESS:[%pM] does not have any outstanding frames in queue.\n",
417 mac);
418 __ar9170_dump_txqueue(ar, queue);
419#endif /* AR9170_QUEUE_DEBUG */
420 spin_unlock_irqrestore(&queue->lock, flags);
421
422 return NULL;
423}
424
425/*
426 * This worker keeps the tx_status queues tidy, so we can
427 * guarantee that incoming tx_status reports actually belong
428 * to a pending frame.
429 */
430
431static void ar9170_tx_janitor(struct work_struct *work)
432{
433 struct ar9170 *ar = container_of(work, struct ar9170,
434 tx_janitor.work);
435 struct sk_buff_head waste;
436 unsigned int i;
437 bool resched = false;
438
439 if (unlikely(!IS_STARTED(ar)))
440 return ;
441
442 skb_queue_head_init(&waste);
443
444 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
445#ifdef AR9170_QUEUE_DEBUG
446 wiphy_debug(ar->hw->wiphy, "garbage collector scans queue:%d\n",
447 i);
448 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
449 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
450#endif /* AR9170_QUEUE_DEBUG */
451
452 ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
453 ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
454 skb_queue_purge(&waste);
455
456 if (!skb_queue_empty(&ar->tx_status[i]) ||
457 !skb_queue_empty(&ar->tx_pending[i]))
458 resched = true;
459 }
460
461 if (!resched)
462 return;
463
464 ieee80211_queue_delayed_work(ar->hw,
465 &ar->tx_janitor,
466 msecs_to_jiffies(AR9170_JANITOR_DELAY));
467}
468
469void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
470{
471 struct ar9170_cmd_response *cmd = (void *) buf;
472
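	/*
	 * Types below 0xc0 are responses to commands we issued and go
	 * back to the command layer; everything at or above 0xc0 is an
	 * unsolicited hardware event handled in the switch below.
	 */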
473 if ((cmd->type & 0xc0) != 0xc0) {
474 ar->callback_cmd(ar, len, buf);
475 return;
476 }
477
478 /* hardware event handlers */
479 switch (cmd->type) {
480 case 0xc1: {
481 /*
482 * TX status notification:
483 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
484 *
485 * XX always 81
486 * YY always 00
487 * M1-M6 is the MAC address
488 * R1-R4 is the transmit rate
489 * S1-S2 is the transmit status
490 */
491
492 struct sk_buff *skb;
493 u32 phy = le32_to_cpu(cmd->tx_status.rate);
494 u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
495 AR9170_TX_PHY_QOS_SHIFT;
496#ifdef AR9170_QUEUE_DEBUG
497 wiphy_debug(ar->hw->wiphy,
498 "recv tx_status for %pm, p:%08x, q:%d\n",
499 cmd->tx_status.dst, phy, q);
500#endif /* AR9170_QUEUE_DEBUG */
501
502 skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
503 &ar->tx_status[q],
504 AR9170_TX_INVALID_RATE);
505 if (unlikely(!skb))
506 return ;
507
508 ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
509 break;
510 }
511
512 case 0xc0:
513 /*
514 * pre-TBTT event
515 */
516 if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
517 ieee80211_queue_work(ar->hw, &ar->beacon_work);
518 break;
519
520 case 0xc2:
521 /*
522 * (IBSS) beacon send notification
523 * bytes: 04 c2 XX YY B4 B3 B2 B1
524 *
525 * XX always 80
526 * YY always 00
527		 * B1-B4 "should" be the number of sent-out beacons.
528 */
529 break;
530
531 case 0xc3:
532 /* End of Atim Window */
533 break;
534
535 case 0xc4:
536 /* BlockACK bitmap */
537 break;
538
539 case 0xc5:
540 /* BlockACK events */
541 break;
542
543 case 0xc6:
544 /* Watchdog Interrupt */
545 break;
546
547 case 0xc9:
548 /* retransmission issue / SIFS/EIFS collision ?! */
549 break;
550
551 /* firmware debug */
552 case 0xca:
553 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
554 (char *)buf + 4);
555 break;
556 case 0xcb:
557 len -= 4;
558
559 switch (len) {
560 case 1:
561 printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
562 *((char *)buf + 4));
563 break;
564 case 2:
565			printk(KERN_DEBUG "ar9170 FW: u16: %#.4x\n",
566 le16_to_cpup((__le16 *)((char *)buf + 4)));
567 break;
568 case 4:
569			printk(KERN_DEBUG "ar9170 FW: u32: %#.8x\n",
570 le32_to_cpup((__le32 *)((char *)buf + 4)));
571 break;
572 case 8:
573			printk(KERN_DEBUG "ar9170 FW: u64: %#.16lx\n",
574 (unsigned long)le64_to_cpup(
575 (__le64 *)((char *)buf + 4)));
576 break;
577 }
578 break;
579 case 0xcc:
580 print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
581 (char *)buf + 4, len - 4);
582 break;
583
584 default:
585 pr_info("received unhandled event %x\n", cmd->type);
586 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
587 break;
588 }
589}
590
591static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
592{
593 memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
594 ar->rx_mpdu.has_plcp = false;
595}
596
597int ar9170_nag_limiter(struct ar9170 *ar)
598{
599 bool print_message;
600
601 /*
602 * we expect all sorts of errors in promiscuous mode.
603 * don't bother with it, it's OK!
604 */
605 if (ar->sniffer_enabled)
606 return false;
607
608 /*
609 * only go for frequent errors! The hardware tends to
610 * do some stupid thing once in a while under load, in
611 * noisy environments or just for fun!
612 */
613 if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
614 print_message = true;
615 else
616 print_message = false;
617
618 /* reset threshold for "once in a while" */
619 ar->bad_hw_nagger = jiffies + HZ / 4;
620 return print_message;
621}
622
623static int ar9170_rx_mac_status(struct ar9170 *ar,
624 struct ar9170_rx_head *head,
625 struct ar9170_rx_macstatus *mac,
626 struct ieee80211_rx_status *status)
627{
628 u8 error, decrypt;
629
630 BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
631 BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
632
633 error = mac->error;
634 if (error & AR9170_RX_ERROR_MMIC) {
635 status->flag |= RX_FLAG_MMIC_ERROR;
636 error &= ~AR9170_RX_ERROR_MMIC;
637 }
638
639 if (error & AR9170_RX_ERROR_PLCP) {
640 status->flag |= RX_FLAG_FAILED_PLCP_CRC;
641 error &= ~AR9170_RX_ERROR_PLCP;
642
643 if (!(ar->filter_state & FIF_PLCPFAIL))
644 return -EINVAL;
645 }
646
647 if (error & AR9170_RX_ERROR_FCS) {
648 status->flag |= RX_FLAG_FAILED_FCS_CRC;
649 error &= ~AR9170_RX_ERROR_FCS;
650
651 if (!(ar->filter_state & FIF_FCSFAIL))
652 return -EINVAL;
653 }
654
655 decrypt = ar9170_get_decrypt_type(mac);
656 if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
657 decrypt != AR9170_ENC_ALG_NONE)
658 status->flag |= RX_FLAG_DECRYPTED;
659
660 /* ignore wrong RA errors */
661 error &= ~AR9170_RX_ERROR_WRONG_RA;
662
663 if (error & AR9170_RX_ERROR_DECRYPT) {
664 error &= ~AR9170_RX_ERROR_DECRYPT;
665 /*
666 * Rx decryption is done in place,
667 * the original data is lost anyway.
668 */
669
670 return -EINVAL;
671 }
672
673 /* drop any other error frames */
674 if (unlikely(error)) {
675 /* TODO: update netdevice's RX dropped/errors statistics */
676
677 if (ar9170_nag_limiter(ar))
678 wiphy_debug(ar->hw->wiphy,
679 "received frame with suspicious error code (%#x).\n",
680 error);
681
682 return -EINVAL;
683 }
684
685 status->band = ar->channel->band;
686 status->freq = ar->channel->center_freq;
687
688 switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
689 case AR9170_RX_STATUS_MODULATION_CCK:
690 if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
691 status->flag |= RX_FLAG_SHORTPRE;
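		/*
		 * The CCK SIGNAL field encodes the rate in units of
		 * 100 kbit/s: 0x0a/0x14/0x37/0x6e are 1/2/5.5/11 Mbit/s,
		 * i.e. rate indices 0-3.
		 */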
692 switch (head->plcp[0]) {
693 case 0x0a:
694 status->rate_idx = 0;
695 break;
696 case 0x14:
697 status->rate_idx = 1;
698 break;
699 case 0x37:
700 status->rate_idx = 2;
701 break;
702 case 0x6e:
703 status->rate_idx = 3;
704 break;
705 default:
706 if (ar9170_nag_limiter(ar))
707 wiphy_err(ar->hw->wiphy,
708 "invalid plcp cck rate (%x).\n",
709 head->plcp[0]);
710 return -EINVAL;
711 }
712 break;
713
714 case AR9170_RX_STATUS_MODULATION_DUPOFDM:
715 case AR9170_RX_STATUS_MODULATION_OFDM:
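		/*
		 * Legacy OFDM RATE bits (plcp[0] & 0xf): 0xb/0xf/0xa/0xe/
		 * 0x9/0xd/0x8/0xc correspond to 6/9/12/18/24/36/48/54
		 * Mbit/s, matching the order of the OFDM part of the
		 * rate table.
		 */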
716 switch (head->plcp[0] & 0xf) {
717 case 0xb:
718 status->rate_idx = 0;
719 break;
720 case 0xf:
721 status->rate_idx = 1;
722 break;
723 case 0xa:
724 status->rate_idx = 2;
725 break;
726 case 0xe:
727 status->rate_idx = 3;
728 break;
729 case 0x9:
730 status->rate_idx = 4;
731 break;
732 case 0xd:
733 status->rate_idx = 5;
734 break;
735 case 0x8:
736 status->rate_idx = 6;
737 break;
738 case 0xc:
739 status->rate_idx = 7;
740 break;
741 default:
742 if (ar9170_nag_limiter(ar))
743 wiphy_err(ar->hw->wiphy,
744 "invalid plcp ofdm rate (%x).\n",
745 head->plcp[0]);
746 return -EINVAL;
747 }
748 if (status->band == IEEE80211_BAND_2GHZ)
749 status->rate_idx += 4;
750 break;
751
752 case AR9170_RX_STATUS_MODULATION_HT:
753 if (head->plcp[3] & 0x80)
754 status->flag |= RX_FLAG_40MHZ;
755 if (head->plcp[6] & 0x80)
756 status->flag |= RX_FLAG_SHORT_GI;
757
758		status->rate_idx = clamp(head->plcp[6] & 0x7f, 0, 75);
759 status->flag |= RX_FLAG_HT;
760 break;
761
762 default:
763 if (ar9170_nag_limiter(ar))
764 wiphy_err(ar->hw->wiphy, "invalid modulation\n");
765 return -EINVAL;
766 }
767
768 return 0;
769}
770
771static void ar9170_rx_phy_status(struct ar9170 *ar,
772 struct ar9170_rx_phystatus *phy,
773 struct ieee80211_rx_status *status)
774{
775 int i;
776
777 BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
778
779 for (i = 0; i < 3; i++)
780 if (phy->rssi[i] != 0x80)
781 status->antenna |= BIT(i);
782
783 /* post-process RSSI */
784 for (i = 0; i < 7; i++)
785 if (phy->rssi[i] & 0x80)
786 phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
787
788 /* TODO: we could do something with phy_errors */
789 status->signal = ar->noise[0] + phy->rssi_combined;
790}
791
792static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
793{
794 struct sk_buff *skb;
795 int reserved = 0;
796 struct ieee80211_hdr *hdr = (void *) buf;
797
798 if (ieee80211_is_data_qos(hdr->frame_control)) {
799 u8 *qc = ieee80211_get_qos_ctl(hdr);
800 reserved += NET_IP_ALIGN;
801
802 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
803 reserved += NET_IP_ALIGN;
804 }
805
806 if (ieee80211_has_a4(hdr->frame_control))
807 reserved += NET_IP_ALIGN;
808
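	/*
	 * Fold the accumulated NET_IP_ALIGN adjustments down to a single
	 * 0- or 2-byte shift on top of 32 bytes of headroom, presumably
	 * so the payload behind the variable-length 802.11 header ends
	 * up properly aligned for the network stack.
	 */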
809 reserved = 32 + (reserved & NET_IP_ALIGN);
810
811 skb = dev_alloc_skb(len + reserved);
812 if (likely(skb)) {
813 skb_reserve(skb, reserved);
814 memcpy(skb_put(skb, len), buf, len);
815 }
816
817 return skb;
818}
819
820/*
821 * If the frame alignment is right (or the kernel has
822 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
823 * is only a single MPDU in the USB frame, then we could
824 * submit the SKB directly to mac80211. However, since
825 * there may be multiple packets in one SKB in stream
826 * mode, and we need to observe the proper ordering,
827 * this is non-trivial.
828 */
829
830static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
831{
832 struct ar9170_rx_head *head;
833 struct ar9170_rx_macstatus *mac;
834 struct ar9170_rx_phystatus *phy = NULL;
835 struct ieee80211_rx_status status;
836 struct sk_buff *skb;
837 int mpdu_len;
838
839 if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
840 return ;
841
842 /* Received MPDU */
843 mpdu_len = len - sizeof(*mac);
844
845 mac = (void *)(buf + mpdu_len);
846 if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
847 /* this frame is too damaged and can't be used - drop it */
848
849 return ;
850 }
851
852 switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
853 case AR9170_RX_STATUS_MPDU_FIRST:
854 /* first mpdu packet has the plcp header */
855 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
856 head = (void *) buf;
857 memcpy(&ar->rx_mpdu.plcp, (void *) buf,
858 sizeof(struct ar9170_rx_head));
859
860 mpdu_len -= sizeof(struct ar9170_rx_head);
861 buf += sizeof(struct ar9170_rx_head);
862 ar->rx_mpdu.has_plcp = true;
863 } else {
864 if (ar9170_nag_limiter(ar))
865 wiphy_err(ar->hw->wiphy,
866 "plcp info is clipped.\n");
867 return ;
868 }
869 break;
870
871 case AR9170_RX_STATUS_MPDU_LAST:
872		/* last mpdu has an extra tail with phy status information */
873
874 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
875 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
876 phy = (void *)(buf + mpdu_len);
877 } else {
878 if (ar9170_nag_limiter(ar))
879 wiphy_err(ar->hw->wiphy,
880 "frame tail is clipped.\n");
881 return ;
882 }
883
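	/*
	 * fall through: the last MPDU reuses the PLCP header that was
	 * stored away with the first MPDU of the stream.
	 */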
884 case AR9170_RX_STATUS_MPDU_MIDDLE:
885 /* middle mpdus are just data */
886 if (unlikely(!ar->rx_mpdu.has_plcp)) {
887 if (!ar9170_nag_limiter(ar))
888 return ;
889
890 wiphy_err(ar->hw->wiphy,
891 "rx stream did not start with a first_mpdu frame tag.\n");
892
893 return ;
894 }
895
896 head = &ar->rx_mpdu.plcp;
897 break;
898
899 case AR9170_RX_STATUS_MPDU_SINGLE:
900 /* single mpdu - has plcp (head) and phy status (tail) */
901 head = (void *) buf;
902
903 mpdu_len -= sizeof(struct ar9170_rx_head);
904 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
905
906 buf += sizeof(struct ar9170_rx_head);
907 phy = (void *)(buf + mpdu_len);
908 break;
909
910 default:
911 BUG_ON(1);
912 break;
913 }
914
915 if (unlikely(mpdu_len < FCS_LEN))
916 return ;
917
918 memset(&status, 0, sizeof(status));
919 if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
920 return ;
921
922 if (phy)
923 ar9170_rx_phy_status(ar, phy, &status);
924
925 skb = ar9170_rx_copy_data(buf, mpdu_len);
926 if (likely(skb)) {
927 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
928 ieee80211_rx_irqsafe(ar->hw, skb);
929 }
930}
931
932void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
933{
934 unsigned int i, tlen, resplen, wlen = 0, clen = 0;
935 u8 *tbuf, *respbuf;
936
937 tbuf = skb->data;
938 tlen = skb->len;
939
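	/*
	 * The rx stream is a sequence of chunks: a 16-bit little-endian
	 * payload length, the 0x4e00 tag, and the payload padded up to a
	 * 4-byte boundary. Chunks whose payload starts with six 0xffff
	 * markers carry command responses/events, everything else is an
	 * MPDU.
	 */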
940 while (tlen >= 4) {
941 clen = tbuf[1] << 8 | tbuf[0];
942 wlen = ALIGN(clen, 4);
943
944		/* check if this stream has a valid tag. */
945 if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
946 /*
947 * TODO: handle the highly unlikely event that the
948 * corrupted stream has the TAG at the right position.
949 */
950
951 /* check if the frame can be repaired. */
952 if (!ar->rx_failover_missing) {
953 /* this is no "short read". */
954 if (ar9170_nag_limiter(ar)) {
955 wiphy_err(ar->hw->wiphy,
956 "missing tag!\n");
957 goto err_telluser;
958 } else
959 goto err_silent;
960 }
961
962 if (ar->rx_failover_missing > tlen) {
963 if (ar9170_nag_limiter(ar)) {
964 wiphy_err(ar->hw->wiphy,
965 "possible multi stream corruption!\n");
966 goto err_telluser;
967 } else
968 goto err_silent;
969 }
970
971 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
972 ar->rx_failover_missing -= tlen;
973
974 if (ar->rx_failover_missing <= 0) {
975 /*
976 * nested ar9170_rx call!
977 * termination is guaranteed, even when the
978			 * combined frame also has an element with
979 * a bad tag.
980 */
981
982 ar->rx_failover_missing = 0;
983 ar9170_rx(ar, ar->rx_failover);
984
985 skb_reset_tail_pointer(ar->rx_failover);
986 skb_trim(ar->rx_failover, 0);
987 }
988
989 return ;
990 }
991
992 /* check if stream is clipped */
993 if (wlen > tlen - 4) {
994 if (ar->rx_failover_missing) {
995 /* TODO: handle double stream corruption. */
996 if (ar9170_nag_limiter(ar)) {
997 wiphy_err(ar->hw->wiphy,
998 "double rx stream corruption!\n");
999 goto err_telluser;
1000 } else
1001 goto err_silent;
1002 }
1003
1004 /*
1005			 * Save the incomplete data set; the firmware will
1006			 * resend the missing bits when the rx descriptor
1007			 * comes round again.
1008 */
1009
1010 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
1011 ar->rx_failover_missing = clen - tlen;
1012 return ;
1013 }
1014 resplen = clen;
1015 respbuf = tbuf + 4;
1016 tbuf += wlen + 4;
1017 tlen -= wlen + 4;
1018
1019 i = 0;
1020
1021 /* weird thing, but this is the same in the original driver */
1022 while (resplen > 2 && i < 12 &&
1023 respbuf[0] == 0xff && respbuf[1] == 0xff) {
1024 i += 2;
1025 resplen -= 2;
1026 respbuf += 2;
1027 }
1028
1029 if (resplen < 4)
1030 continue;
1031
1032 /* found the 6 * 0xffff marker? */
1033 if (i == 12)
1034 ar9170_handle_command_response(ar, respbuf, resplen);
1035 else
1036 ar9170_handle_mpdu(ar, respbuf, clen);
1037 }
1038
1039 if (tlen) {
1040 if (net_ratelimit())
1041 wiphy_err(ar->hw->wiphy,
1042 "%d bytes of unprocessed data left in rx stream!\n",
1043 tlen);
1044
1045 goto err_telluser;
1046 }
1047
1048 return ;
1049
1050err_telluser:
1051 wiphy_err(ar->hw->wiphy,
1052 "damaged RX stream data [want:%d, data:%d, rx:%d, pending:%d ]\n",
1053 clen, wlen, tlen, ar->rx_failover_missing);
1054
1055 if (ar->rx_failover_missing)
1056 print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
1057 ar->rx_failover->data,
1058 ar->rx_failover->len);
1059
1060 print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
1061 skb->data, skb->len);
1062
1063 wiphy_err(ar->hw->wiphy,
1064 "If you see this message frequently, please check your hardware and cables.\n");
1065
1066err_silent:
1067 if (ar->rx_failover_missing) {
1068 skb_reset_tail_pointer(ar->rx_failover);
1069 skb_trim(ar->rx_failover, 0);
1070 ar->rx_failover_missing = 0;
1071 }
1072}
1073
1074#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
1075do { \
1076 queue.aifs = ai_fs; \
1077 queue.cw_min = cwmin; \
1078 queue.cw_max = cwmax; \
1079 queue.txop = _txop; \
1080} while (0)
1081
1082static int ar9170_op_start(struct ieee80211_hw *hw)
1083{
1084 struct ar9170 *ar = hw->priv;
1085 int err, i;
1086
1087 mutex_lock(&ar->mutex);
1088
1089 /* reinitialize queues statistics */
1090 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
1091 for (i = 0; i < __AR9170_NUM_TXQ; i++)
1092 ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
1093
1094 /* reset QoS defaults */
1095 AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
1096 AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023, 0); /* BACKGROUND */
1097 AR9170_FILL_QUEUE(ar->edcf[2], 2, 7, 15, 94); /* VIDEO */
1098 AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */
1099 AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
1100
1101 /* set sane AMPDU defaults */
1102 ar->global_ampdu_density = 6;
1103 ar->global_ampdu_factor = 3;
1104
1105 ar->bad_hw_nagger = jiffies;
1106
1107 err = ar->open(ar);
1108 if (err)
1109 goto out;
1110
1111 err = ar9170_init_mac(ar);
1112 if (err)
1113 goto out;
1114
1115 err = ar9170_set_qos(ar);
1116 if (err)
1117 goto out;
1118
1119 err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
1120 if (err)
1121 goto out;
1122
1123 err = ar9170_init_rf(ar);
1124 if (err)
1125 goto out;
1126
1127 /* start DMA */
1128 err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
1129 if (err)
1130 goto out;
1131
1132 ar->state = AR9170_STARTED;
1133
1134out:
1135 mutex_unlock(&ar->mutex);
1136 return err;
1137}
1138
1139static void ar9170_op_stop(struct ieee80211_hw *hw)
1140{
1141 struct ar9170 *ar = hw->priv;
1142 unsigned int i;
1143
1144 if (IS_STARTED(ar))
1145 ar->state = AR9170_IDLE;
1146
1147 cancel_delayed_work_sync(&ar->tx_janitor);
1148#ifdef CONFIG_AR9170_LEDS
1149 cancel_delayed_work_sync(&ar->led_work);
1150#endif
1151 cancel_work_sync(&ar->beacon_work);
1152
1153 mutex_lock(&ar->mutex);
1154
1155 if (IS_ACCEPTING_CMD(ar)) {
1156 ar9170_set_leds_state(ar, 0);
1157
1158 /* stop DMA */
1159 ar9170_write_reg(ar, 0x1c3d30, 0);
1160 ar->stop(ar);
1161 }
1162
1163 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1164 skb_queue_purge(&ar->tx_pending[i]);
1165 skb_queue_purge(&ar->tx_status[i]);
1166 }
1167
1168 mutex_unlock(&ar->mutex);
1169}
1170
1171static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1172{
1173 struct ieee80211_hdr *hdr;
1174 struct ar9170_tx_control *txc;
1175 struct ieee80211_tx_info *info;
1176 struct ieee80211_tx_rate *txrate;
1177 struct ar9170_tx_info *arinfo;
1178 unsigned int queue = skb_get_queue_mapping(skb);
1179 u16 keytype = 0;
1180 u16 len, icv = 0;
1181
1182 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1183
1184 hdr = (void *)skb->data;
1185 info = IEEE80211_SKB_CB(skb);
1186 len = skb->len;
1187
1188 txc = (void *)skb_push(skb, sizeof(*txc));
1189
1190 if (info->control.hw_key) {
1191 icv = info->control.hw_key->icv_len;
1192
1193 switch (info->control.hw_key->cipher) {
1194 case WLAN_CIPHER_SUITE_WEP40:
1195 case WLAN_CIPHER_SUITE_WEP104:
1196 case WLAN_CIPHER_SUITE_TKIP:
1197 keytype = AR9170_TX_MAC_ENCR_RC4;
1198 break;
1199 case WLAN_CIPHER_SUITE_CCMP:
1200 keytype = AR9170_TX_MAC_ENCR_AES;
1201 break;
1202 default:
1203 WARN_ON(1);
1204 goto err_out;
1205 }
1206 }
1207
1208	/* length on air: payload + ICV + 4 (FCS) */
1209 txc->length = cpu_to_le16(len + icv + 4);
1210
1211 txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
1212 AR9170_TX_MAC_BACKOFF);
1213 txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
1214 AR9170_TX_MAC_QOS_SHIFT);
1215 txc->mac_control |= cpu_to_le16(keytype);
1216 txc->phy_control = cpu_to_le32(0);
1217
1218 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1219 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
1220
1221 txrate = &info->control.rates[0];
1222 if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
1223 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1224 else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
1225 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1226
1227 arinfo = (void *)info->rate_driver_data;
1228 arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
1229
1230 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
1231 (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
1232 /*
1233 * WARNING:
1234 * Putting the QoS queue bits into an unexplored territory is
1235 * certainly not elegant.
1236 *
1237 * In my defense: This idea provides a reasonable way to
1238 * smuggle valuable information to the tx_status callback.
1239 * Also, the idea behind this bit-abuse came straight from
1240 * the original driver code.
1241 */
1242
1243 txc->phy_control |=
1244 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1245
1246 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1247 }
1248
1249 return 0;
1250
1251err_out:
1252 skb_pull(skb, sizeof(*txc));
1253 return -EINVAL;
1254}
1255
1256static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
1257{
1258 struct ar9170_tx_control *txc;
1259 struct ieee80211_tx_info *info;
1260 struct ieee80211_rate *rate = NULL;
1261 struct ieee80211_tx_rate *txrate;
1262 u32 power, chains;
1263
1264 txc = (void *) skb->data;
1265 info = IEEE80211_SKB_CB(skb);
1266 txrate = &info->control.rates[0];
1267
1268 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
1269 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
1270
1271 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1272 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
1273
1274 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1275 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
1276 /* this works because 40 MHz is 2 and dup is 3 */
1277 if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
1278 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
1279
1280 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
1281 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
1282
1283 if (txrate->flags & IEEE80211_TX_RC_MCS) {
1284 u32 r = txrate->idx;
1285 u8 *txpower;
1286
1287 /* heavy clip control */
1288 txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
1289
1290 r <<= AR9170_TX_PHY_MCS_SHIFT;
1291 BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
1292
1293 txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
1294 txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
1295
1296 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
1297 if (info->band == IEEE80211_BAND_5GHZ)
1298 txpower = ar->power_5G_ht40;
1299 else
1300 txpower = ar->power_2G_ht40;
1301 } else {
1302 if (info->band == IEEE80211_BAND_5GHZ)
1303 txpower = ar->power_5G_ht20;
1304 else
1305 txpower = ar->power_2G_ht20;
1306 }
1307
1308 power = txpower[(txrate->idx) & 7];
1309 } else {
1310 u8 *txpower;
1311 u32 mod;
1312 u32 phyrate;
1313 u8 idx = txrate->idx;
1314
1315 if (info->band != IEEE80211_BAND_2GHZ) {
1316 idx += 4;
1317 txpower = ar->power_5G_leg;
1318 mod = AR9170_TX_PHY_MOD_OFDM;
1319 } else {
1320 if (idx < 4) {
1321 txpower = ar->power_2G_cck;
1322 mod = AR9170_TX_PHY_MOD_CCK;
1323 } else {
1324 mod = AR9170_TX_PHY_MOD_OFDM;
1325 txpower = ar->power_2G_ofdm;
1326 }
1327 }
1328
1329 rate = &__ar9170_ratetable[idx];
1330
1331 phyrate = rate->hw_value & 0xF;
1332 power = txpower[(rate->hw_value & 0x30) >> 4];
1333 phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
1334
1335 txc->phy_control |= cpu_to_le32(mod);
1336 txc->phy_control |= cpu_to_le32(phyrate);
1337 }
1338
1339 power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
1340 power &= AR9170_TX_PHY_TX_PWR_MASK;
1341 txc->phy_control |= cpu_to_le32(power);
1342
1343 /* set TX chains */
1344 if (ar->eeprom.tx_mask == 1) {
1345 chains = AR9170_TX_PHY_TXCHAIN_1;
1346 } else {
1347 chains = AR9170_TX_PHY_TXCHAIN_2;
1348
1349 /* >= 36M legacy OFDM - use only one chain */
1350 if (rate && rate->bitrate >= 360)
1351 chains = AR9170_TX_PHY_TXCHAIN_1;
1352 }
1353 txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
1354}
1355
1356static void ar9170_tx(struct ar9170 *ar)
1357{
1358 struct sk_buff *skb;
1359 unsigned long flags;
1360 struct ieee80211_tx_info *info;
1361 struct ar9170_tx_info *arinfo;
1362 unsigned int i, frames, frames_failed, remaining_space;
1363 int err;
1364 bool schedule_garbagecollector = false;
1365
1366 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1367
1368 if (unlikely(!IS_STARTED(ar)))
1369 return ;
1370
1371 remaining_space = AR9170_TX_MAX_PENDING;
1372
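	/*
	 * For every AC: move frames from tx_pending to the hardware until
	 * the per-queue limit is hit, stop the corresponding mac80211
	 * queue when it fills up, and cap the total number of frames
	 * submitted in one pass at AR9170_TX_MAX_PENDING.
	 */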
1373 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1374 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1375 frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1376 skb_queue_len(&ar->tx_pending[i]));
1377
1378 if (remaining_space < frames) {
1379#ifdef AR9170_QUEUE_DEBUG
1380 wiphy_debug(ar->hw->wiphy,
1381 "tx quota reached queue:%d, "
1382 "remaining slots:%d, needed:%d\n",
1383 i, remaining_space, frames);
1384#endif /* AR9170_QUEUE_DEBUG */
1385 frames = remaining_space;
1386 }
1387
1388 ar->tx_stats[i].len += frames;
1389 ar->tx_stats[i].count += frames;
1390 if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
1391#ifdef AR9170_QUEUE_DEBUG
1392 wiphy_debug(ar->hw->wiphy, "queue %d full\n", i);
1393 wiphy_debug(ar->hw->wiphy, "stuck frames: ===>\n");
1394 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1395 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
1396#endif /* AR9170_QUEUE_DEBUG */
1397
1398#ifdef AR9170_QUEUE_STOP_DEBUG
1399 wiphy_debug(ar->hw->wiphy, "stop queue %d\n", i);
1400 __ar9170_dump_txstats(ar);
1401#endif /* AR9170_QUEUE_STOP_DEBUG */
1402 ieee80211_stop_queue(ar->hw, i);
1403 }
1404
1405 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1406
1407 if (!frames)
1408 continue;
1409
1410 frames_failed = 0;
1411 while (frames) {
1412 skb = skb_dequeue(&ar->tx_pending[i]);
1413 if (unlikely(!skb)) {
1414 frames_failed += frames;
1415 frames = 0;
1416 break;
1417 }
1418
1419 info = IEEE80211_SKB_CB(skb);
1420 arinfo = (void *) info->rate_driver_data;
1421
1422 /* TODO: cancel stuck frames */
1423 arinfo->timeout = jiffies +
1424 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1425
1426#ifdef AR9170_QUEUE_DEBUG
1427 wiphy_debug(ar->hw->wiphy, "send frame q:%d =>\n", i);
1428 ar9170_print_txheader(ar, skb);
1429#endif /* AR9170_QUEUE_DEBUG */
1430
1431 err = ar->tx(ar, skb);
1432 if (unlikely(err)) {
1433 frames_failed++;
1434 dev_kfree_skb_any(skb);
1435 } else {
1436 remaining_space--;
1437 schedule_garbagecollector = true;
1438 }
1439
1440 frames--;
1441 }
1442
1443#ifdef AR9170_QUEUE_DEBUG
1444 wiphy_debug(ar->hw->wiphy,
1445 "ar9170_tx report for queue %d\n", i);
1446
1447 wiphy_debug(ar->hw->wiphy,
1448 "unprocessed pending frames left:\n");
1449 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1450#endif /* AR9170_QUEUE_DEBUG */
1451
1452 if (unlikely(frames_failed)) {
1453#ifdef AR9170_QUEUE_DEBUG
1454 wiphy_debug(ar->hw->wiphy,
1455 "frames failed %d =>\n", frames_failed);
1456#endif /* AR9170_QUEUE_DEBUG */
1457
1458 spin_lock_irqsave(&ar->tx_stats_lock, flags);
1459 ar->tx_stats[i].len -= frames_failed;
1460 ar->tx_stats[i].count -= frames_failed;
1461#ifdef AR9170_QUEUE_STOP_DEBUG
1462 wiphy_debug(ar->hw->wiphy, "wake queue %d\n", i);
1463 __ar9170_dump_txstats(ar);
1464#endif /* AR9170_QUEUE_STOP_DEBUG */
1465 ieee80211_wake_queue(ar->hw, i);
1466 spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1467 }
1468 }
1469
1470 if (!schedule_garbagecollector)
1471 return;
1472
1473 ieee80211_queue_delayed_work(ar->hw,
1474 &ar->tx_janitor,
1475 msecs_to_jiffies(AR9170_JANITOR_DELAY));
1476}
1477
1478void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1479{
1480 struct ar9170 *ar = hw->priv;
1481 struct ieee80211_tx_info *info;
1482 unsigned int queue;
1483
1484 if (unlikely(!IS_STARTED(ar)))
1485 goto err_free;
1486
1487 if (unlikely(ar9170_tx_prepare(ar, skb)))
1488 goto err_free;
1489
1490 queue = skb_get_queue_mapping(skb);
1491 info = IEEE80211_SKB_CB(skb);
1492 ar9170_tx_prepare_phy(ar, skb);
1493 skb_queue_tail(&ar->tx_pending[queue], skb);
1494
1495 ar9170_tx(ar);
1496 return;
1497
1498err_free:
1499 dev_kfree_skb_any(skb);
1500}
1501
1502static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1503 struct ieee80211_vif *vif)
1504{
1505 struct ar9170 *ar = hw->priv;
1506 struct ath_common *common = &ar->common;
1507 int err = 0;
1508
1509 mutex_lock(&ar->mutex);
1510
1511 if (ar->vif) {
1512 err = -EBUSY;
1513 goto unlock;
1514 }
1515
1516 ar->vif = vif;
1517 memcpy(common->macaddr, vif->addr, ETH_ALEN);
1518
1519 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1520 ar->rx_software_decryption = true;
1521 ar->disable_offload = true;
1522 }
1523
1524 ar->cur_filter = 0;
1525 err = ar9170_update_frame_filter(ar, AR9170_MAC_REG_FTF_DEFAULTS);
1526 if (err)
1527 goto unlock;
1528
1529 err = ar9170_set_operating_mode(ar);
1530
1531unlock:
1532 mutex_unlock(&ar->mutex);
1533 return err;
1534}
1535
1536static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
1537 struct ieee80211_vif *vif)
1538{
1539 struct ar9170 *ar = hw->priv;
1540
1541 mutex_lock(&ar->mutex);
1542 ar->vif = NULL;
1543 ar9170_update_frame_filter(ar, 0);
1544 ar9170_set_beacon_timers(ar);
1545 dev_kfree_skb(ar->beacon);
1546 ar->beacon = NULL;
1547 ar->sniffer_enabled = false;
1548 ar->rx_software_decryption = false;
1549 ar9170_set_operating_mode(ar);
1550 mutex_unlock(&ar->mutex);
1551}
1552
1553static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
1554{
1555 struct ar9170 *ar = hw->priv;
1556 int err = 0;
1557
1558 mutex_lock(&ar->mutex);
1559
1560 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
1561 /* TODO */
1562 err = 0;
1563 }
1564
1565 if (changed & IEEE80211_CONF_CHANGE_PS) {
1566 /* TODO */
1567 err = 0;
1568 }
1569
1570 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1571 /* TODO */
1572 err = 0;
1573 }
1574
1575 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
1576 /*
1577 * is it long_frame_max_tx_count or short_frame_max_tx_count?
1578 */
1579
1580 err = ar9170_set_hwretry_limit(ar,
1581 ar->hw->conf.long_frame_max_tx_count);
1582 if (err)
1583 goto out;
1584 }
1585
1586 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1587
1588 /* adjust slot time for 5 GHz */
1589 err = ar9170_set_slot_time(ar);
1590 if (err)
1591 goto out;
1592
1593 err = ar9170_set_dyn_sifs_ack(ar);
1594 if (err)
1595 goto out;
1596
1597 err = ar9170_set_channel(ar, hw->conf.channel,
1598 AR9170_RFI_NONE,
1599 nl80211_to_ar9170(hw->conf.channel_type));
1600 if (err)
1601 goto out;
1602 }
1603
1604out:
1605 mutex_unlock(&ar->mutex);
1606 return err;
1607}
1608
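/*
 * The hardware multicast filter is a 64-bit hash: every address maps
 * to one bit selected by the upper six bits of its last octet, and
 * the broadcast bit (last octet 0xff) is always set.
 */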
1609static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
1610 struct netdev_hw_addr_list *mc_list)
1611{
1612 u64 mchash;
1613 struct netdev_hw_addr *ha;
1614
1615 /* always get broadcast frames */
1616 mchash = 1ULL << (0xff >> 2);
1617
1618 netdev_hw_addr_list_for_each(ha, mc_list)
1619 mchash |= 1ULL << (ha->addr[5] >> 2);
1620
1621 return mchash;
1622}
1623
1624static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
1625 unsigned int changed_flags,
1626 unsigned int *new_flags,
1627 u64 multicast)
1628{
1629 struct ar9170 *ar = hw->priv;
1630
1631 if (unlikely(!IS_ACCEPTING_CMD(ar)))
1632 return ;
1633
1634 mutex_lock(&ar->mutex);
1635
1636 /* mask supported flags */
1637 *new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
1638 FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
1639 ar->filter_state = *new_flags;
1640 /*
1641 * We can support more by setting the sniffer bit and
1642 * then checking the error flags, later.
1643 */
1644
1645 if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
1646 multicast = ~0ULL;
1647
1648 if (multicast != ar->cur_mc_hash)
1649 ar9170_update_multicast(ar, multicast);
1650
1651 if (changed_flags & FIF_CONTROL) {
1652 u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
1653 AR9170_MAC_REG_FTF_RTS |
1654 AR9170_MAC_REG_FTF_CTS |
1655 AR9170_MAC_REG_FTF_ACK |
1656 AR9170_MAC_REG_FTF_CFE |
1657 AR9170_MAC_REG_FTF_CFE_ACK;
1658
1659 if (*new_flags & FIF_CONTROL)
1660 filter |= ar->cur_filter;
1661 else
1662 filter &= (~ar->cur_filter);
1663
1664 ar9170_update_frame_filter(ar, filter);
1665 }
1666
1667 if (changed_flags & FIF_PROMISC_IN_BSS) {
1668 ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
1669 ar9170_set_operating_mode(ar);
1670 }
1671
1672 mutex_unlock(&ar->mutex);
1673}
1674
1675
1676static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
1677 struct ieee80211_vif *vif,
1678 struct ieee80211_bss_conf *bss_conf,
1679 u32 changed)
1680{
1681 struct ar9170 *ar = hw->priv;
1682 struct ath_common *common = &ar->common;
1683 int err = 0;
1684
1685 mutex_lock(&ar->mutex);
1686
1687 if (changed & BSS_CHANGED_BSSID) {
1688 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1689 err = ar9170_set_operating_mode(ar);
1690 if (err)
1691 goto out;
1692 }
1693
1694 if (changed & BSS_CHANGED_BEACON_ENABLED)
1695 ar->enable_beacon = bss_conf->enable_beacon;
1696
1697 if (changed & BSS_CHANGED_BEACON) {
1698 err = ar9170_update_beacon(ar);
1699 if (err)
1700 goto out;
1701 }
1702
1703 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1704 BSS_CHANGED_BEACON_INT)) {
1705 err = ar9170_set_beacon_timers(ar);
1706 if (err)
1707 goto out;
1708 }
1709
1710 if (changed & BSS_CHANGED_ASSOC) {
1711#ifndef CONFIG_AR9170_LEDS
1712 /* enable assoc LED. */
1713 err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
1714#endif /* CONFIG_AR9170_LEDS */
1715 }
1716
1717 if (changed & BSS_CHANGED_HT) {
1718 /* TODO */
1719 err = 0;
1720 }
1721
1722 if (changed & BSS_CHANGED_ERP_SLOT) {
1723 err = ar9170_set_slot_time(ar);
1724 if (err)
1725 goto out;
1726 }
1727
1728 if (changed & BSS_CHANGED_BASIC_RATES) {
1729 err = ar9170_set_basic_rates(ar);
1730 if (err)
1731 goto out;
1732 }
1733
1734out:
1735 mutex_unlock(&ar->mutex);
1736}
1737
1738static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
1739{
1740 struct ar9170 *ar = hw->priv;
1741 int err;
1742 u64 tsf;
1743#define NR 3
1744 static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H,
1745 AR9170_MAC_REG_TSF_L,
1746 AR9170_MAC_REG_TSF_H };
1747 u32 val[NR];
1748 int loops = 0;
1749
1750 mutex_lock(&ar->mutex);
1751
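	/*
	 * Read TSF high, low, high again: if both reads of the high word
	 * match, the low word did not wrap in between and the combined
	 * 64-bit timestamp is consistent.
	 */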
1752 while (loops++ < 10) {
1753 err = ar9170_read_mreg(ar, NR, addr, val);
1754 if (err || val[0] == val[2])
1755 break;
1756 }
1757
1758 mutex_unlock(&ar->mutex);
1759
1760 if (WARN_ON(err))
1761 return 0;
1762 tsf = val[0];
1763 tsf = (tsf << 32) | val[1];
1764 return tsf;
1765#undef NR
1766}
1767
1768static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1769 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
1770 struct ieee80211_key_conf *key)
1771{
1772 struct ar9170 *ar = hw->priv;
1773 int err = 0, i;
1774 u8 ktype;
1775
1776 if ((!ar->vif) || (ar->disable_offload))
1777 return -EOPNOTSUPP;
1778
1779 switch (key->cipher) {
1780 case WLAN_CIPHER_SUITE_WEP40:
1781 ktype = AR9170_ENC_ALG_WEP64;
1782 break;
1783 case WLAN_CIPHER_SUITE_WEP104:
1784 ktype = AR9170_ENC_ALG_WEP128;
1785 break;
1786 case WLAN_CIPHER_SUITE_TKIP:
1787 ktype = AR9170_ENC_ALG_TKIP;
1788 break;
1789 case WLAN_CIPHER_SUITE_CCMP:
1790 ktype = AR9170_ENC_ALG_AESCCMP;
1791 break;
1792 default:
1793 return -EOPNOTSUPP;
1794 }
1795
1796 mutex_lock(&ar->mutex);
1797 if (cmd == SET_KEY) {
1798 if (unlikely(!IS_STARTED(ar))) {
1799 err = -EOPNOTSUPP;
1800 goto out;
1801 }
1802
1803 /* group keys need all-zeroes address */
1804 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1805 sta = NULL;
1806
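		/*
		 * Pairwise keys take the first free slot out of the 64
		 * tracked in ar->usedkeys; group keys live in the fixed
		 * slots 64 + keyidx. The bitmap is mirrored into the
		 * roll-call table registers below.
		 */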
1807 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
1808 for (i = 0; i < 64; i++)
1809 if (!(ar->usedkeys & BIT(i)))
1810 break;
1811 if (i == 64) {
1812 ar->rx_software_decryption = true;
1813 ar9170_set_operating_mode(ar);
1814 err = -ENOSPC;
1815 goto out;
1816 }
1817 } else {
1818 i = 64 + key->keyidx;
1819 }
1820
1821 key->hw_key_idx = i;
1822
1823 err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
1824 key->key, min_t(u8, 16, key->keylen));
1825 if (err)
1826 goto out;
1827
1828 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1829 err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
1830 ktype, 1, key->key + 16, 16);
1831 if (err)
1832 goto out;
1833
1834 /*
1835			 * the hardware is not capable of generating the MMIC
1836 * for fragmented frames!
1837 */
1838 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1839 }
1840
1841 if (i < 64)
1842 ar->usedkeys |= BIT(i);
1843
1844 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1845 } else {
1846 if (unlikely(!IS_STARTED(ar))) {
1847 /* The device is gone... together with the key ;-) */
1848 err = 0;
1849 goto out;
1850 }
1851
1852 err = ar9170_disable_key(ar, key->hw_key_idx);
1853 if (err)
1854 goto out;
1855
1856 if (key->hw_key_idx < 64) {
1857 ar->usedkeys &= ~BIT(key->hw_key_idx);
1858 } else {
1859 err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
1860 AR9170_ENC_ALG_NONE, 0,
1861 NULL, 0);
1862 if (err)
1863 goto out;
1864
1865 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1866 err = ar9170_upload_key(ar, key->hw_key_idx,
1867 NULL,
1868 AR9170_ENC_ALG_NONE, 1,
1869 NULL, 0);
1870 if (err)
1871 goto out;
1872 }
1873
1874 }
1875 }
1876
1877 ar9170_regwrite_begin(ar);
1878 ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
1879 ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
1880 ar9170_regwrite_finish();
1881 err = ar9170_regwrite_result();
1882
1883out:
1884 mutex_unlock(&ar->mutex);
1885
1886 return err;
1887}
1888
1889static int ar9170_get_stats(struct ieee80211_hw *hw,
1890 struct ieee80211_low_level_stats *stats)
1891{
1892 struct ar9170 *ar = hw->priv;
1893 u32 val;
1894 int err;
1895
1896 mutex_lock(&ar->mutex);
1897 err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
1898 ar->stats.dot11ACKFailureCount += val;
1899
1900 memcpy(stats, &ar->stats, sizeof(*stats));
1901 mutex_unlock(&ar->mutex);
1902
1903 return 0;
1904}
1905
1906static int ar9170_get_survey(struct ieee80211_hw *hw, int idx,
1907 struct survey_info *survey)
1908{
1909 struct ar9170 *ar = hw->priv;
1910 struct ieee80211_conf *conf = &hw->conf;
1911
1912 if (idx != 0)
1913 return -ENOENT;
1914
1915 /* TODO: update noise value, e.g. call ar9170_set_channel */
1916
1917 survey->channel = conf->channel;
1918 survey->filled = SURVEY_INFO_NOISE_DBM;
1919 survey->noise = ar->noise[0];
1920
1921 return 0;
1922}
1923
1924static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1925 const struct ieee80211_tx_queue_params *param)
1926{
1927 struct ar9170 *ar = hw->priv;
1928 int ret;
1929
1930 mutex_lock(&ar->mutex);
1931 if (queue < __AR9170_NUM_TXQ) {
1932 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
1933 param, sizeof(*param));
1934
1935 ret = ar9170_set_qos(ar);
1936 } else {
1937 ret = -EINVAL;
1938 }
1939
1940 mutex_unlock(&ar->mutex);
1941 return ret;
1942}
1943
1944static int ar9170_ampdu_action(struct ieee80211_hw *hw,
1945 struct ieee80211_vif *vif,
1946 enum ieee80211_ampdu_mlme_action action,
1947 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
1948 u8 buf_size)
1949{
1950 switch (action) {
1951 case IEEE80211_AMPDU_RX_START:
1952 case IEEE80211_AMPDU_RX_STOP:
1953 /* Handled by firmware */
1954 break;
1955
1956 default:
1957 return -EOPNOTSUPP;
1958 }
1959
1960 return 0;
1961}
1962
1963static const struct ieee80211_ops ar9170_ops = {
1964 .start = ar9170_op_start,
1965 .stop = ar9170_op_stop,
1966 .tx = ar9170_op_tx,
1967 .add_interface = ar9170_op_add_interface,
1968 .remove_interface = ar9170_op_remove_interface,
1969 .config = ar9170_op_config,
1970 .prepare_multicast = ar9170_op_prepare_multicast,
1971 .configure_filter = ar9170_op_configure_filter,
1972 .conf_tx = ar9170_conf_tx,
1973 .bss_info_changed = ar9170_op_bss_info_changed,
1974 .get_tsf = ar9170_op_get_tsf,
1975 .set_key = ar9170_set_key,
1976 .get_stats = ar9170_get_stats,
1977 .get_survey = ar9170_get_survey,
1978 .ampdu_action = ar9170_ampdu_action,
1979};
1980
1981void *ar9170_alloc(size_t priv_size)
1982{
1983 struct ieee80211_hw *hw;
1984 struct ar9170 *ar;
1985 struct sk_buff *skb;
1986 int i;
1987
1988 /*
1989 * this buffer is used for rx stream reconstruction.
1990 * Under heavy load this device (or the transport layer?)
1991 * tends to split the streams into separate rx descriptors.
1992 */
1993
1994 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1995 if (!skb)
1996 goto err_nomem;
1997
1998 hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
1999 if (!hw)
2000 goto err_nomem;
2001
2002 ar = hw->priv;
2003 ar->hw = hw;
2004 ar->rx_failover = skb;
2005
2006 mutex_init(&ar->mutex);
2007 spin_lock_init(&ar->cmdlock);
2008 spin_lock_init(&ar->tx_stats_lock);
2009 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
2010 skb_queue_head_init(&ar->tx_status[i]);
2011 skb_queue_head_init(&ar->tx_pending[i]);
2012 }
2013 ar9170_rx_reset_rx_mpdu(ar);
2014 INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
2015 INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
2016
2017 /* all hw supports 2.4 GHz, so set channel to 1 by default */
2018 ar->channel = &ar9170_2ghz_chantable[0];
2019
2020 /* first part of wiphy init */
2021 ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2022 BIT(NL80211_IFTYPE_WDS) |
2023 BIT(NL80211_IFTYPE_ADHOC);
2024 ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
2025 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2026 IEEE80211_HW_SIGNAL_DBM;
2027
2028 ar->hw->queues = __AR9170_NUM_TXQ;
2029 ar->hw->extra_tx_headroom = 8;
2030
2031 ar->hw->max_rates = 1;
2032 ar->hw->max_rate_tries = 3;
2033
2034 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
2035 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
2036
2037 return ar;
2038
2039err_nomem:
2040 kfree_skb(skb);
2041 return ERR_PTR(-ENOMEM);
2042}
2043
2044static int ar9170_read_eeprom(struct ar9170 *ar)
2045{
2046#define RW 8 /* number of words to read at once */
2047#define RB (sizeof(u32) * RW)
2048 struct ath_regulatory *regulatory = &ar->common.regulatory;
2049 u8 *eeprom = (void *)&ar->eeprom;
2050 u8 *addr = ar->eeprom.mac_address;
2051 __le32 offsets[RW];
2052 unsigned int rx_streams, tx_streams, tx_params = 0;
2053 int i, j, err, bands = 0;
2054
2055 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
2056
2057 BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
2058#ifndef __CHECKER__
2059	/* we don't want to handle a trailing remainder */
2060 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
2061#endif
2062
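	/*
	 * read the EEPROM in RB-byte chunks: each RREG command carries RW
	 * little-endian register offsets and returns the RW words read.
	 */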
2063 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
2064 for (j = 0; j < RW; j++)
2065 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
2066 RB * i + 4 * j);
2067
2068 err = ar->exec_cmd(ar, AR9170_CMD_RREG,
2069 RB, (u8 *) &offsets,
2070 RB, eeprom + RB * i);
2071 if (err)
2072 return err;
2073 }
2074
2075#undef RW
2076#undef RB
2077
2078 if (ar->eeprom.length == cpu_to_le16(0xFFFF))
2079 return -ENODATA;
2080
2081 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
2082 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
2083 bands++;
2084 }
2085 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
2086 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
2087 bands++;
2088 }
2089
2090 rx_streams = hweight8(ar->eeprom.rx_mask);
2091 tx_streams = hweight8(ar->eeprom.tx_mask);
2092
2093 if (rx_streams != tx_streams)
2094 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
2095
2096 if (tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)
2097 tx_params = (tx_streams - 1) <<
2098 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2099
2100 ar9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
2101 ar9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
2102
2103 /*
2104	 * I measured this: a band switch takes roughly
2105	 * 135 ms and a frequency switch about 80 ms.
2106	 *
2107	 * FIXME: measure these values again once the EEPROM settings
2108	 * are used, as they will influence them!
2109 */
2110 if (bands == 2)
2111 ar->hw->channel_change_time = 135 * 1000;
2112 else
2113 ar->hw->channel_change_time = 80 * 1000;
2114
2115 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
2116 regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
2117
2118 /* second part of wiphy init */
2119 SET_IEEE80211_PERM_ADDR(ar->hw, addr);
2120
2121 return bands ? 0 : -EINVAL;
2122}
2123
2124static int ar9170_reg_notifier(struct wiphy *wiphy,
2125 struct regulatory_request *request)
2126{
2127 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
2128 struct ar9170 *ar = hw->priv;
2129
2130 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
2131}
2132
2133int ar9170_register(struct ar9170 *ar, struct device *pdev)
2134{
2135 struct ath_regulatory *regulatory = &ar->common.regulatory;
2136 int err;
2137
2138 /* try to read EEPROM, init MAC addr */
2139 err = ar9170_read_eeprom(ar);
2140 if (err)
2141 goto err_out;
2142
2143 err = ath_regd_init(regulatory, ar->hw->wiphy,
2144 ar9170_reg_notifier);
2145 if (err)
2146 goto err_out;
2147
2148 err = ieee80211_register_hw(ar->hw);
2149 if (err)
2150 goto err_out;
2151
2152 if (!ath_is_world_regd(regulatory))
2153 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2154
2155 err = ar9170_init_leds(ar);
2156 if (err)
2157 goto err_unreg;
2158
2159#ifdef CONFIG_AR9170_LEDS
2160 err = ar9170_register_leds(ar);
2161 if (err)
2162 goto err_unreg;
2163#endif /* CONFIG_AR9170_LEDS */
2164
2165 dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
2166 wiphy_name(ar->hw->wiphy));
2167
2168 ar->registered = true;
2169 return 0;
2170
2171err_unreg:
2172 ieee80211_unregister_hw(ar->hw);
2173
2174err_out:
2175 return err;
2176}
2177
2178void ar9170_unregister(struct ar9170 *ar)
2179{
2180 if (ar->registered) {
2181#ifdef CONFIG_AR9170_LEDS
2182 ar9170_unregister_leds(ar);
2183#endif /* CONFIG_AR9170_LEDS */
2184
2185 ieee80211_unregister_hw(ar->hw);
2186 }
2187
2188 kfree_skb(ar->rx_failover);
2189 mutex_destroy(&ar->mutex);
2190}
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
deleted file mode 100644
index aa8d06ba1ee4..000000000000
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ /dev/null
@@ -1,1719 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * PHY and RF code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <linux/bitrev.h>
40#include "ar9170.h"
41#include "cmd.h"
42
43static int ar9170_init_power_cal(struct ar9170 *ar)
44{
45 ar9170_regwrite_begin(ar);
46
47 ar9170_regwrite(0x1bc000 + 0x993c, 0x7f);
48 ar9170_regwrite(0x1bc000 + 0x9934, 0x3f3f3f3f);
49 ar9170_regwrite(0x1bc000 + 0x9938, 0x3f3f3f3f);
50 ar9170_regwrite(0x1bc000 + 0xa234, 0x3f3f3f3f);
51 ar9170_regwrite(0x1bc000 + 0xa238, 0x3f3f3f3f);
52 ar9170_regwrite(0x1bc000 + 0xa38c, 0x3f3f3f3f);
53 ar9170_regwrite(0x1bc000 + 0xa390, 0x3f3f3f3f);
54 ar9170_regwrite(0x1bc000 + 0xa3cc, 0x3f3f3f3f);
55 ar9170_regwrite(0x1bc000 + 0xa3d0, 0x3f3f3f3f);
56 ar9170_regwrite(0x1bc000 + 0xa3d4, 0x3f3f3f3f);
57
58 ar9170_regwrite_finish();
59 return ar9170_regwrite_result();
60}
61
62struct ar9170_phy_init {
63 u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
64};
65
66static struct ar9170_phy_init ar5416_phy_init[] = {
67 { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
68 { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
69 { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
70 { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
71 { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
72 { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
73 { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
74 { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
75 { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
76 { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
77 { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
78 { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
79 { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
80 { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
81 { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
82 { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
83 { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
84 { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
85 { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
86 { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
87 { 0x1c5850, 0x6c48b4e4, 0x6c48b4e4, 0x6c48b0e4, 0x6c48b0e4, },
88 { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
89 { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
90 { 0x1c585c, 0x31395c5e, 0x31395c5e, 0x31395c5e, 0x31395c5e, },
91 { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
92 { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
93 { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
94 { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
95 { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
96 { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
97 { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
98 { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
99 { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
100 { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
101 { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
102 { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
103 { 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
104 { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
105 { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
106 { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
107 { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
108 { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
109 { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
110 { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
111 { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
112 { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
113 { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
114 { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
115 { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
116 { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
117 { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
118 { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
119 { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
120 { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
121 { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
122 { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
123 { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
124 { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
125 { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
126 { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
127 { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
128 { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
129 { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
130 { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
131 { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
132 { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
133 { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
134 { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
135 { 0x1c59c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, },
136 { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
137 { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
138 { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
139 { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
140 { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
141 { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
142 { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
143 { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
144 { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
145 { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
146 { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
147 { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
148 { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
149 { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
150 { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
151 { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
152 { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
153 { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
154 { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
155 { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
156 { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
157 { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
158 { 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
159 { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
160 { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
161 { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
162 { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
163 { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
164 { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
165 { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
166 { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
167 { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
168 { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
169 { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
170 { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
171 { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
172 { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
173 { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
174 { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
175 { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
176 { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
177 { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
178 { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
179 { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
180 { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
181 { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
182 { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
183 { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
184 { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
185 { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
186 { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
187 { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
188 { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
189 { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
190 { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
191 { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
192 { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
193 { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
194 { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
195 { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
196 { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
197 { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
198 { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
199 { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
200 { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
201 { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
202 { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
203 { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
204 { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
205 { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
206 { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
207 { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
208 { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
209 { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
210 { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
211 { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
212 { 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
213 { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
214 { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
215 { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
216 { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
217 { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
218 { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
219 { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
220 { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
221 { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
222 { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
223 { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
224 { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
225 { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
226 { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
227 { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
228 { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
229 { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
230 { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
231 { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
232 { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
233 { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
234 { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
235 { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
236 { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
237 { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
238 { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
239 { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
240 { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
241 { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
242 { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
243 { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
244 { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
245 { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
246 { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
247 { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
248 { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
249 { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
250 { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
251 { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
252 { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
253 { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
254 { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
255 { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
256 { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
257 { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
258 { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
259 { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
260 { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
261 { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
262 { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
263 { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
264 { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
265 { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
266 { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
267 { 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
268 { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
269 { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
270 { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
271 { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
272 { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
273 { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
274 { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
275 { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
276 { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
277 { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
278 { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
279 { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
280 { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
281 { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
282 { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
283 { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
284 { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
285 { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
286 { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
287 { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
288 { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
289 { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
290 { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
291 { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
292 { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
293 { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
294 { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
295 { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
296 { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
297 { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
298 { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
299 { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
300 { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
301 { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
302 { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
303 { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
304 { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
305 { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
306 { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
307 { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
308 { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
309 { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
310 { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
311 { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
312 { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
313 { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
314 { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
315 { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
316 { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
317 { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
318 { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
319 { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
320 { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
321 { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
322 { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
323 { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
324 { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
325 { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
326 { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
327 { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
328 { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
329 { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
330 { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
331 { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
332 { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
333 { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
334 { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
335 { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
336 { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
337 { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
338 { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
339 { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
340 { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
341 { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
342 { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
343 { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
344 { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
345 { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
346 { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
347 { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
348 { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
349 { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
350 { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
351 { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
352 { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
353 { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
354 { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
355 { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
356 { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
357 { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
358 { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
359 { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
360 { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
361 { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
362 { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
363 { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
364 { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
365 { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
366 { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
367 { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
368 { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
369 { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
370 { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
371 { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
372 { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
373 { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
374 { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
375 { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
376 { 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
377 { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
378 { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
379 { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
380/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
381 { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
382 { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
383 { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
384 { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
385 { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
386 { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
387 { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
388 { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
389 { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
390 { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
391 { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
392 { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
393 { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
394 { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
395 { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
396 { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
397};
398
399/*
400 * look up a register in ar5416_phy_init[] and return its initial value
401 * for the given band and bandwidth. Return 0 if the address is not found.
402 */
403static u32 ar9170_get_default_phy_reg_val(u32 reg, bool is_2ghz, bool is_40mhz)
404{
405 unsigned int i;
406 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
407 if (ar5416_phy_init[i].reg != reg)
408 continue;
409
410 if (is_2ghz) {
411 if (is_40mhz)
412 return ar5416_phy_init[i]._2ghz_40;
413 else
414 return ar5416_phy_init[i]._2ghz_20;
415 } else {
416 if (is_40mhz)
417 return ar5416_phy_init[i]._5ghz_40;
418 else
419 return ar5416_phy_init[i]._5ghz_20;
420 }
421 }
422 return 0;
423}
424
425/*
426 * initialize some PHY registers from the EEPROM values in modal_header[],
427 * according to band and bandwidth
428 */
429static int ar9170_init_phy_from_eeprom(struct ar9170 *ar,
430 bool is_2ghz, bool is_40mhz)
431{
432 static const u8 xpd2pd[16] = {
433 0x2, 0x2, 0x2, 0x1, 0x2, 0x2, 0x6, 0x2,
434 0x2, 0x3, 0x7, 0x2, 0xB, 0x2, 0x2, 0x2
435 };
436 u32 defval, newval;
437	/* pointer to the modal_header according to band */
438 struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz];
439
440 ar9170_regwrite_begin(ar);
441
442 /* ant common control (index 0) */
443 newval = le32_to_cpu(m->antCtrlCommon);
444 ar9170_regwrite(0x1c5964, newval);
445
446 /* ant control chain 0 (index 1) */
447 newval = le32_to_cpu(m->antCtrlChain[0]);
448 ar9170_regwrite(0x1c5960, newval);
449
450 /* ant control chain 2 (index 2) */
451 newval = le32_to_cpu(m->antCtrlChain[1]);
452 ar9170_regwrite(0x1c7960, newval);
453
454 /* SwSettle (index 3) */
455 if (!is_40mhz) {
456 defval = ar9170_get_default_phy_reg_val(0x1c5844,
457 is_2ghz, is_40mhz);
458 newval = (defval & ~0x3f80) |
459 ((m->switchSettling & 0x7f) << 7);
460 ar9170_regwrite(0x1c5844, newval);
461 }
462
463 /* adcDesired, pdaDesired (index 4) */
464 defval = ar9170_get_default_phy_reg_val(0x1c5850, is_2ghz, is_40mhz);
465 newval = (defval & ~0xffff) | ((u8)m->pgaDesiredSize << 8) |
466 ((u8)m->adcDesiredSize);
467 ar9170_regwrite(0x1c5850, newval);
468
469 /* TxEndToXpaOff, TxFrameToXpaOn (index 5) */
470 defval = ar9170_get_default_phy_reg_val(0x1c5834, is_2ghz, is_40mhz);
471 newval = (m->txEndToXpaOff << 24) | (m->txEndToXpaOff << 16) |
472 (m->txFrameToXpaOn << 8) | m->txFrameToXpaOn;
473 ar9170_regwrite(0x1c5834, newval);
474
475 /* TxEndToRxOn (index 6) */
476 defval = ar9170_get_default_phy_reg_val(0x1c5828, is_2ghz, is_40mhz);
477 newval = (defval & ~0xff0000) | (m->txEndToRxOn << 16);
478 ar9170_regwrite(0x1c5828, newval);
479
480 /* thresh62 (index 7) */
481 defval = ar9170_get_default_phy_reg_val(0x1c8864, is_2ghz, is_40mhz);
482 newval = (defval & ~0x7f000) | (m->thresh62 << 12);
483 ar9170_regwrite(0x1c8864, newval);
484
485 /* tx/rx attenuation chain 0 (index 8) */
486 defval = ar9170_get_default_phy_reg_val(0x1c5848, is_2ghz, is_40mhz);
487 newval = (defval & ~0x3f000) | ((m->txRxAttenCh[0] & 0x3f) << 12);
488 ar9170_regwrite(0x1c5848, newval);
489
490 /* tx/rx attenuation chain 2 (index 9) */
491 defval = ar9170_get_default_phy_reg_val(0x1c7848, is_2ghz, is_40mhz);
492 newval = (defval & ~0x3f000) | ((m->txRxAttenCh[1] & 0x3f) << 12);
493 ar9170_regwrite(0x1c7848, newval);
494
495 /* tx/rx margin chain 0 (index 10) */
496 defval = ar9170_get_default_phy_reg_val(0x1c620c, is_2ghz, is_40mhz);
497 newval = (defval & ~0xfc0000) | ((m->rxTxMarginCh[0] & 0x3f) << 18);
498 /* bsw margin chain 0 for 5GHz only */
499 if (!is_2ghz)
500 newval = (newval & ~0x3c00) | ((m->bswMargin[0] & 0xf) << 10);
501 ar9170_regwrite(0x1c620c, newval);
502
503 /* tx/rx margin chain 2 (index 11) */
504 defval = ar9170_get_default_phy_reg_val(0x1c820c, is_2ghz, is_40mhz);
505 newval = (defval & ~0xfc0000) | ((m->rxTxMarginCh[1] & 0x3f) << 18);
506 ar9170_regwrite(0x1c820c, newval);
507
508 /* iqCall, iqCallq chain 0 (index 12) */
509 defval = ar9170_get_default_phy_reg_val(0x1c5920, is_2ghz, is_40mhz);
510 newval = (defval & ~0x7ff) | (((u8)m->iqCalICh[0] & 0x3f) << 5) |
511 ((u8)m->iqCalQCh[0] & 0x1f);
512 ar9170_regwrite(0x1c5920, newval);
513
514 /* iqCall, iqCallq chain 2 (index 13) */
515 defval = ar9170_get_default_phy_reg_val(0x1c7920, is_2ghz, is_40mhz);
516 newval = (defval & ~0x7ff) | (((u8)m->iqCalICh[1] & 0x3f) << 5) |
517 ((u8)m->iqCalQCh[1] & 0x1f);
518 ar9170_regwrite(0x1c7920, newval);
519
520 /* xpd gain mask (index 14) */
521 defval = ar9170_get_default_phy_reg_val(0x1c6258, is_2ghz, is_40mhz);
522 newval = (defval & ~0xf0000) | (xpd2pd[m->xpdGain & 0xf] << 16);
523 ar9170_regwrite(0x1c6258, newval);
524 ar9170_regwrite_finish();
525
526 return ar9170_regwrite_result();
527}
528
529int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
530{
531 int i, err;
532 u32 val;
533 bool is_2ghz = band == IEEE80211_BAND_2GHZ;
534 bool is_40mhz = conf_is_ht40(&ar->hw->conf);
535
536 ar9170_regwrite_begin(ar);
537
538 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
539 if (is_40mhz) {
540 if (is_2ghz)
541 val = ar5416_phy_init[i]._2ghz_40;
542 else
543 val = ar5416_phy_init[i]._5ghz_40;
544 } else {
545 if (is_2ghz)
546 val = ar5416_phy_init[i]._2ghz_20;
547 else
548 val = ar5416_phy_init[i]._5ghz_20;
549 }
550
551 ar9170_regwrite(ar5416_phy_init[i].reg, val);
552 }
553
554 ar9170_regwrite_finish();
555 err = ar9170_regwrite_result();
556 if (err)
557 return err;
558
559 err = ar9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz);
560 if (err)
561 return err;
562
563 err = ar9170_init_power_cal(ar);
564 if (err)
565 return err;
566
567 /* XXX: remove magic! */
568 if (is_2ghz)
569 err = ar9170_write_reg(ar, 0x1d4014, 0x5163);
570 else
571 err = ar9170_write_reg(ar, 0x1d4014, 0x5143);
572
573 return err;
574}
575
576struct ar9170_rf_init {
577 u32 reg, _5ghz, _2ghz;
578};
579
580static struct ar9170_rf_init ar9170_rf_init[] = {
581 /* bank 0 */
582 { 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
583 { 0x1c58e0, 0x02008020, 0x02008020},
584 /* bank 1 */
585 { 0x1c58b0, 0x02108421, 0x02108421},
586 { 0x1c58ec, 0x00000008, 0x00000008},
587 /* bank 2 */
588 { 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
589 { 0x1c58e0, 0x00000420, 0x00000420},
590 /* bank 3 */
591 { 0x1c58f0, 0x01400018, 0x01c00018},
592 /* bank 4 */
593 { 0x1c58b0, 0x000001a1, 0x000001a1},
594 { 0x1c58e8, 0x00000001, 0x00000001},
595 /* bank 5 */
596 { 0x1c58b0, 0x00000013, 0x00000013},
597 { 0x1c58e4, 0x00000002, 0x00000002},
598 /* bank 6 */
599 { 0x1c58b0, 0x00000000, 0x00000000},
600 { 0x1c58b0, 0x00000000, 0x00000000},
601 { 0x1c58b0, 0x00000000, 0x00000000},
602 { 0x1c58b0, 0x00000000, 0x00000000},
603 { 0x1c58b0, 0x00000000, 0x00000000},
604 { 0x1c58b0, 0x00004000, 0x00004000},
605 { 0x1c58b0, 0x00006c00, 0x00006c00},
606 { 0x1c58b0, 0x00002c00, 0x00002c00},
607 { 0x1c58b0, 0x00004800, 0x00004800},
608 { 0x1c58b0, 0x00004000, 0x00004000},
609 { 0x1c58b0, 0x00006000, 0x00006000},
610 { 0x1c58b0, 0x00001000, 0x00001000},
611 { 0x1c58b0, 0x00004000, 0x00004000},
612 { 0x1c58b0, 0x00007c00, 0x00007c00},
613 { 0x1c58b0, 0x00007c00, 0x00007c00},
614 { 0x1c58b0, 0x00007c00, 0x00007c00},
615 { 0x1c58b0, 0x00007c00, 0x00007c00},
616 { 0x1c58b0, 0x00007c00, 0x00007c00},
617 { 0x1c58b0, 0x00087c00, 0x00087c00},
618 { 0x1c58b0, 0x00007c00, 0x00007c00},
619 { 0x1c58b0, 0x00005400, 0x00005400},
620 { 0x1c58b0, 0x00000c00, 0x00000c00},
621 { 0x1c58b0, 0x00001800, 0x00001800},
622 { 0x1c58b0, 0x00007c00, 0x00007c00},
623 { 0x1c58b0, 0x00006c00, 0x00006c00},
624 { 0x1c58b0, 0x00006c00, 0x00006c00},
625 { 0x1c58b0, 0x00007c00, 0x00007c00},
626 { 0x1c58b0, 0x00002c00, 0x00002c00},
627 { 0x1c58b0, 0x00003c00, 0x00003c00},
628 { 0x1c58b0, 0x00003800, 0x00003800},
629 { 0x1c58b0, 0x00001c00, 0x00001c00},
630 { 0x1c58b0, 0x00000800, 0x00000800},
631 { 0x1c58b0, 0x00000408, 0x00000408},
632 { 0x1c58b0, 0x00004c15, 0x00004c15},
633 { 0x1c58b0, 0x00004188, 0x00004188},
634 { 0x1c58b0, 0x0000201e, 0x0000201e},
635 { 0x1c58b0, 0x00010408, 0x00010408},
636 { 0x1c58b0, 0x00000801, 0x00000801},
637 { 0x1c58b0, 0x00000c08, 0x00000c08},
638 { 0x1c58b0, 0x0000181e, 0x0000181e},
639 { 0x1c58b0, 0x00001016, 0x00001016},
640 { 0x1c58b0, 0x00002800, 0x00002800},
641 { 0x1c58b0, 0x00004010, 0x00004010},
642 { 0x1c58b0, 0x0000081c, 0x0000081c},
643 { 0x1c58b0, 0x00000115, 0x00000115},
644 { 0x1c58b0, 0x00000015, 0x00000015},
645 { 0x1c58b0, 0x00000066, 0x00000066},
646 { 0x1c58b0, 0x0000001c, 0x0000001c},
647 { 0x1c58b0, 0x00000000, 0x00000000},
648 { 0x1c58b0, 0x00000004, 0x00000004},
649 { 0x1c58b0, 0x00000015, 0x00000015},
650 { 0x1c58b0, 0x0000001f, 0x0000001f},
651 { 0x1c58e0, 0x00000000, 0x00000400},
652 /* bank 7 */
653 { 0x1c58b0, 0x000000a0, 0x000000a0},
654 { 0x1c58b0, 0x00000000, 0x00000000},
655 { 0x1c58b0, 0x00000040, 0x00000040},
656 { 0x1c58f0, 0x0000001c, 0x0000001c},
657};
658
659static int ar9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
660{
661 int err, i;
662
663 ar9170_regwrite_begin(ar);
664
665 for (i = 0; i < ARRAY_SIZE(ar9170_rf_init); i++)
666 ar9170_regwrite(ar9170_rf_init[i].reg,
667 band5ghz ? ar9170_rf_init[i]._5ghz
668 : ar9170_rf_init[i]._2ghz);
669
670 ar9170_regwrite_finish();
671 err = ar9170_regwrite_result();
672 if (err)
673 wiphy_err(ar->hw->wiphy, "rf init failed\n");
674 return err;
675}
676
677static int ar9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
678 u32 freq, enum ar9170_bw bw)
679{
680 int err;
681 u32 d0, d1, td0, td1, fd0, fd1;
682 u8 chansel;
683 u8 refsel0 = 1, refsel1 = 0;
684 u8 lf_synth = 0;
685
686 switch (bw) {
687 case AR9170_BW_40_ABOVE:
688 freq += 10;
689 break;
690 case AR9170_BW_40_BELOW:
691 freq -= 10;
692 break;
693 case AR9170_BW_20:
694 break;
695 case __AR9170_NUM_BW:
696 BUG();
697 }
698
699 if (band5ghz) {
700 if (freq % 10) {
701 chansel = (freq - 4800) / 5;
702 } else {
703 chansel = ((freq - 4800) / 10) * 2;
704 refsel0 = 0;
705 refsel1 = 1;
706 }
707 chansel = byte_rev_table[chansel];
708 } else {
709 if (freq == 2484) {
710 chansel = 10 + (freq - 2274) / 5;
711 lf_synth = 1;
712 } else
713 chansel = 16 + (freq - 2272) / 5;
714 chansel *= 4;
715 chansel = byte_rev_table[chansel];
716 }
717
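	/*
	 * split the programming bytes d0/d1 across the two bank 4
	 * registers: fd0 carries their low five bits, fd1 the
	 * remaining upper three bits.
	 */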
718 d1 = chansel;
719 d0 = 0x21 |
720 refsel0 << 3 |
721 refsel1 << 2 |
722 lf_synth << 1;
723 td0 = d0 & 0x1f;
724 td1 = d1 & 0x1f;
725 fd0 = td1 << 5 | td0;
726
727 td0 = (d0 >> 5) & 0x7;
728 td1 = (d1 >> 5) & 0x7;
729 fd1 = td1 << 5 | td0;
730
731 ar9170_regwrite_begin(ar);
732
733 ar9170_regwrite(0x1c58b0, fd0);
734 ar9170_regwrite(0x1c58e8, fd1);
735
736 ar9170_regwrite_finish();
737 err = ar9170_regwrite_result();
738 if (err)
739 return err;
740
741 msleep(10);
742
743 return 0;
744}
745
746struct ar9170_phy_freq_params {
747 u8 coeff_exp;
748 u16 coeff_man;
749 u8 coeff_exp_shgi;
750 u16 coeff_man_shgi;
751};
752
753struct ar9170_phy_freq_entry {
754 u16 freq;
755 struct ar9170_phy_freq_params params[__AR9170_NUM_BW];
756};
757
758/* NB: must be kept in sync with the channel tables in main.c! */
759static const struct ar9170_phy_freq_entry ar9170_phy_freq_params[] = {
760/*
761 * freq,
762 * 20MHz,
763 * 40MHz (below),
764 * 40MHz (above),
765 */
766 { 2412, {
767 { 3, 21737, 3, 19563, },
768 { 3, 21827, 3, 19644, },
769 { 3, 21647, 3, 19482, },
770 } },
771 { 2417, {
772 { 3, 21692, 3, 19523, },
773 { 3, 21782, 3, 19604, },
774 { 3, 21602, 3, 19442, },
775 } },
776 { 2422, {
777 { 3, 21647, 3, 19482, },
778 { 3, 21737, 3, 19563, },
779 { 3, 21558, 3, 19402, },
780 } },
781 { 2427, {
782 { 3, 21602, 3, 19442, },
783 { 3, 21692, 3, 19523, },
784 { 3, 21514, 3, 19362, },
785 } },
786 { 2432, {
787 { 3, 21558, 3, 19402, },
788 { 3, 21647, 3, 19482, },
789 { 3, 21470, 3, 19323, },
790 } },
791 { 2437, {
792 { 3, 21514, 3, 19362, },
793 { 3, 21602, 3, 19442, },
794 { 3, 21426, 3, 19283, },
795 } },
796 { 2442, {
797 { 3, 21470, 3, 19323, },
798 { 3, 21558, 3, 19402, },
799 { 3, 21382, 3, 19244, },
800 } },
801 { 2447, {
802 { 3, 21426, 3, 19283, },
803 { 3, 21514, 3, 19362, },
804 { 3, 21339, 3, 19205, },
805 } },
806 { 2452, {
807 { 3, 21382, 3, 19244, },
808 { 3, 21470, 3, 19323, },
809 { 3, 21295, 3, 19166, },
810 } },
811 { 2457, {
812 { 3, 21339, 3, 19205, },
813 { 3, 21426, 3, 19283, },
814 { 3, 21252, 3, 19127, },
815 } },
816 { 2462, {
817 { 3, 21295, 3, 19166, },
818 { 3, 21382, 3, 19244, },
819 { 3, 21209, 3, 19088, },
820 } },
821 { 2467, {
822 { 3, 21252, 3, 19127, },
823 { 3, 21339, 3, 19205, },
824 { 3, 21166, 3, 19050, },
825 } },
826 { 2472, {
827 { 3, 21209, 3, 19088, },
828 { 3, 21295, 3, 19166, },
829 { 3, 21124, 3, 19011, },
830 } },
831 { 2484, {
832 { 3, 21107, 3, 18996, },
833 { 3, 21192, 3, 19073, },
834 { 3, 21022, 3, 18920, },
835 } },
836 { 4920, {
837 { 4, 21313, 4, 19181, },
838 { 4, 21356, 4, 19220, },
839 { 4, 21269, 4, 19142, },
840 } },
841 { 4940, {
842 { 4, 21226, 4, 19104, },
843 { 4, 21269, 4, 19142, },
844 { 4, 21183, 4, 19065, },
845 } },
846 { 4960, {
847 { 4, 21141, 4, 19027, },
848 { 4, 21183, 4, 19065, },
849 { 4, 21098, 4, 18988, },
850 } },
851 { 4980, {
852 { 4, 21056, 4, 18950, },
853 { 4, 21098, 4, 18988, },
854 { 4, 21014, 4, 18912, },
855 } },
856 { 5040, {
857 { 4, 20805, 4, 18725, },
858 { 4, 20846, 4, 18762, },
859 { 4, 20764, 4, 18687, },
860 } },
861 { 5060, {
862 { 4, 20723, 4, 18651, },
863 { 4, 20764, 4, 18687, },
864 { 4, 20682, 4, 18614, },
865 } },
866 { 5080, {
867 { 4, 20641, 4, 18577, },
868 { 4, 20682, 4, 18614, },
869 { 4, 20601, 4, 18541, },
870 } },
871 { 5180, {
872 { 4, 20243, 4, 18219, },
873 { 4, 20282, 4, 18254, },
874 { 4, 20204, 4, 18183, },
875 } },
876 { 5200, {
877 { 4, 20165, 4, 18148, },
878 { 4, 20204, 4, 18183, },
879 { 4, 20126, 4, 18114, },
880 } },
881 { 5220, {
882 { 4, 20088, 4, 18079, },
883 { 4, 20126, 4, 18114, },
884 { 4, 20049, 4, 18044, },
885 } },
886 { 5240, {
887 { 4, 20011, 4, 18010, },
888 { 4, 20049, 4, 18044, },
889 { 4, 19973, 4, 17976, },
890 } },
891 { 5260, {
892 { 4, 19935, 4, 17941, },
893 { 4, 19973, 4, 17976, },
894 { 4, 19897, 4, 17907, },
895 } },
896 { 5280, {
897 { 4, 19859, 4, 17873, },
898 { 4, 19897, 4, 17907, },
899 { 4, 19822, 4, 17840, },
900 } },
901 { 5300, {
902 { 4, 19784, 4, 17806, },
903 { 4, 19822, 4, 17840, },
904 { 4, 19747, 4, 17772, },
905 } },
906 { 5320, {
907 { 4, 19710, 4, 17739, },
908 { 4, 19747, 4, 17772, },
909 { 4, 19673, 4, 17706, },
910 } },
911 { 5500, {
912 { 4, 19065, 4, 17159, },
913 { 4, 19100, 4, 17190, },
914 { 4, 19030, 4, 17127, },
915 } },
916 { 5520, {
917 { 4, 18996, 4, 17096, },
918 { 4, 19030, 4, 17127, },
919 { 4, 18962, 4, 17065, },
920 } },
921 { 5540, {
922 { 4, 18927, 4, 17035, },
923 { 4, 18962, 4, 17065, },
924 { 4, 18893, 4, 17004, },
925 } },
926 { 5560, {
927 { 4, 18859, 4, 16973, },
928 { 4, 18893, 4, 17004, },
929 { 4, 18825, 4, 16943, },
930 } },
931 { 5580, {
932 { 4, 18792, 4, 16913, },
933 { 4, 18825, 4, 16943, },
934 { 4, 18758, 4, 16882, },
935 } },
936 { 5600, {
937 { 4, 18725, 4, 16852, },
938 { 4, 18758, 4, 16882, },
939 { 4, 18691, 4, 16822, },
940 } },
941 { 5620, {
942 { 4, 18658, 4, 16792, },
943 { 4, 18691, 4, 16822, },
944 { 4, 18625, 4, 16762, },
945 } },
946 { 5640, {
947 { 4, 18592, 4, 16733, },
948 { 4, 18625, 4, 16762, },
949 { 4, 18559, 4, 16703, },
950 } },
951 { 5660, {
952 { 4, 18526, 4, 16673, },
953 { 4, 18559, 4, 16703, },
954 { 4, 18493, 4, 16644, },
955 } },
956 { 5680, {
957 { 4, 18461, 4, 16615, },
958 { 4, 18493, 4, 16644, },
959 { 4, 18428, 4, 16586, },
960 } },
961 { 5700, {
962 { 4, 18396, 4, 16556, },
963 { 4, 18428, 4, 16586, },
964 { 4, 18364, 4, 16527, },
965 } },
966 { 5745, {
967 { 4, 18252, 4, 16427, },
968 { 4, 18284, 4, 16455, },
969 { 4, 18220, 4, 16398, },
970 } },
971 { 5765, {
972 { 4, 18189, 5, 32740, },
973 { 4, 18220, 4, 16398, },
974 { 4, 18157, 5, 32683, },
975 } },
976 { 5785, {
977 { 4, 18126, 5, 32626, },
978 { 4, 18157, 5, 32683, },
979 { 4, 18094, 5, 32570, },
980 } },
981 { 5805, {
982 { 4, 18063, 5, 32514, },
983 { 4, 18094, 5, 32570, },
984 { 4, 18032, 5, 32458, },
985 } },
986 { 5825, {
987 { 4, 18001, 5, 32402, },
988 { 4, 18032, 5, 32458, },
989 { 4, 17970, 5, 32347, },
990 } },
991 { 5170, {
992 { 4, 20282, 4, 18254, },
993 { 4, 20321, 4, 18289, },
994 { 4, 20243, 4, 18219, },
995 } },
996 { 5190, {
997 { 4, 20204, 4, 18183, },
998 { 4, 20243, 4, 18219, },
999 { 4, 20165, 4, 18148, },
1000 } },
1001 { 5210, {
1002 { 4, 20126, 4, 18114, },
1003 { 4, 20165, 4, 18148, },
1004 { 4, 20088, 4, 18079, },
1005 } },
1006 { 5230, {
1007 { 4, 20049, 4, 18044, },
1008 { 4, 20088, 4, 18079, },
1009 { 4, 20011, 4, 18010, },
1010 } },
1011};
1012
1013static const struct ar9170_phy_freq_params *
1014ar9170_get_hw_dyn_params(struct ieee80211_channel *channel,
1015 enum ar9170_bw bw)
1016{
1017 unsigned int chanidx = 0;
1018 u16 freq = 2412;
1019
1020 if (channel) {
1021 chanidx = channel->hw_value;
1022 freq = channel->center_freq;
1023 }
1024
1025 BUG_ON(chanidx >= ARRAY_SIZE(ar9170_phy_freq_params));
1026
1027 BUILD_BUG_ON(__AR9170_NUM_BW != 3);
1028
1029 WARN_ON(ar9170_phy_freq_params[chanidx].freq != freq);
1030
1031 return &ar9170_phy_freq_params[chanidx].params[bw];
1032}
1033
1034
1035int ar9170_init_rf(struct ar9170 *ar)
1036{
1037 const struct ar9170_phy_freq_params *freqpar;
1038 __le32 cmd[7];
1039 int err;
1040
1041 err = ar9170_init_rf_banks_0_7(ar, false);
1042 if (err)
1043 return err;
1044
1045 err = ar9170_init_rf_bank4_pwr(ar, false, 2412, AR9170_BW_20);
1046 if (err)
1047 return err;
1048
1049 freqpar = ar9170_get_hw_dyn_params(NULL, AR9170_BW_20);
1050
1051 cmd[0] = cpu_to_le32(2412 * 1000);
1052 cmd[1] = cpu_to_le32(0);
1053 cmd[2] = cpu_to_le32(1);
1054 cmd[3] = cpu_to_le32(freqpar->coeff_exp);
1055 cmd[4] = cpu_to_le32(freqpar->coeff_man);
1056 cmd[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
1057 cmd[6] = cpu_to_le32(freqpar->coeff_man_shgi);
1058
1059 /* RF_INIT echoes the command back to us */
1060 err = ar->exec_cmd(ar, AR9170_CMD_RF_INIT,
1061 sizeof(cmd), (u8 *)cmd,
1062 sizeof(cmd), (u8 *)cmd);
1063 if (err)
1064 return err;
1065
1066 msleep(1000);
1067
1068 return ar9170_echo_test(ar, 0xaabbccdd);
1069}
1070
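/*
 * return the index of the last entry in freqs[] (assumed to be sorted in
 * ascending order) that is <= f, or 0 if there is none; the result is
 * capped at nfreqs - 2 so that idx + 1 is always a valid upper
 * interpolation point.
 */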
1071static int ar9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
1072{
1073 int idx = nfreqs - 2;
1074
1075 while (idx >= 0) {
1076 if (f >= freqs[idx])
1077 return idx;
1078 idx--;
1079 }
1080
1081 return 0;
1082}
1083
1084static s32 ar9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1085{
1086 /* nothing to interpolate, it's horizontal */
1087 if (y2 == y1)
1088 return y1;
1089
1090 /* check if we hit one of the edges */
1091 if (x == x1)
1092 return y1;
1093 if (x == x2)
1094 return y2;
1095
1096	/* x1 == x2 would divide by zero below; hopefully x equals it, so y1 is fine */
1097 if (x2 == x1)
1098 return y1;
1099
1100 return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1));
1101}
1102
1103static u8 ar9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
1104{
1105#define SHIFT 8
1106 s32 y;
1107
1108 y = ar9170_interpolate_s32(x << SHIFT,
1109 x1 << SHIFT, y1 << SHIFT,
1110 x2 << SHIFT, y2 << SHIFT);
1111
1112 /*
1113 * XXX: unwrap this expression
1114 * Isn't it just DIV_ROUND_UP(y, 1<<SHIFT)?
1115 * Can we rely on the compiler to optimise away the div?
1116 */
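	/*
	 * note: this rounds the fixed-point result to the nearest integer
	 * (the second term adds the carry out of bit SHIFT - 1), which is
	 * not the same as DIV_ROUND_UP.
	 */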
1117 return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
1118#undef SHIFT
1119}
1120
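/*
 * find the segment of the calibration curve (x_array[], y_array[]) that
 * contains x and interpolate linearly within it.
 */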
1121static u8 ar9170_interpolate_val(u8 x, u8 *x_array, u8 *y_array)
1122{
1123 int i;
1124
1125 for (i = 0; i < 3; i++)
1126 if (x <= x_array[i + 1])
1127 break;
1128
1129 return ar9170_interpolate_u8(x,
1130 x_array[i],
1131 y_array[i],
1132 x_array[i + 1],
1133 y_array[i + 1]);
1134}
1135
1136static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1137 struct ieee80211_channel *channel)
1138{
1139 u8 *cal_freq_pier;
1140 u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
1141 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
1142 int chain, idx, i;
1143 u32 phy_data = 0;
1144 u8 f, tmp;
1145
1146 switch (channel->band) {
1147 case IEEE80211_BAND_2GHZ:
1148 f = channel->center_freq - 2300;
1149 cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
1150 i = AR5416_NUM_2G_CAL_PIERS - 1;
1151 break;
1152
1153 case IEEE80211_BAND_5GHZ:
1154 f = (channel->center_freq - 4800) / 5;
1155 cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
1156 i = AR5416_NUM_5G_CAL_PIERS - 1;
1157 break;
1158
1159 default:
1160 return -EINVAL;
1161 break;
1162 }
1163
1164 for (; i >= 0; i--) {
1165 if (cal_freq_pier[i] != 0xff)
1166 break;
1167 }
1168 if (i < 0)
1169 return -EINVAL;
1170
1171 idx = ar9170_find_freq_idx(i, cal_freq_pier, f);
1172
1173 ar9170_regwrite_begin(ar);
1174
1175 for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) {
1176 for (i = 0; i < AR5416_PD_GAIN_ICEPTS; i++) {
1177 struct ar9170_calibration_data_per_freq *cal_pier_data;
1178 int j;
1179
1180 switch (channel->band) {
1181 case IEEE80211_BAND_2GHZ:
1182 cal_pier_data = &ar->eeprom.
1183 cal_pier_data_2G[chain][idx];
1184 break;
1185
1186 case IEEE80211_BAND_5GHZ:
1187 cal_pier_data = &ar->eeprom.
1188 cal_pier_data_5G[chain][idx];
1189 break;
1190
1191 default:
1192 return -EINVAL;
1193 }
1194
1195 for (j = 0; j < 2; j++) {
1196 vpds[j][i] = ar9170_interpolate_u8(f,
1197 cal_freq_pier[idx],
1198 cal_pier_data->vpd_pdg[j][i],
1199 cal_freq_pier[idx + 1],
1200 cal_pier_data[1].vpd_pdg[j][i]);
1201
1202 pwrs[j][i] = ar9170_interpolate_u8(f,
1203 cal_freq_pier[idx],
1204 cal_pier_data->pwr_pdg[j][i],
1205 cal_freq_pier[idx + 1],
1206 cal_pier_data[1].pwr_pdg[j][i]) / 2;
1207 }
1208 }
1209
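		/*
		 * pack four 8-bit calibration values into each 32-bit write
		 * to the per-chain table at 0x1c6280.
		 */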
1210 for (i = 0; i < 76; i++) {
1211 if (i < 25) {
1212 tmp = ar9170_interpolate_val(i, &pwrs[0][0],
1213 &vpds[0][0]);
1214 } else {
1215 tmp = ar9170_interpolate_val(i - 12,
1216 &pwrs[1][0],
1217 &vpds[1][0]);
1218 }
1219
1220 phy_data |= tmp << ((i & 3) << 3);
1221 if ((i & 3) == 3) {
1222 ar9170_regwrite(0x1c6280 + chain * 0x1000 +
1223 (i & ~3), phy_data);
1224 phy_data = 0;
1225 }
1226 }
1227
1228 for (i = 19; i < 32; i++)
1229 ar9170_regwrite(0x1c6280 + chain * 0x1000 + (i << 2),
1230 0x0);
1231 }
1232
1233 ar9170_regwrite_finish();
1234 return ar9170_regwrite_result();
1235}
1236
1237static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
1238 struct ar9170_calctl_edges edges[],
1239 u32 freq)
1240{
1241 int i;
1242 u8 rc = AR5416_MAX_RATE_POWER;
1243 u8 f;
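	/*
	 * convert freq to the single-byte channel encoding used by the
	 * EEPROM edge tables: 2.4 GHz as freq - 2300, 5 GHz as
	 * (freq - 4800) / 5.
	 */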
1244 if (freq < 3000)
1245 f = freq - 2300;
1246 else
1247 f = (freq - 4800) / 5;
1248
1249 for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
1250 if (edges[i].channel == 0xff)
1251 break;
1252 if (f == edges[i].channel) {
1253 /* exact freq match */
1254 rc = edges[i].power_flags & ~AR9170_CALCTL_EDGE_FLAGS;
1255 break;
1256 }
1257 if (i > 0 && f < edges[i].channel) {
1258 if (f > edges[i - 1].channel &&
1259 edges[i - 1].power_flags &
1260 AR9170_CALCTL_EDGE_FLAGS) {
1261 /* lower channel has the inband flag set */
1262 rc = edges[i - 1].power_flags &
1263 ~AR9170_CALCTL_EDGE_FLAGS;
1264 }
1265 break;
1266 }
1267 }
1268
1269 if (i == AR5416_NUM_BAND_EDGES) {
1270 if (f > edges[i - 1].channel &&
1271 edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
1272 /* lower channel has the inband flag set */
1273 rc = edges[i - 1].power_flags &
1274 ~AR9170_CALCTL_EDGE_FLAGS;
1275 }
1276 }
1277 return rc;
1278}
1279
1280static u8 ar9170_get_heavy_clip(struct ar9170 *ar,
1281 struct ar9170_calctl_edges edges[],
1282 u32 freq, enum ar9170_bw bw)
1283{
1284 u8 f;
1285 int i;
1286 u8 rc = 0;
1287
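	/*
	 * heavy clip bitmap: the high nibble is set for 40 MHz operation,
	 * the low nibble when the frequency exactly matches a band edge
	 * whose inband flag is not set.
	 */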
1288 if (freq < 3000)
1289 f = freq - 2300;
1290 else
1291 f = (freq - 4800) / 5;
1292
1293 if (bw == AR9170_BW_40_BELOW || bw == AR9170_BW_40_ABOVE)
1294 rc |= 0xf0;
1295
1296 for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
1297 if (edges[i].channel == 0xff)
1298 break;
1299 if (f == edges[i].channel) {
1300 if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
1301 rc |= 0x0f;
1302 break;
1303 }
1304 }
1305
1306 return rc;
1307}
1308
1309/*
1310 * calculate the conformance test limits and the heavy clip parameter
1311 * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
1312 */
1313static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1314{
1315 u8 ctl_grp; /* CTL group */
1316 u8 ctl_idx; /* CTL index */
1317 int i, j;
1318 struct ctl_modes {
1319 u8 ctl_mode;
1320 u8 max_power;
1321 u8 *pwr_cal_data;
1322 int pwr_cal_len;
1323 } *modes;
1324
1325 /*
1326 * order is relevant in the mode_list_*: we fall back to the
1327	 * lower indices if any mode is missing from the EEPROM.
1328 */
1329 struct ctl_modes mode_list_2ghz[] = {
1330 { CTL_11B, 0, ar->power_2G_cck, 4 },
1331 { CTL_11G, 0, ar->power_2G_ofdm, 4 },
1332 { CTL_2GHT20, 0, ar->power_2G_ht20, 8 },
1333 { CTL_2GHT40, 0, ar->power_2G_ht40, 8 },
1334 };
1335 struct ctl_modes mode_list_5ghz[] = {
1336 { CTL_11A, 0, ar->power_5G_leg, 4 },
1337 { CTL_5GHT20, 0, ar->power_5G_ht20, 8 },
1338 { CTL_5GHT40, 0, ar->power_5G_ht40, 8 },
1339 };
1340 int nr_modes;
1341
1342#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
1343
1344 ar->phy_heavy_clip = 0;
1345
1346 /*
1347 * TODO: investigate the differences between OTUS'
1348 * hpreg.c::zfHpGetRegulatoryDomain() and
1349 * ath/regd.c::ath_regd_get_band_ctl() -
1350 * e.g. for FCC3_WORLD the OTUS procedure
1351 * always returns CTL_FCC, while the one in ath/ delivers
1352 * CTL_ETSI for 2GHz and CTL_FCC for 5GHz.
1353 */
1354 ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory,
1355 ar->hw->conf.channel->band);
1356
1357 /* ctl group not found - either invalid band (NO_CTL) or ww roaming */
1358 if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL)
1359 ctl_grp = CTL_FCC;
1360
1361 if (ctl_grp != CTL_FCC)
1362 /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
1363 return;
1364
1365 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
1366 modes = mode_list_2ghz;
1367 nr_modes = ARRAY_SIZE(mode_list_2ghz);
1368 } else {
1369 modes = mode_list_5ghz;
1370 nr_modes = ARRAY_SIZE(mode_list_5ghz);
1371 }
1372
1373 for (i = 0; i < nr_modes; i++) {
1374 u8 c = ctl_grp | modes[i].ctl_mode;
1375 for (ctl_idx = 0; ctl_idx < AR5416_NUM_CTLS; ctl_idx++)
1376 if (c == ar->eeprom.ctl_index[ctl_idx])
1377 break;
1378 if (ctl_idx < AR5416_NUM_CTLS) {
1379 int f_off = 0;
1380
1381 /* determine heav clip parameter from
1382 the 11G edges array */
1383 if (modes[i].ctl_mode == CTL_11G) {
1384 ar->phy_heavy_clip =
1385 ar9170_get_heavy_clip(ar,
1386 EDGES(ctl_idx, 1),
1387 freq, bw);
1388 }
1389
1390 /* adjust freq for 40MHz */
1391 if (modes[i].ctl_mode == CTL_2GHT40 ||
1392 modes[i].ctl_mode == CTL_5GHT40) {
1393 if (bw == AR9170_BW_40_BELOW)
1394 f_off = -10;
1395 else
1396 f_off = 10;
1397 }
1398
1399 modes[i].max_power =
1400 ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1),
1401 freq+f_off);
1402
1403 /*
1404 * TODO: check if the regulatory max. power is
1405 * controlled by cfg80211 for DFS
1406 * (hpmain applies it to max_power itself for DFS freq)
1407 */
1408
1409 } else {
1410 /*
1411 * Workaround in otus driver, hpmain.c, line 3906:
1412 * if no data for 5GHT20 are found, take the
1413 * legacy 5G value.
1414 * We extend this here to fallback from any other *HT or
1415 * 11G, too.
1416 */
1417 int k = i;
1418
1419 modes[i].max_power = AR5416_MAX_RATE_POWER;
1420 while (k-- > 0) {
1421 if (modes[k].max_power !=
1422 AR5416_MAX_RATE_POWER) {
1423 modes[i].max_power = modes[k].max_power;
1424 break;
1425 }
1426 }
1427 }
1428
1429 /* apply max power to pwr_cal_data (ar->power_*) */
1430 for (j = 0; j < modes[i].pwr_cal_len; j++) {
1431 modes[i].pwr_cal_data[j] = min(modes[i].pwr_cal_data[j],
1432 modes[i].max_power);
1433 }
1434 }
1435
1436 if (ar->phy_heavy_clip & 0xf0) {
1437 ar->power_2G_ht40[0]--;
1438 ar->power_2G_ht40[1]--;
1439 ar->power_2G_ht40[2]--;
1440 }
1441 if (ar->phy_heavy_clip & 0xf) {
1442 ar->power_2G_ht20[0]++;
1443 ar->power_2G_ht20[1]++;
1444 ar->power_2G_ht20[2]++;
1445 }
1446
1447
1448#undef EDGES
1449}
1450
1451static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1452{
1453 struct ar9170_calibration_target_power_legacy *ctpl;
1454 struct ar9170_calibration_target_power_ht *ctph;
1455 u8 *ctpres;
1456 int ntargets;
1457 int idx, i, n;
1458 u8 ackpower, ackchains, f;
1459 u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];
1460
1461 if (freq < 3000)
1462 f = freq - 2300;
1463 else
1464 f = (freq - 4800)/5;
1465
1466 /*
1467 * cycle through the various modes
1468 *
1469 * legacy modes first: 5G, 2G CCK, 2G OFDM
1470 */
1471 for (i = 0; i < 3; i++) {
1472 switch (i) {
1473 case 0: /* 5 GHz legacy */
1474 ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
1475 ntargets = AR5416_NUM_5G_TARGET_PWRS;
1476 ctpres = ar->power_5G_leg;
1477 break;
1478 case 1: /* 2.4 GHz CCK */
1479 ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
1480 ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
1481 ctpres = ar->power_2G_cck;
1482 break;
1483 case 2: /* 2.4 GHz OFDM */
1484 ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
1485 ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
1486 ctpres = ar->power_2G_ofdm;
1487 break;
1488 default:
1489 BUG();
1490 }
1491
1492 for (n = 0; n < ntargets; n++) {
1493 if (ctpl[n].freq == 0xff)
1494 break;
1495 pwr_freqs[n] = ctpl[n].freq;
1496 }
1497 ntargets = n;
1498 idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
1499 for (n = 0; n < 4; n++)
1500 ctpres[n] = ar9170_interpolate_u8(
1501 f,
1502 ctpl[idx + 0].freq,
1503 ctpl[idx + 0].power[n],
1504 ctpl[idx + 1].freq,
1505 ctpl[idx + 1].power[n]);
1506 }
1507
1508 /*
1509 * HT modes now: 5G HT20, 5G HT40, 2G CCK, 2G OFDM, 2G HT20, 2G HT40
1510 */
1511 for (i = 0; i < 4; i++) {
1512 switch (i) {
1513 case 0: /* 5 GHz HT 20 */
1514 ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
1515 ntargets = AR5416_NUM_5G_TARGET_PWRS;
1516 ctpres = ar->power_5G_ht20;
1517 break;
1518 case 1: /* 5 GHz HT 40 */
1519 ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
1520 ntargets = AR5416_NUM_5G_TARGET_PWRS;
1521 ctpres = ar->power_5G_ht40;
1522 break;
1523 case 2: /* 2.4 GHz HT 20 */
1524 ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
1525 ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
1526 ctpres = ar->power_2G_ht20;
1527 break;
1528 case 3: /* 2.4 GHz HT 40 */
1529 ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
1530 ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
1531 ctpres = ar->power_2G_ht40;
1532 break;
1533 default:
1534 BUG();
1535 }
1536
1537 for (n = 0; n < ntargets; n++) {
1538 if (ctph[n].freq == 0xff)
1539 break;
1540 pwr_freqs[n] = ctph[n].freq;
1541 }
1542 ntargets = n;
1543 idx = ar9170_find_freq_idx(ntargets, pwr_freqs, f);
1544 for (n = 0; n < 8; n++)
1545 ctpres[n] = ar9170_interpolate_u8(
1546 f,
1547 ctph[idx + 0].freq,
1548 ctph[idx + 0].power[n],
1549 ctph[idx + 1].freq,
1550 ctph[idx + 1].power[n]);
1551 }
1552
1553
1554 /* calc. conformance test limits and apply to ar->power*[] */
1555 ar9170_calc_ctl(ar, freq, bw);
1556
1557 /* set ACK/CTS TX power */
1558 ar9170_regwrite_begin(ar);
1559
1560 if (ar->eeprom.tx_mask != 1)
1561 ackchains = AR9170_TX_PHY_TXCHAIN_2;
1562 else
1563 ackchains = AR9170_TX_PHY_TXCHAIN_1;
1564
1565 if (freq < 3000)
1566 ackpower = ar->power_2G_ofdm[0] & 0x3f;
1567 else
1568 ackpower = ar->power_5G_leg[0] & 0x3f;
1569
1570 ar9170_regwrite(0x1c3694, ackpower << 20 | ackchains << 26);
1571 ar9170_regwrite(0x1c3bb4, ackpower << 5 | ackchains << 11 |
1572 ackpower << 21 | ackchains << 27);
1573
1574 ar9170_regwrite_finish();
1575 return ar9170_regwrite_result();
1576}
1577
1578static int ar9170_calc_noise_dbm(u32 raw_noise)
1579{
1580 if (raw_noise & 0x100)
1581 return ~((raw_noise & 0x0ff) >> 1);
1582 else
1583 return (raw_noise & 0xff) >> 1;
1584}
1585
1586int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1587 enum ar9170_rf_init_mode rfi, enum ar9170_bw bw)
1588{
1589 const struct ar9170_phy_freq_params *freqpar;
1590 u32 cmd, tmp, offs;
1591 __le32 vals[8];
1592 int i, err;
1593 bool bandswitch;
1594
1595 /* clear BB heavy clip enable */
1596 err = ar9170_write_reg(ar, 0x1c59e0, 0x200);
1597 if (err)
1598 return err;
1599
1600 /* may be NULL at first setup */
1601 if (ar->channel)
1602 bandswitch = ar->channel->band != channel->band;
1603 else
1604 bandswitch = true;
1605
1606 /* HW workaround */
1607 if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] &&
1608 channel->center_freq <= 2417)
1609 bandswitch = true;
1610
1611 err = ar->exec_cmd(ar, AR9170_CMD_FREQ_START, 0, NULL, 0, NULL);
1612 if (err)
1613 return err;
1614
1615 if (rfi != AR9170_RFI_NONE || bandswitch) {
1616 u32 val = 0x400;
1617
1618 if (rfi == AR9170_RFI_COLD)
1619 val = 0x800;
1620
1621 /* warm/cold reset BB/ADDA */
1622 err = ar9170_write_reg(ar, 0x1d4004, val);
1623 if (err)
1624 return err;
1625
1626 err = ar9170_write_reg(ar, 0x1d4004, 0x0);
1627 if (err)
1628 return err;
1629
1630 err = ar9170_init_phy(ar, channel->band);
1631 if (err)
1632 return err;
1633
1634 err = ar9170_init_rf_banks_0_7(ar,
1635 channel->band == IEEE80211_BAND_5GHZ);
1636 if (err)
1637 return err;
1638
1639 cmd = AR9170_CMD_RF_INIT;
1640 } else {
1641 cmd = AR9170_CMD_FREQUENCY;
1642 }
1643
1644 err = ar9170_init_rf_bank4_pwr(ar,
1645 channel->band == IEEE80211_BAND_5GHZ,
1646 channel->center_freq, bw);
1647 if (err)
1648 return err;
1649
1650 switch (bw) {
1651 case AR9170_BW_20:
1652 tmp = 0x240;
1653 offs = 0;
1654 break;
1655 case AR9170_BW_40_BELOW:
1656 tmp = 0x2c4;
1657 offs = 3;
1658 break;
1659 case AR9170_BW_40_ABOVE:
1660 tmp = 0x2d4;
1661 offs = 1;
1662 break;
1663 default:
1664 BUG();
1665 return -ENOSYS;
1666 }
1667
1668 if (ar->eeprom.tx_mask != 1)
1669 tmp |= 0x100;
1670
1671 err = ar9170_write_reg(ar, 0x1c5804, tmp);
1672 if (err)
1673 return err;
1674
1675 err = ar9170_set_freq_cal_data(ar, channel);
1676 if (err)
1677 return err;
1678
1679 err = ar9170_set_power_cal(ar, channel->center_freq, bw);
1680 if (err)
1681 return err;
1682
1683 freqpar = ar9170_get_hw_dyn_params(channel, bw);
1684
1685 vals[0] = cpu_to_le32(channel->center_freq * 1000);
1686 vals[1] = cpu_to_le32(conf_is_ht40(&ar->hw->conf));
1687 vals[2] = cpu_to_le32(offs << 2 | 1);
1688 vals[3] = cpu_to_le32(freqpar->coeff_exp);
1689 vals[4] = cpu_to_le32(freqpar->coeff_man);
1690 vals[5] = cpu_to_le32(freqpar->coeff_exp_shgi);
1691 vals[6] = cpu_to_le32(freqpar->coeff_man_shgi);
1692 vals[7] = cpu_to_le32(1000);
1693
1694 err = ar->exec_cmd(ar, cmd, sizeof(vals), (u8 *)vals,
1695 sizeof(vals), (u8 *)vals);
1696 if (err)
1697 return err;
1698
1699 if (ar->phy_heavy_clip) {
1700 err = ar9170_write_reg(ar, 0x1c59e0,
1701 0x200 | ar->phy_heavy_clip);
1702 if (err) {
1703 if (ar9170_nag_limiter(ar))
1704 wiphy_err(ar->hw->wiphy,
1705 "failed to set heavy clip\n");
1706 }
1707 }
1708
1709 for (i = 0; i < 2; i++) {
1710 ar->noise[i] = ar9170_calc_noise_dbm(
1711 (le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff);
1712
1713 ar->noise[i + 2] = ar9170_calc_noise_dbm(
1714 (le32_to_cpu(vals[5 + i]) >> 23) & 0x1ff);
1715 }
1716
1717 ar->channel = channel;
1718 return 0;
1719}
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
deleted file mode 100644
index d3be6f9816b5..000000000000
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ /dev/null
@@ -1,1008 +0,0 @@
1/*
2 * Atheros AR9170 driver
3 *
4 * USB - frontend
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/module.h>
41#include <linux/slab.h>
42#include <linux/usb.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/device.h>
46#include <net/mac80211.h>
47#include "ar9170.h"
48#include "cmd.h"
49#include "hw.h"
50#include "usb.h"
51
52MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
53MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
54MODULE_LICENSE("GPL");
55MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
56MODULE_FIRMWARE("ar9170.fw");
57
58enum ar9170_requirements {
59 AR9170_REQ_FW1_ONLY = 1,
60};
61
62static struct usb_device_id ar9170_usb_ids[] = {
63 /* Atheros 9170 */
64 { USB_DEVICE(0x0cf3, 0x9170) },
65 /* Atheros TG121N */
66 { USB_DEVICE(0x0cf3, 0x1001) },
67 /* TP-Link TL-WN821N v2 */
68 { USB_DEVICE(0x0cf3, 0x1002) },
69 /* 3Com Dual Band 802.11n USB Adapter */
70 { USB_DEVICE(0x0cf3, 0x1010) },
71 /* H3C Dual Band 802.11n USB Adapter */
72 { USB_DEVICE(0x0cf3, 0x1011) },
73 /* Cace Airpcap NX */
74 { USB_DEVICE(0xcace, 0x0300) },
75 /* D-Link DWA 160 A1 */
76 { USB_DEVICE(0x07d1, 0x3c10) },
77 /* D-Link DWA 160 A2 */
78 { USB_DEVICE(0x07d1, 0x3a09) },
79 /* Netgear WNA1000 */
80 { USB_DEVICE(0x0846, 0x9040) },
81 /* Netgear WNDA3100 */
82 { USB_DEVICE(0x0846, 0x9010) },
83 /* Netgear WN111 v2 */
84 { USB_DEVICE(0x0846, 0x9001) },
85 /* Zydas ZD1221 */
86 { USB_DEVICE(0x0ace, 0x1221) },
87 /* Proxim ORiNOCO 802.11n USB */
88 { USB_DEVICE(0x1435, 0x0804) },
89 /* WNC Generic 11n USB Dongle */
90 { USB_DEVICE(0x1435, 0x0326) },
91 /* ZyXEL NWD271N */
92 { USB_DEVICE(0x0586, 0x3417) },
93 /* Z-Com UB81 BG */
94 { USB_DEVICE(0x0cde, 0x0023) },
95 /* Z-Com UB82 ABG */
96 { USB_DEVICE(0x0cde, 0x0026) },
97 /* Sphairon Homelink 1202 */
98 { USB_DEVICE(0x0cde, 0x0027) },
99 /* Arcadyan WN7512 */
100 { USB_DEVICE(0x083a, 0xf522) },
101 /* Planex GWUS300 */
102 { USB_DEVICE(0x2019, 0x5304) },
103 /* IO-Data WNGDNUS2 */
104 { USB_DEVICE(0x04bb, 0x093f) },
105 /* AVM FRITZ!WLAN USB Stick N */
106 { USB_DEVICE(0x057C, 0x8401) },
107 /* NEC WL300NU-G */
108 { USB_DEVICE(0x0409, 0x0249) },
109 /* AVM FRITZ!WLAN USB Stick N 2.4 */
110 { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
111 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
112 { USB_DEVICE(0x1668, 0x1200) },
113
114 /* terminate */
115 {}
116};
117MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
118
119static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
120{
121 struct urb *urb;
122 unsigned long flags;
123 int err;
124
125 if (unlikely(!IS_STARTED(&aru->common)))
126 return ;
127
128 spin_lock_irqsave(&aru->tx_urb_lock, flags);
129 if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
130 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
131 return ;
132 }
133 atomic_inc(&aru->tx_submitted_urbs);
134
135 urb = usb_get_from_anchor(&aru->tx_pending);
136 if (!urb) {
137 atomic_dec(&aru->tx_submitted_urbs);
138 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
139
140 return ;
141 }
142 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
143
144 aru->tx_pending_urbs--;
145 usb_anchor_urb(urb, &aru->tx_submitted);
146
147 err = usb_submit_urb(urb, GFP_ATOMIC);
148 if (unlikely(err)) {
149 if (ar9170_nag_limiter(&aru->common))
150 dev_err(&aru->udev->dev, "submit_urb failed (%d).\n",
151 err);
152
153 usb_unanchor_urb(urb);
154 atomic_dec(&aru->tx_submitted_urbs);
155 ar9170_tx_callback(&aru->common, urb->context);
156 }
157
158 usb_free_urb(urb);
159}
160
161static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
162{
163 struct sk_buff *skb = urb->context;
164 struct ar9170_usb *aru = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
165
166 if (unlikely(!aru)) {
167 dev_kfree_skb_irq(skb);
168 return ;
169 }
170
171 atomic_dec(&aru->tx_submitted_urbs);
172
173 ar9170_tx_callback(&aru->common, skb);
174
175 ar9170_usb_submit_urb(aru);
176}
177
178static void ar9170_usb_tx_urb_complete(struct urb *urb)
179{
180}
181
182static void ar9170_usb_irq_completed(struct urb *urb)
183{
184 struct ar9170_usb *aru = urb->context;
185
186 switch (urb->status) {
187 /* everything is fine */
188 case 0:
189 break;
190
191 /* disconnect */
192 case -ENOENT:
193 case -ECONNRESET:
194 case -ENODEV:
195 case -ESHUTDOWN:
196 goto free;
197
198 default:
199 goto resubmit;
200 }
201
202 ar9170_handle_command_response(&aru->common, urb->transfer_buffer,
203 urb->actual_length);
204
205resubmit:
206 usb_anchor_urb(urb, &aru->rx_submitted);
207 if (usb_submit_urb(urb, GFP_ATOMIC)) {
208 usb_unanchor_urb(urb);
209 goto free;
210 }
211
212 return;
213
214free:
215 usb_free_coherent(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
216}
217
218static void ar9170_usb_rx_completed(struct urb *urb)
219{
220 struct sk_buff *skb = urb->context;
221 struct ar9170_usb *aru = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
222 int err;
223
224 if (!aru)
225 goto free;
226
227 switch (urb->status) {
228 /* everything is fine */
229 case 0:
230 break;
231
232 /* disconnect */
233 case -ENOENT:
234 case -ECONNRESET:
235 case -ENODEV:
236 case -ESHUTDOWN:
237 goto free;
238
239 default:
240 goto resubmit;
241 }
242
243 skb_put(skb, urb->actual_length);
244 ar9170_rx(&aru->common, skb);
245
246resubmit:
247 skb_reset_tail_pointer(skb);
248 skb_trim(skb, 0);
249
250 usb_anchor_urb(urb, &aru->rx_submitted);
251 err = usb_submit_urb(urb, GFP_ATOMIC);
252 if (unlikely(err)) {
253 usb_unanchor_urb(urb);
254 goto free;
255 }
256
257 return ;
258
259free:
260 dev_kfree_skb_irq(skb);
261}
262
263static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru,
264 struct urb *urb, gfp_t gfp)
265{
266 struct sk_buff *skb;
267
268 skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE + 32, gfp);
269 if (!skb)
270 return -ENOMEM;
271
272 /* reserve some space for mac80211's radiotap */
273 skb_reserve(skb, 32);
274
275 usb_fill_bulk_urb(urb, aru->udev,
276 usb_rcvbulkpipe(aru->udev, AR9170_EP_RX),
277 skb->data, min(skb_tailroom(skb),
278 AR9170_MAX_RX_BUFFER_SIZE),
279 ar9170_usb_rx_completed, skb);
280
281 return 0;
282}
283
284static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
285{
286 struct urb *urb = NULL;
287 void *ibuf;
288 int err = -ENOMEM;
289
290 /* initialize interrupt endpoint */
291 urb = usb_alloc_urb(0, GFP_KERNEL);
292 if (!urb)
293 goto out;
294
295 ibuf = usb_alloc_coherent(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
296 if (!ibuf)
297 goto out;
298
299 usb_fill_int_urb(urb, aru->udev,
300 usb_rcvintpipe(aru->udev, AR9170_EP_IRQ), ibuf,
301 64, ar9170_usb_irq_completed, aru, 1);
302 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
303
304 usb_anchor_urb(urb, &aru->rx_submitted);
305 err = usb_submit_urb(urb, GFP_KERNEL);
306 if (err) {
307 usb_unanchor_urb(urb);
308 usb_free_coherent(aru->udev, 64, urb->transfer_buffer,
309 urb->transfer_dma);
310 }
311
312out:
313 usb_free_urb(urb);
314 return err;
315}
316
317static int ar9170_usb_alloc_rx_bulk_urbs(struct ar9170_usb *aru)
318{
319 struct urb *urb;
320 int i;
321 int err = -EINVAL;
322
323 for (i = 0; i < AR9170_NUM_RX_URBS; i++) {
324 err = -ENOMEM;
325 urb = usb_alloc_urb(0, GFP_KERNEL);
326 if (!urb)
327 goto err_out;
328
329 err = ar9170_usb_prep_rx_urb(aru, urb, GFP_KERNEL);
330 if (err) {
331 usb_free_urb(urb);
332 goto err_out;
333 }
334
335 usb_anchor_urb(urb, &aru->rx_submitted);
336 err = usb_submit_urb(urb, GFP_KERNEL);
337 if (err) {
338 usb_unanchor_urb(urb);
339 dev_kfree_skb_any((void *) urb->transfer_buffer);
340 usb_free_urb(urb);
341 goto err_out;
342 }
343 usb_free_urb(urb);
344 }
345
346 /* the device now waiting for a firmware. */
347 aru->common.state = AR9170_IDLE;
348 return 0;
349
350err_out:
351
352 usb_kill_anchored_urbs(&aru->rx_submitted);
353 return err;
354}
355
356static int ar9170_usb_flush(struct ar9170 *ar)
357{
358 struct ar9170_usb *aru = (void *) ar;
359 struct urb *urb;
360 int ret, err = 0;
361
362 if (IS_STARTED(ar))
363 aru->common.state = AR9170_IDLE;
364
365 usb_wait_anchor_empty_timeout(&aru->tx_pending,
366 msecs_to_jiffies(800));
367 while ((urb = usb_get_from_anchor(&aru->tx_pending))) {
368 ar9170_tx_callback(&aru->common, (void *) urb->context);
369 usb_free_urb(urb);
370 }
371
372 /* lets wait a while until the tx - queues are dried out */
373 ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
374 msecs_to_jiffies(100));
375 if (ret == 0)
376 err = -ETIMEDOUT;
377
378 usb_kill_anchored_urbs(&aru->tx_submitted);
379
380 if (IS_ACCEPTING_CMD(ar))
381 aru->common.state = AR9170_STARTED;
382
383 return err;
384}
385
386static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
387{
388 int err;
389
390 aru->common.state = AR9170_UNKNOWN_STATE;
391
392 err = ar9170_usb_flush(&aru->common);
393 if (err)
394 dev_err(&aru->udev->dev, "stuck tx urbs!\n");
395
396 usb_poison_anchored_urbs(&aru->tx_submitted);
397 usb_poison_anchored_urbs(&aru->rx_submitted);
398}
399
400static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
401 unsigned int plen, void *payload,
402 unsigned int outlen, void *out)
403{
404 struct ar9170_usb *aru = (void *) ar;
405 struct urb *urb = NULL;
406 unsigned long flags;
407 int err = -ENOMEM;
408
409 if (unlikely(!IS_ACCEPTING_CMD(ar)))
410 return -EPERM;
411
412 if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4))
413 return -EINVAL;
414
415 urb = usb_alloc_urb(0, GFP_ATOMIC);
416 if (unlikely(!urb))
417 goto err_free;
418
419 ar->cmdbuf[0] = cpu_to_le32(plen);
420 ar->cmdbuf[0] |= cpu_to_le32(cmd << 8);
421 /* writing multiple regs fills this buffer already */
422 if (plen && payload != (u8 *)(&ar->cmdbuf[1]))
423 memcpy(&ar->cmdbuf[1], payload, plen);
424
425 spin_lock_irqsave(&aru->common.cmdlock, flags);
426 aru->readbuf = (u8 *)out;
427 aru->readlen = outlen;
428 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
429
430 usb_fill_int_urb(urb, aru->udev,
431 usb_sndintpipe(aru->udev, AR9170_EP_CMD),
432 aru->common.cmdbuf, plen + 4,
433 ar9170_usb_tx_urb_complete, NULL, 1);
434
435 usb_anchor_urb(urb, &aru->tx_submitted);
436 err = usb_submit_urb(urb, GFP_ATOMIC);
437 if (unlikely(err)) {
438 usb_unanchor_urb(urb);
439 usb_free_urb(urb);
440 goto err_unbuf;
441 }
442 usb_free_urb(urb);
443
444 err = wait_for_completion_timeout(&aru->cmd_wait, HZ);
445 if (err == 0) {
446 err = -ETIMEDOUT;
447 goto err_unbuf;
448 }
449
450 if (aru->readlen != outlen) {
451 err = -EMSGSIZE;
452 goto err_unbuf;
453 }
454
455 return 0;
456
457err_unbuf:
458 /* Maybe the device was removed in the second we were waiting? */
459 if (IS_STARTED(ar)) {
460 dev_err(&aru->udev->dev, "no command feedback "
461 "received (%d).\n", err);
462
463 /* provide some maybe useful debug information */
464 print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE,
465 aru->common.cmdbuf, plen + 4);
466 dump_stack();
467 }
468
469 /* invalidate to avoid completing the next prematurely */
470 spin_lock_irqsave(&aru->common.cmdlock, flags);
471 aru->readbuf = NULL;
472 aru->readlen = 0;
473 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
474
475err_free:
476
477 return err;
478}
479
480static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
481{
482 struct ar9170_usb *aru = (struct ar9170_usb *) ar;
483 struct urb *urb;
484
485 if (unlikely(!IS_STARTED(ar))) {
486 /* Seriously, what were you drink... err... thinking!? */
487 return -EPERM;
488 }
489
490 urb = usb_alloc_urb(0, GFP_ATOMIC);
491 if (unlikely(!urb))
492 return -ENOMEM;
493
494 usb_fill_bulk_urb(urb, aru->udev,
495 usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
496 skb->data, skb->len,
497 ar9170_usb_tx_urb_complete_frame, skb);
498 urb->transfer_flags |= URB_ZERO_PACKET;
499
500 usb_anchor_urb(urb, &aru->tx_pending);
501 aru->tx_pending_urbs++;
502
503 usb_free_urb(urb);
504
505 ar9170_usb_submit_urb(aru);
506 return 0;
507}
508
509static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len , void *buffer)
510{
511 struct ar9170_usb *aru = (void *) ar;
512 unsigned long flags;
513 u32 in, out;
514
515 if (unlikely(!buffer))
516 return ;
517
518 in = le32_to_cpup((__le32 *)buffer);
519 out = le32_to_cpu(ar->cmdbuf[0]);
520
521 /* mask off length byte */
522 out &= ~0xFF;
523
524 if (aru->readlen >= 0) {
525 /* add expected length */
526 out |= aru->readlen;
527 } else {
528 /* add obtained length */
529 out |= in & 0xFF;
530 }
531
532 /*
533 * Some commands (e.g: AR9170_CMD_FREQUENCY) have a variable response
534 * length and we cannot predict the correct length in advance.
535 * So we only check if we provided enough space for the data.
536 */
537 if (unlikely(out < in)) {
538 dev_warn(&aru->udev->dev, "received invalid command response "
539 "got %d bytes, instead of %d bytes "
540 "and the resp length is %d bytes\n",
541 in, out, len);
542 print_hex_dump_bytes("ar9170 invalid resp: ",
543 DUMP_PREFIX_OFFSET, buffer, len);
544 /*
545 * Do not complete, then the command times out,
546 * and we get a stack trace from there.
547 */
548 return ;
549 }
550
551 spin_lock_irqsave(&aru->common.cmdlock, flags);
552 if (aru->readbuf && len > 0) {
553 memcpy(aru->readbuf, buffer + 4, len - 4);
554 aru->readbuf = NULL;
555 }
556 complete(&aru->cmd_wait);
557 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
558}
559
560static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data,
561 size_t len, u32 addr, bool complete)
562{
563 int transfer, err;
564 u8 *buf = kmalloc(4096, GFP_KERNEL);
565
566 if (!buf)
567 return -ENOMEM;
568
569 while (len) {
570 transfer = min_t(int, len, 4096);
571 memcpy(buf, data, transfer);
572
573 err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
574 0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
575 addr >> 8, 0, buf, transfer, 1000);
576
577 if (err < 0) {
578 kfree(buf);
579 return err;
580 }
581
582 len -= transfer;
583 data += transfer;
584 addr += transfer;
585 }
586 kfree(buf);
587
588 if (complete) {
589 err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
590 0x31 /* FW DL COMPLETE */,
591 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 5000);
592 }
593
594 return 0;
595}
596
597static int ar9170_usb_reset(struct ar9170_usb *aru)
598{
599 int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING);
600
601 if (lock) {
602 ret = usb_lock_device_for_reset(aru->udev, aru->intf);
603 if (ret < 0) {
604 dev_err(&aru->udev->dev, "unable to lock device "
605 "for reset (%d).\n", ret);
606 return ret;
607 }
608 }
609
610 ret = usb_reset_device(aru->udev);
611 if (lock)
612 usb_unlock_device(aru->udev);
613
614 /* let it rest - for a second - */
615 msleep(1000);
616
617 return ret;
618}
619
620static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
621{
622 int err;
623
624 if (!aru->init_values)
625 goto upload_fw_start;
626
627 /* First, upload initial values to device RAM */
628 err = ar9170_usb_upload(aru, aru->init_values->data,
629 aru->init_values->size, 0x102800, false);
630 if (err) {
631 dev_err(&aru->udev->dev, "firmware part 1 "
632 "upload failed (%d).\n", err);
633 return err;
634 }
635
636upload_fw_start:
637
638 /* Then, upload the firmware itself and start it */
639 return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size,
640 0x200000, true);
641}
642
643static int ar9170_usb_init_transport(struct ar9170_usb *aru)
644{
645 struct ar9170 *ar = (void *) &aru->common;
646 int err;
647
648 ar9170_regwrite_begin(ar);
649
650 /* Set USB Rx stream mode MAX packet number to 2 */
651 ar9170_regwrite(AR9170_USB_REG_MAX_AGG_UPLOAD, 0x4);
652
653 /* Set USB Rx stream mode timeout to 10us */
654 ar9170_regwrite(AR9170_USB_REG_UPLOAD_TIME_CTL, 0x80);
655
656 ar9170_regwrite_finish();
657
658 err = ar9170_regwrite_result();
659 if (err)
660 dev_err(&aru->udev->dev, "USB setup failed (%d).\n", err);
661
662 return err;
663}
664
665static void ar9170_usb_stop(struct ar9170 *ar)
666{
667 struct ar9170_usb *aru = (void *) ar;
668 int ret;
669
670 if (IS_ACCEPTING_CMD(ar))
671 aru->common.state = AR9170_STOPPED;
672
673 ret = ar9170_usb_flush(ar);
674 if (ret)
675 dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
676
677 usb_poison_anchored_urbs(&aru->tx_submitted);
678
679 /*
680 * Note:
681 * So far we freed all tx urbs, but we won't dare to touch any rx urbs.
682 * Else we would end up with a unresponsive device...
683 */
684}
685
686static int ar9170_usb_open(struct ar9170 *ar)
687{
688 struct ar9170_usb *aru = (void *) ar;
689 int err;
690
691 usb_unpoison_anchored_urbs(&aru->tx_submitted);
692 err = ar9170_usb_init_transport(aru);
693 if (err) {
694 usb_poison_anchored_urbs(&aru->tx_submitted);
695 return err;
696 }
697
698 aru->common.state = AR9170_IDLE;
699 return 0;
700}
701
702static int ar9170_usb_init_device(struct ar9170_usb *aru)
703{
704 int err;
705
706 err = ar9170_usb_alloc_rx_irq_urb(aru);
707 if (err)
708 goto err_out;
709
710 err = ar9170_usb_alloc_rx_bulk_urbs(aru);
711 if (err)
712 goto err_unrx;
713
714 err = ar9170_usb_upload_firmware(aru);
715 if (err) {
716 err = ar9170_echo_test(&aru->common, 0x60d43110);
717 if (err) {
718 /* force user invention, by disabling the device */
719 err = usb_driver_set_configuration(aru->udev, -1);
720 dev_err(&aru->udev->dev, "device is in a bad state. "
721 "please reconnect it!\n");
722 goto err_unrx;
723 }
724 }
725
726 return 0;
727
728err_unrx:
729 ar9170_usb_cancel_urbs(aru);
730
731err_out:
732 return err;
733}
734
735static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
736{
737 struct device *parent = aru->udev->dev.parent;
738 struct usb_device *udev;
739
740 /*
741 * Store a copy of the usb_device pointer locally.
742 * This is because device_release_driver initiates
743 * ar9170_usb_disconnect, which in turn frees our
744 * driver context (aru).
745 */
746 udev = aru->udev;
747
748 complete(&aru->firmware_loading_complete);
749
750 /* unbind anything failed */
751 if (parent)
752 device_lock(parent);
753
754 device_release_driver(&udev->dev);
755 if (parent)
756 device_unlock(parent);
757
758 usb_put_dev(udev);
759}
760
761static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
762{
763 struct ar9170_usb *aru = context;
764 int err;
765
766 aru->firmware = fw;
767
768 if (!fw) {
769 dev_err(&aru->udev->dev, "firmware file not found.\n");
770 goto err_freefw;
771 }
772
773 err = ar9170_usb_init_device(aru);
774 if (err)
775 goto err_freefw;
776
777 err = ar9170_usb_open(&aru->common);
778 if (err)
779 goto err_unrx;
780
781 err = ar9170_register(&aru->common, &aru->udev->dev);
782
783 ar9170_usb_stop(&aru->common);
784 if (err)
785 goto err_unrx;
786
787 complete(&aru->firmware_loading_complete);
788 usb_put_dev(aru->udev);
789 return;
790
791 err_unrx:
792 ar9170_usb_cancel_urbs(aru);
793
794 err_freefw:
795 ar9170_usb_firmware_failed(aru);
796}
797
798static void ar9170_usb_firmware_inits(const struct firmware *fw,
799 void *context)
800{
801 struct ar9170_usb *aru = context;
802 int err;
803
804 if (!fw) {
805 dev_err(&aru->udev->dev, "file with init values not found.\n");
806 ar9170_usb_firmware_failed(aru);
807 return;
808 }
809
810 aru->init_values = fw;
811
812 /* ok so we have the init values -- get code for two-stage */
813
814 err = request_firmware_nowait(THIS_MODULE, 1, "ar9170-2.fw",
815 &aru->udev->dev, GFP_KERNEL, aru,
816 ar9170_usb_firmware_finish);
817 if (err)
818 ar9170_usb_firmware_failed(aru);
819}
820
821static void ar9170_usb_firmware_step2(const struct firmware *fw, void *context)
822{
823 struct ar9170_usb *aru = context;
824 int err;
825
826 if (fw) {
827 ar9170_usb_firmware_finish(fw, context);
828 return;
829 }
830
831 if (aru->req_one_stage_fw) {
832 dev_err(&aru->udev->dev, "ar9170.fw firmware file "
833 "not found and is required for this device\n");
834 ar9170_usb_firmware_failed(aru);
835 return;
836 }
837
838 dev_err(&aru->udev->dev, "ar9170.fw firmware file "
839 "not found, trying old firmware...\n");
840
841 err = request_firmware_nowait(THIS_MODULE, 1, "ar9170-1.fw",
842 &aru->udev->dev, GFP_KERNEL, aru,
843 ar9170_usb_firmware_inits);
844 if (err)
845 ar9170_usb_firmware_failed(aru);
846}
847
848static bool ar9170_requires_one_stage(const struct usb_device_id *id)
849{
850 if (!id->driver_info)
851 return false;
852 if (id->driver_info == AR9170_REQ_FW1_ONLY)
853 return true;
854 return false;
855}
856
857static int ar9170_usb_probe(struct usb_interface *intf,
858 const struct usb_device_id *id)
859{
860 struct ar9170_usb *aru;
861 struct ar9170 *ar;
862 struct usb_device *udev;
863 int err;
864
865 aru = ar9170_alloc(sizeof(*aru));
866 if (IS_ERR(aru)) {
867 err = PTR_ERR(aru);
868 goto out;
869 }
870
871 udev = interface_to_usbdev(intf);
872 usb_get_dev(udev);
873 aru->udev = udev;
874 aru->intf = intf;
875 ar = &aru->common;
876
877 aru->req_one_stage_fw = ar9170_requires_one_stage(id);
878
879 usb_set_intfdata(intf, aru);
880 SET_IEEE80211_DEV(ar->hw, &intf->dev);
881
882 init_usb_anchor(&aru->rx_submitted);
883 init_usb_anchor(&aru->tx_pending);
884 init_usb_anchor(&aru->tx_submitted);
885 init_completion(&aru->cmd_wait);
886 init_completion(&aru->firmware_loading_complete);
887 spin_lock_init(&aru->tx_urb_lock);
888
889 aru->tx_pending_urbs = 0;
890 atomic_set(&aru->tx_submitted_urbs, 0);
891
892 aru->common.stop = ar9170_usb_stop;
893 aru->common.flush = ar9170_usb_flush;
894 aru->common.open = ar9170_usb_open;
895 aru->common.tx = ar9170_usb_tx;
896 aru->common.exec_cmd = ar9170_usb_exec_cmd;
897 aru->common.callback_cmd = ar9170_usb_callback_cmd;
898
899#ifdef CONFIG_PM
900 udev->reset_resume = 1;
901#endif /* CONFIG_PM */
902 err = ar9170_usb_reset(aru);
903 if (err)
904 goto err_freehw;
905
906 usb_get_dev(aru->udev);
907 return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
908 &aru->udev->dev, GFP_KERNEL, aru,
909 ar9170_usb_firmware_step2);
910err_freehw:
911 usb_set_intfdata(intf, NULL);
912 usb_put_dev(udev);
913 ieee80211_free_hw(ar->hw);
914out:
915 return err;
916}
917
918static void ar9170_usb_disconnect(struct usb_interface *intf)
919{
920 struct ar9170_usb *aru = usb_get_intfdata(intf);
921
922 if (!aru)
923 return;
924
925 aru->common.state = AR9170_IDLE;
926
927 wait_for_completion(&aru->firmware_loading_complete);
928
929 ar9170_unregister(&aru->common);
930 ar9170_usb_cancel_urbs(aru);
931
932 usb_put_dev(aru->udev);
933 usb_set_intfdata(intf, NULL);
934 ieee80211_free_hw(aru->common.hw);
935
936 release_firmware(aru->init_values);
937 release_firmware(aru->firmware);
938}
939
940#ifdef CONFIG_PM
941static int ar9170_suspend(struct usb_interface *intf,
942 pm_message_t message)
943{
944 struct ar9170_usb *aru = usb_get_intfdata(intf);
945
946 if (!aru)
947 return -ENODEV;
948
949 aru->common.state = AR9170_IDLE;
950 ar9170_usb_cancel_urbs(aru);
951
952 return 0;
953}
954
955static int ar9170_resume(struct usb_interface *intf)
956{
957 struct ar9170_usb *aru = usb_get_intfdata(intf);
958 int err;
959
960 if (!aru)
961 return -ENODEV;
962
963 usb_unpoison_anchored_urbs(&aru->rx_submitted);
964 usb_unpoison_anchored_urbs(&aru->tx_submitted);
965
966 err = ar9170_usb_init_device(aru);
967 if (err)
968 goto err_unrx;
969
970 err = ar9170_usb_open(&aru->common);
971 if (err)
972 goto err_unrx;
973
974 return 0;
975
976err_unrx:
977 aru->common.state = AR9170_IDLE;
978 ar9170_usb_cancel_urbs(aru);
979
980 return err;
981}
982#endif /* CONFIG_PM */
983
984static struct usb_driver ar9170_driver = {
985 .name = "ar9170usb",
986 .probe = ar9170_usb_probe,
987 .disconnect = ar9170_usb_disconnect,
988 .id_table = ar9170_usb_ids,
989 .soft_unbind = 1,
990#ifdef CONFIG_PM
991 .suspend = ar9170_suspend,
992 .resume = ar9170_resume,
993 .reset_resume = ar9170_resume,
994#endif /* CONFIG_PM */
995};
996
997static int __init ar9170_init(void)
998{
999 return usb_register(&ar9170_driver);
1000}
1001
1002static void __exit ar9170_exit(void)
1003{
1004 usb_deregister(&ar9170_driver);
1005}
1006
1007module_init(ar9170_init);
1008module_exit(ar9170_exit);
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
deleted file mode 100644
index 919b06046eb3..000000000000
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * Atheros AR9170 USB driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39#ifndef __USB_H
40#define __USB_H
41
42#include <linux/usb.h>
43#include <linux/completion.h>
44#include <linux/spinlock.h>
45#include <linux/leds.h>
46#include <net/cfg80211.h>
47#include <net/mac80211.h>
48#include <linux/firmware.h>
49#include "eeprom.h"
50#include "hw.h"
51#include "ar9170.h"
52
53#define AR9170_NUM_RX_URBS 16
54#define AR9170_NUM_TX_URBS 8
55
56struct firmware;
57
58struct ar9170_usb {
59 struct ar9170 common;
60 struct usb_device *udev;
61 struct usb_interface *intf;
62
63 struct usb_anchor rx_submitted;
64 struct usb_anchor tx_pending;
65 struct usb_anchor tx_submitted;
66
67 bool req_one_stage_fw;
68
69 spinlock_t tx_urb_lock;
70 atomic_t tx_submitted_urbs;
71 unsigned int tx_pending_urbs;
72
73 struct completion cmd_wait;
74 struct completion firmware_loading_complete;
75 int readlen;
76 u8 *readbuf;
77
78 const struct firmware *init_values;
79 const struct firmware *firmware;
80};
81
82#endif /* __USB_H */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index a6c6a466000f..7cf4317a2a84 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -119,17 +119,11 @@ struct ath_ops {
119 void (*write)(void *, u32 val, u32 reg_offset); 119 void (*write)(void *, u32 val, u32 reg_offset);
120 void (*enable_write_buffer)(void *); 120 void (*enable_write_buffer)(void *);
121 void (*write_flush) (void *); 121 void (*write_flush) (void *);
122 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
122}; 123};
123 124
124struct ath_common; 125struct ath_common;
125 126struct ath_bus_ops;
126struct ath_bus_ops {
127 enum ath_bus_type ath_bus_type;
128 void (*read_cachesize)(struct ath_common *common, int *csz);
129 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
130 void (*bt_coex_prep)(struct ath_common *common);
131 void (*extn_synch_en)(struct ath_common *common);
132};
133 127
134struct ath_common { 128struct ath_common {
135 void *ah; 129 void *ah;
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 82324e98efef..ea9982781559 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/nl80211.h> 19#include <linux/nl80211.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/etherdevice.h>
21#include <ar231x_platform.h> 22#include <ar231x_platform.h>
22#include "ath5k.h" 23#include "ath5k.h"
23#include "debug.h" 24#include "debug.h"
@@ -62,10 +63,27 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
62 return 0; 63 return 0;
63} 64}
64 65
66static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
67{
68 struct ath5k_softc *sc = ah->ah_sc;
69 struct platform_device *pdev = to_platform_device(sc->dev);
70 struct ar231x_board_config *bcfg = pdev->dev.platform_data;
71 u8 *cfg_mac;
72
73 if (to_platform_device(sc->dev)->id == 0)
74 cfg_mac = bcfg->config->wlan0_mac;
75 else
76 cfg_mac = bcfg->config->wlan1_mac;
77
78 memcpy(mac, cfg_mac, ETH_ALEN);
79 return 0;
80}
81
65static const struct ath_bus_ops ath_ahb_bus_ops = { 82static const struct ath_bus_ops ath_ahb_bus_ops = {
66 .ath_bus_type = ATH_AHB, 83 .ath_bus_type = ATH_AHB,
67 .read_cachesize = ath5k_ahb_read_cachesize, 84 .read_cachesize = ath5k_ahb_read_cachesize,
68 .eeprom_read = ath5k_ahb_eeprom_read, 85 .eeprom_read = ath5k_ahb_eeprom_read,
86 .eeprom_read_mac = ath5k_ahb_eeprom_read_mac,
69}; 87};
70 88
71/*Initialization*/ 89/*Initialization*/
@@ -142,6 +160,16 @@ static int ath_ahb_probe(struct platform_device *pdev)
142 else 160 else
143 reg |= AR5K_AR5312_ENABLE_WLAN1; 161 reg |= AR5K_AR5312_ENABLE_WLAN1;
144 __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE); 162 __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
163
164 /*
165 * On a dual-band AR5312, the multiband radio is only
166 * used as pass-through. Disable 2 GHz support in the
167 * driver for it
168 */
169 if (to_platform_device(sc->dev)->id == 0 &&
170 (bcfg->config->flags & (BD_WLAN0|BD_WLAN1)) ==
171 (BD_WLAN1|BD_WLAN0))
172 __set_bit(ATH_STAT_2G_DISABLED, sc->status);
145 } 173 }
146 174
147 ret = ath5k_init_softc(sc, &ath_ahb_bus_ops); 175 ret = ath5k_init_softc(sc, &ath_ahb_bus_ops);
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 8a06dbd39629..bb50700436fe 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -224,8 +224,7 @@
224 224
225/* SIFS */ 225/* SIFS */
226#define AR5K_INIT_SIFS_TURBO 6 226#define AR5K_INIT_SIFS_TURBO 6
227/* XXX: 8 from initvals 10 from standard */ 227#define AR5K_INIT_SIFS_DEFAULT_BG 10
228#define AR5K_INIT_SIFS_DEFAULT_BG 8
229#define AR5K_INIT_SIFS_DEFAULT_A 16 228#define AR5K_INIT_SIFS_DEFAULT_A 16
230#define AR5K_INIT_SIFS_HALF_RATE 32 229#define AR5K_INIT_SIFS_HALF_RATE 32
231#define AR5K_INIT_SIFS_QUARTER_RATE 64 230#define AR5K_INIT_SIFS_QUARTER_RATE 64
@@ -453,12 +452,10 @@ struct ath5k_tx_status {
453 u16 ts_seqnum; 452 u16 ts_seqnum;
454 u16 ts_tstamp; 453 u16 ts_tstamp;
455 u8 ts_status; 454 u8 ts_status;
456 u8 ts_rate[4];
457 u8 ts_retry[4];
458 u8 ts_final_idx; 455 u8 ts_final_idx;
456 u8 ts_final_retry;
459 s8 ts_rssi; 457 s8 ts_rssi;
460 u8 ts_shortretry; 458 u8 ts_shortretry;
461 u8 ts_longretry;
462 u8 ts_virtcol; 459 u8 ts_virtcol;
463 u8 ts_antenna; 460 u8 ts_antenna;
464}; 461};
@@ -875,6 +872,19 @@ enum ath5k_int {
875 AR5K_INT_QTRIG = 0x40000000, /* Non common */ 872 AR5K_INT_QTRIG = 0x40000000, /* Non common */
876 AR5K_INT_GLOBAL = 0x80000000, 873 AR5K_INT_GLOBAL = 0x80000000,
877 874
875 AR5K_INT_TX_ALL = AR5K_INT_TXOK
876 | AR5K_INT_TXDESC
877 | AR5K_INT_TXERR
878 | AR5K_INT_TXEOL
879 | AR5K_INT_TXURN,
880
881 AR5K_INT_RX_ALL = AR5K_INT_RXOK
882 | AR5K_INT_RXDESC
883 | AR5K_INT_RXERR
884 | AR5K_INT_RXNOFRM
885 | AR5K_INT_RXEOL
886 | AR5K_INT_RXORN,
887
878 AR5K_INT_COMMON = AR5K_INT_RXOK 888 AR5K_INT_COMMON = AR5K_INT_RXOK
879 | AR5K_INT_RXDESC 889 | AR5K_INT_RXDESC
880 | AR5K_INT_RXERR 890 | AR5K_INT_RXERR
@@ -1058,6 +1068,7 @@ struct ath5k_hw {
1058 u8 ah_coverage_class; 1068 u8 ah_coverage_class;
1059 bool ah_ack_bitrate_high; 1069 bool ah_ack_bitrate_high;
1060 u8 ah_bwmode; 1070 u8 ah_bwmode;
1071 bool ah_short_slot;
1061 1072
1062 /* Antenna Control */ 1073 /* Antenna Control */
1063 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 1074 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1144,6 +1155,13 @@ struct ath5k_hw {
1144 struct ath5k_rx_status *); 1155 struct ath5k_rx_status *);
1145}; 1156};
1146 1157
1158struct ath_bus_ops {
1159 enum ath_bus_type ath_bus_type;
1160 void (*read_cachesize)(struct ath_common *common, int *csz);
1161 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
1162 int (*eeprom_read_mac)(struct ath5k_hw *ah, u8 *mac);
1163};
1164
1147/* 1165/*
1148 * Prototypes 1166 * Prototypes
1149 */ 1167 */
@@ -1227,13 +1245,12 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah);
1227/* EEPROM access functions */ 1245/* EEPROM access functions */
1228int ath5k_eeprom_init(struct ath5k_hw *ah); 1246int ath5k_eeprom_init(struct ath5k_hw *ah);
1229void ath5k_eeprom_detach(struct ath5k_hw *ah); 1247void ath5k_eeprom_detach(struct ath5k_hw *ah);
1230int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1231 1248
1232 1249
1233/* Protocol Control Unit Functions */ 1250/* Protocol Control Unit Functions */
1234/* Helpers */ 1251/* Helpers */
1235int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, 1252int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
1236 int len, struct ieee80211_rate *rate); 1253 int len, struct ieee80211_rate *rate, bool shortpre);
1237unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah); 1254unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
1238unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah); 1255unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
1239extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode); 1256extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index bc8240560488..1588401de3c4 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -313,12 +313,17 @@ int ath5k_hw_init(struct ath5k_softc *sc)
313 goto err; 313 goto err;
314 } 314 }
315 315
316 if (test_bit(ATH_STAT_2G_DISABLED, sc->status)) {
317 __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
318 __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
319 }
320
316 /* Crypto settings */ 321 /* Crypto settings */
317 common->keymax = (sc->ah->ah_version == AR5K_AR5210 ? 322 common->keymax = (sc->ah->ah_version == AR5K_AR5210 ?
318 AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211); 323 AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
319 324
320 if (srev >= AR5K_SREV_AR5212_V4 && 325 if (srev >= AR5K_SREV_AR5212_V4 &&
321 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 && 326 (ee->ee_version < AR5K_EEPROM_VERSION_5_0 ||
322 !AR5K_EEPROM_AES_DIS(ee->ee_misc5))) 327 !AR5K_EEPROM_AES_DIS(ee->ee_misc5)))
323 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM; 328 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
324 329
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 349a5963931b..22047628ccfa 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1444,6 +1444,21 @@ ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
1444} 1444}
1445 1445
1446static void 1446static void
1447ath5k_set_current_imask(struct ath5k_softc *sc)
1448{
1449 enum ath5k_int imask = sc->imask;
1450 unsigned long flags;
1451
1452 spin_lock_irqsave(&sc->irqlock, flags);
1453 if (sc->rx_pending)
1454 imask &= ~AR5K_INT_RX_ALL;
1455 if (sc->tx_pending)
1456 imask &= ~AR5K_INT_TX_ALL;
1457 ath5k_hw_set_imr(sc->ah, imask);
1458 spin_unlock_irqrestore(&sc->irqlock, flags);
1459}
1460
1461static void
1447ath5k_tasklet_rx(unsigned long data) 1462ath5k_tasklet_rx(unsigned long data)
1448{ 1463{
1449 struct ath5k_rx_status rs = {}; 1464 struct ath5k_rx_status rs = {};
@@ -1506,6 +1521,8 @@ next:
1506 } while (ath5k_rxbuf_setup(sc, bf) == 0); 1521 } while (ath5k_rxbuf_setup(sc, bf) == 0);
1507unlock: 1522unlock:
1508 spin_unlock(&sc->rxbuflock); 1523 spin_unlock(&sc->rxbuflock);
1524 sc->rx_pending = false;
1525 ath5k_set_current_imask(sc);
1509} 1526}
1510 1527
1511 1528
@@ -1573,28 +1590,28 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1573 struct ath5k_txq *txq, struct ath5k_tx_status *ts) 1590 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
1574{ 1591{
1575 struct ieee80211_tx_info *info; 1592 struct ieee80211_tx_info *info;
1593 u8 tries[3];
1576 int i; 1594 int i;
1577 1595
1578 sc->stats.tx_all_count++; 1596 sc->stats.tx_all_count++;
1579 sc->stats.tx_bytes_count += skb->len; 1597 sc->stats.tx_bytes_count += skb->len;
1580 info = IEEE80211_SKB_CB(skb); 1598 info = IEEE80211_SKB_CB(skb);
1581 1599
1600 tries[0] = info->status.rates[0].count;
1601 tries[1] = info->status.rates[1].count;
1602 tries[2] = info->status.rates[2].count;
1603
1582 ieee80211_tx_info_clear_status(info); 1604 ieee80211_tx_info_clear_status(info);
1583 for (i = 0; i < 4; i++) { 1605
1606 for (i = 0; i < ts->ts_final_idx; i++) {
1584 struct ieee80211_tx_rate *r = 1607 struct ieee80211_tx_rate *r =
1585 &info->status.rates[i]; 1608 &info->status.rates[i];
1586 1609
1587 if (ts->ts_rate[i]) { 1610 r->count = tries[i];
1588 r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
1589 r->count = ts->ts_retry[i];
1590 } else {
1591 r->idx = -1;
1592 r->count = 0;
1593 }
1594 } 1611 }
1595 1612
1596 /* count the successful attempt as well */ 1613 info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
1597 info->status.rates[ts->ts_final_idx].count++; 1614 info->status.rates[ts->ts_final_idx + 1].idx = -1;
1598 1615
1599 if (unlikely(ts->ts_status)) { 1616 if (unlikely(ts->ts_status)) {
1600 sc->stats.ack_fail++; 1617 sc->stats.ack_fail++;
@@ -1609,6 +1626,9 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1609 } else { 1626 } else {
1610 info->flags |= IEEE80211_TX_STAT_ACK; 1627 info->flags |= IEEE80211_TX_STAT_ACK;
1611 info->status.ack_signal = ts->ts_rssi; 1628 info->status.ack_signal = ts->ts_rssi;
1629
1630 /* count the successful attempt as well */
1631 info->status.rates[ts->ts_final_idx].count++;
1612 } 1632 }
1613 1633
1614 /* 1634 /*
@@ -1690,6 +1710,9 @@ ath5k_tasklet_tx(unsigned long data)
1690 for (i=0; i < AR5K_NUM_TX_QUEUES; i++) 1710 for (i=0; i < AR5K_NUM_TX_QUEUES; i++)
1691 if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i))) 1711 if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
1692 ath5k_tx_processq(sc, &sc->txqs[i]); 1712 ath5k_tx_processq(sc, &sc->txqs[i]);
1713
1714 sc->tx_pending = false;
1715 ath5k_set_current_imask(sc);
1693} 1716}
1694 1717
1695 1718
@@ -2119,6 +2142,20 @@ ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2119 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */ 2142 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
2120} 2143}
2121 2144
2145static void
2146ath5k_schedule_rx(struct ath5k_softc *sc)
2147{
2148 sc->rx_pending = true;
2149 tasklet_schedule(&sc->rxtq);
2150}
2151
2152static void
2153ath5k_schedule_tx(struct ath5k_softc *sc)
2154{
2155 sc->tx_pending = true;
2156 tasklet_schedule(&sc->txtq);
2157}
2158
2122irqreturn_t 2159irqreturn_t
2123ath5k_intr(int irq, void *dev_id) 2160ath5k_intr(int irq, void *dev_id)
2124{ 2161{
@@ -2161,7 +2198,7 @@ ath5k_intr(int irq, void *dev_id)
2161 ieee80211_queue_work(sc->hw, &sc->reset_work); 2198 ieee80211_queue_work(sc->hw, &sc->reset_work);
2162 } 2199 }
2163 else 2200 else
2164 tasklet_schedule(&sc->rxtq); 2201 ath5k_schedule_rx(sc);
2165 } else { 2202 } else {
2166 if (status & AR5K_INT_SWBA) { 2203 if (status & AR5K_INT_SWBA) {
2167 tasklet_hi_schedule(&sc->beacontq); 2204 tasklet_hi_schedule(&sc->beacontq);
@@ -2179,10 +2216,10 @@ ath5k_intr(int irq, void *dev_id)
2179 ath5k_hw_update_tx_triglevel(ah, true); 2216 ath5k_hw_update_tx_triglevel(ah, true);
2180 } 2217 }
2181 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) 2218 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
2182 tasklet_schedule(&sc->rxtq); 2219 ath5k_schedule_rx(sc);
2183 if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC 2220 if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
2184 | AR5K_INT_TXERR | AR5K_INT_TXEOL)) 2221 | AR5K_INT_TXERR | AR5K_INT_TXEOL))
2185 tasklet_schedule(&sc->txtq); 2222 ath5k_schedule_tx(sc);
2186 if (status & AR5K_INT_BMISS) { 2223 if (status & AR5K_INT_BMISS) {
2187 /* TODO */ 2224 /* TODO */
2188 } 2225 }
@@ -2201,6 +2238,9 @@ ath5k_intr(int irq, void *dev_id)
2201 2238
2202 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); 2239 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2203 2240
2241 if (sc->rx_pending || sc->tx_pending)
2242 ath5k_set_current_imask(sc);
2243
2204 if (unlikely(!counter)) 2244 if (unlikely(!counter))
2205 ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); 2245 ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
2206 2246
@@ -2354,7 +2394,7 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
2354 spin_lock_init(&sc->rxbuflock); 2394 spin_lock_init(&sc->rxbuflock);
2355 spin_lock_init(&sc->txbuflock); 2395 spin_lock_init(&sc->txbuflock);
2356 spin_lock_init(&sc->block); 2396 spin_lock_init(&sc->block);
2357 2397 spin_lock_init(&sc->irqlock);
2358 2398
2359 /* Setup interrupt handler */ 2399 /* Setup interrupt handler */
2360 ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 2400 ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
@@ -2572,6 +2612,8 @@ done:
2572 2612
2573static void stop_tasklets(struct ath5k_softc *sc) 2613static void stop_tasklets(struct ath5k_softc *sc)
2574{ 2614{
2615 sc->rx_pending = false;
2616 sc->tx_pending = false;
2575 tasklet_kill(&sc->rxtq); 2617 tasklet_kill(&sc->rxtq);
2576 tasklet_kill(&sc->txtq); 2618 tasklet_kill(&sc->txtq);
2577 tasklet_kill(&sc->calib); 2619 tasklet_kill(&sc->calib);
@@ -2838,7 +2880,7 @@ ath5k_init(struct ieee80211_hw *hw)
2838 INIT_WORK(&sc->reset_work, ath5k_reset_work); 2880 INIT_WORK(&sc->reset_work, ath5k_reset_work);
2839 INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work); 2881 INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
2840 2882
2841 ret = ath5k_eeprom_read_mac(ah, mac); 2883 ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
2842 if (ret) { 2884 if (ret) {
2843 ATH5K_ERR(sc, "unable to read address from EEPROM\n"); 2885 ATH5K_ERR(sc, "unable to read address from EEPROM\n");
2844 goto err_queues; 2886 goto err_queues;
@@ -2898,7 +2940,6 @@ ath5k_deinit_softc(struct ath5k_softc *sc)
2898 * XXX: ??? detach ath5k_hw ??? 2940 * XXX: ??? detach ath5k_hw ???
2899 * Other than that, it's straightforward... 2941 * Other than that, it's straightforward...
2900 */ 2942 */
2901 ath5k_debug_finish_device(sc);
2902 ieee80211_unregister_hw(hw); 2943 ieee80211_unregister_hw(hw);
2903 ath5k_desc_free(sc); 2944 ath5k_desc_free(sc);
2904 ath5k_txq_release(sc); 2945 ath5k_txq_release(sc);
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 978f1f4ac2f3..b294f3305011 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -193,12 +193,13 @@ struct ath5k_softc {
193 dma_addr_t desc_daddr; /* DMA (physical) address */ 193 dma_addr_t desc_daddr; /* DMA (physical) address */
194 size_t desc_len; /* size of TX/RX descriptors */ 194 size_t desc_len; /* size of TX/RX descriptors */
195 195
196 DECLARE_BITMAP(status, 5); 196 DECLARE_BITMAP(status, 6);
197#define ATH_STAT_INVALID 0 /* disable hardware accesses */ 197#define ATH_STAT_INVALID 0 /* disable hardware accesses */
198#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ 198#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
199#define ATH_STAT_PROMISC 2 199#define ATH_STAT_PROMISC 2
200#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ 200#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
201#define ATH_STAT_STARTED 4 /* opened & irqs enabled */ 201#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
202#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
202 203
203 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 204 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
204 struct ieee80211_channel *curchan; /* current h/w channel */ 205 struct ieee80211_channel *curchan; /* current h/w channel */
@@ -207,6 +208,10 @@ struct ath5k_softc {
207 208
208 enum ath5k_int imask; /* interrupt mask copy */ 209 enum ath5k_int imask; /* interrupt mask copy */
209 210
211 spinlock_t irqlock;
212 bool rx_pending; /* rx tasklet pending */
213 bool tx_pending; /* tx tasklet pending */
214
210 u8 lladdr[ETH_ALEN]; 215 u8 lladdr[ETH_ALEN];
211 u8 bssidmask[ETH_ALEN]; 216 u8 bssidmask[ETH_ALEN];
212 217
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index f77e8a703c5c..7dd88e1c3ff8 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -94,6 +94,9 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
94 } 94 }
95 } 95 }
96 96
97 if ((ah->ah_radio_5ghz_revision & 0xf0) == AR5K_SREV_RAD_2112)
98 __clear_bit(AR5K_MODE_11A, caps->cap_mode);
99
97 /* Set number of supported TX queues */ 100 /* Set number of supported TX queues */
98 if (ah->ah_version == AR5K_AR5210) 101 if (ah->ah_version == AR5K_AR5210)
99 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU; 102 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 0230f30e9e9a..0bf7313b8a17 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -888,65 +888,38 @@ static const struct file_operations fops_queue = {
888void 888void
889ath5k_debug_init_device(struct ath5k_softc *sc) 889ath5k_debug_init_device(struct ath5k_softc *sc)
890{ 890{
891 sc->debug.level = ath5k_debug; 891 struct dentry *phydir;
892 892
893 sc->debug.debugfs_phydir = debugfs_create_dir("ath5k", 893 sc->debug.level = ath5k_debug;
894 sc->hw->wiphy->debugfsdir);
895 894
896 sc->debug.debugfs_debug = debugfs_create_file("debug", 895 phydir = debugfs_create_dir("ath5k", sc->hw->wiphy->debugfsdir);
897 S_IWUSR | S_IRUSR, 896 if (!phydir)
898 sc->debug.debugfs_phydir, sc, &fops_debug); 897 return;
899 898
900 sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUSR, 899 debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, sc,
901 sc->debug.debugfs_phydir, sc, &fops_registers); 900 &fops_debug);
902 901
903 sc->debug.debugfs_beacon = debugfs_create_file("beacon", 902 debugfs_create_file("registers", S_IRUSR, phydir, sc, &fops_registers);
904 S_IWUSR | S_IRUSR,
905 sc->debug.debugfs_phydir, sc, &fops_beacon);
906 903
907 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR, 904 debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, sc,
908 sc->debug.debugfs_phydir, sc, &fops_reset); 905 &fops_beacon);
909 906
910 sc->debug.debugfs_antenna = debugfs_create_file("antenna", 907 debugfs_create_file("reset", S_IWUSR, phydir, sc, &fops_reset);
911 S_IWUSR | S_IRUSR,
912 sc->debug.debugfs_phydir, sc, &fops_antenna);
913 908
914 sc->debug.debugfs_misc = debugfs_create_file("misc", 909 debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, sc,
915 S_IRUSR, 910 &fops_antenna);
916 sc->debug.debugfs_phydir, sc, &fops_misc);
917 911
918 sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors", 912 debugfs_create_file("misc", S_IRUSR, phydir, sc, &fops_misc);
919 S_IWUSR | S_IRUSR,
920 sc->debug.debugfs_phydir, sc,
921 &fops_frameerrors);
922 913
923 sc->debug.debugfs_ani = debugfs_create_file("ani", 914 debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, sc,
924 S_IWUSR | S_IRUSR, 915 &fops_frameerrors);
925 sc->debug.debugfs_phydir, sc,
926 &fops_ani);
927 916
928 sc->debug.debugfs_queue = debugfs_create_file("queue", 917 debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, sc, &fops_ani);
929 S_IWUSR | S_IRUSR,
930 sc->debug.debugfs_phydir, sc,
931 &fops_queue);
932}
933 918
934void 919 debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, sc,
935ath5k_debug_finish_device(struct ath5k_softc *sc) 920 &fops_queue);
936{
937 debugfs_remove(sc->debug.debugfs_debug);
938 debugfs_remove(sc->debug.debugfs_registers);
939 debugfs_remove(sc->debug.debugfs_beacon);
940 debugfs_remove(sc->debug.debugfs_reset);
941 debugfs_remove(sc->debug.debugfs_antenna);
942 debugfs_remove(sc->debug.debugfs_misc);
943 debugfs_remove(sc->debug.debugfs_frameerrors);
944 debugfs_remove(sc->debug.debugfs_ani);
945 debugfs_remove(sc->debug.debugfs_queue);
946 debugfs_remove(sc->debug.debugfs_phydir);
947} 921}
948 922
949
950/* functions used in other places */ 923/* functions used in other places */
951 924
952void 925void
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index b0355aef68d3..193dd2d4ea3c 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -68,17 +68,6 @@ struct ath5k_buf;
68 68
69struct ath5k_dbg_info { 69struct ath5k_dbg_info {
70 unsigned int level; /* debug level */ 70 unsigned int level; /* debug level */
71 /* debugfs entries */
72 struct dentry *debugfs_phydir;
73 struct dentry *debugfs_debug;
74 struct dentry *debugfs_registers;
75 struct dentry *debugfs_beacon;
76 struct dentry *debugfs_reset;
77 struct dentry *debugfs_antenna;
78 struct dentry *debugfs_misc;
79 struct dentry *debugfs_frameerrors;
80 struct dentry *debugfs_ani;
81 struct dentry *debugfs_queue;
82}; 71};
83 72
84/** 73/**
@@ -141,9 +130,6 @@ void
141ath5k_debug_init_device(struct ath5k_softc *sc); 130ath5k_debug_init_device(struct ath5k_softc *sc);
142 131
143void 132void
144ath5k_debug_finish_device(struct ath5k_softc *sc);
145
146void
147ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah); 133ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah);
148 134
149void 135void
@@ -167,9 +153,6 @@ static inline void
167ath5k_debug_init_device(struct ath5k_softc *sc) {} 153ath5k_debug_init_device(struct ath5k_softc *sc) {}
168 154
169static inline void 155static inline void
170ath5k_debug_finish_device(struct ath5k_softc *sc) {}
171
172static inline void
173ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {} 156ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {}
174 157
175static inline void 158static inline void
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index a8fcc94269f7..62172d585723 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -185,6 +185,12 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
185 struct ath5k_hw_4w_tx_ctl *tx_ctl; 185 struct ath5k_hw_4w_tx_ctl *tx_ctl;
186 unsigned int frame_len; 186 unsigned int frame_len;
187 187
188 /*
189 * Use local variables for these to reduce load/store access on
190 * uncached memory
191 */
192 u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0;
193
188 tx_ctl = &desc->ud.ds_tx5212.tx_ctl; 194 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
189 195
190 /* 196 /*
@@ -208,8 +214,9 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
208 if (tx_power > AR5K_TUNE_MAX_TXPOWER) 214 if (tx_power > AR5K_TUNE_MAX_TXPOWER)
209 tx_power = AR5K_TUNE_MAX_TXPOWER; 215 tx_power = AR5K_TUNE_MAX_TXPOWER;
210 216
211 /* Clear descriptor */ 217 /* Clear descriptor status area */
212 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc)); 218 memset(&desc->ud.ds_tx5212.tx_stat, 0,
219 sizeof(desc->ud.ds_tx5212.tx_stat));
213 220
214 /* Setup control descriptor */ 221 /* Setup control descriptor */
215 222
@@ -221,7 +228,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
221 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) 228 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
222 return -EINVAL; 229 return -EINVAL;
223 230
224 tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN; 231 txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
225 232
226 /* Verify and set buffer length */ 233 /* Verify and set buffer length */
227 234
@@ -232,21 +239,17 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
232 if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN) 239 if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
233 return -EINVAL; 240 return -EINVAL;
234 241
235 tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN; 242 txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
236 243
237 tx_ctl->tx_control_0 |= 244 txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
238 AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) | 245 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
239 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT); 246 txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
240 tx_ctl->tx_control_1 |= AR5K_REG_SM(type, 247 txctl2 = AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
241 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE); 248 txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
242 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
243 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
244 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
245 249
246#define _TX_FLAGS(_c, _flag) \ 250#define _TX_FLAGS(_c, _flag) \
247 if (flags & AR5K_TXDESC_##_flag) { \ 251 if (flags & AR5K_TXDESC_##_flag) { \
248 tx_ctl->tx_control_##_c |= \ 252 txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
249 AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
250 } 253 }
251 254
252 _TX_FLAGS(0, CLRDMASK); 255 _TX_FLAGS(0, CLRDMASK);
@@ -262,8 +265,8 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
262 * WEP crap 265 * WEP crap
263 */ 266 */
264 if (key_index != AR5K_TXKEYIX_INVALID) { 267 if (key_index != AR5K_TXKEYIX_INVALID) {
265 tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; 268 txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
266 tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index, 269 txctl1 |= AR5K_REG_SM(key_index,
267 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX); 270 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
268 } 271 }
269 272
@@ -274,12 +277,16 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
274 if ((flags & AR5K_TXDESC_RTSENA) && 277 if ((flags & AR5K_TXDESC_RTSENA) &&
275 (flags & AR5K_TXDESC_CTSENA)) 278 (flags & AR5K_TXDESC_CTSENA))
276 return -EINVAL; 279 return -EINVAL;
277 tx_ctl->tx_control_2 |= rtscts_duration & 280 txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
278 AR5K_4W_TX_DESC_CTL2_RTS_DURATION; 281 txctl3 |= AR5K_REG_SM(rtscts_rate,
279 tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
280 AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE); 282 AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
281 } 283 }
282 284
285 tx_ctl->tx_control_0 = txctl0;
286 tx_ctl->tx_control_1 = txctl1;
287 tx_ctl->tx_control_2 = txctl2;
288 tx_ctl->tx_control_3 = txctl3;
289
283 return 0; 290 return 0;
284} 291}
285 292
@@ -364,7 +371,7 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
364 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); 371 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
365 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0, 372 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
366 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); 373 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
367 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0, 374 ts->ts_final_retry = AR5K_REG_MS(tx_status->tx_status_0,
368 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); 375 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
369 /*TODO: ts->ts_virtcol + test*/ 376 /*TODO: ts->ts_virtcol + test*/
370 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1, 377 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
@@ -373,9 +380,6 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
373 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); 380 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
374 ts->ts_antenna = 1; 381 ts->ts_antenna = 1;
375 ts->ts_status = 0; 382 ts->ts_status = 0;
376 ts->ts_rate[0] = AR5K_REG_MS(tx_ctl->tx_control_0,
377 AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
378 ts->ts_retry[0] = ts->ts_longretry;
379 ts->ts_final_idx = 0; 383 ts->ts_final_idx = 0;
380 384
381 if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) { 385 if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
@@ -401,81 +405,48 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
401{ 405{
402 struct ath5k_hw_4w_tx_ctl *tx_ctl; 406 struct ath5k_hw_4w_tx_ctl *tx_ctl;
403 struct ath5k_hw_tx_status *tx_status; 407 struct ath5k_hw_tx_status *tx_status;
408 u32 txstat0, txstat1;
404 409
405 tx_ctl = &desc->ud.ds_tx5212.tx_ctl; 410 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
406 tx_status = &desc->ud.ds_tx5212.tx_stat; 411 tx_status = &desc->ud.ds_tx5212.tx_stat;
407 412
413 txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
414
 408 /* No frame has been sent, or an error occurred */ 415 /* No frame has been sent, or an error occurred */
409 if (unlikely(!(tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE))) 416 if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
410 return -EINPROGRESS; 417 return -EINPROGRESS;
411 418
419 txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
420
412 /* 421 /*
413 * Get descriptor status 422 * Get descriptor status
414 */ 423 */
415 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0, 424 ts->ts_tstamp = AR5K_REG_MS(txstat0,
416 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); 425 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
417 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0, 426 ts->ts_shortretry = AR5K_REG_MS(txstat0,
418 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); 427 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
419 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0, 428 ts->ts_final_retry = AR5K_REG_MS(txstat0,
420 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); 429 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
421 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1, 430 ts->ts_seqnum = AR5K_REG_MS(txstat1,
422 AR5K_DESC_TX_STATUS1_SEQ_NUM); 431 AR5K_DESC_TX_STATUS1_SEQ_NUM);
423 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, 432 ts->ts_rssi = AR5K_REG_MS(txstat1,
424 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); 433 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
425 ts->ts_antenna = (tx_status->tx_status_1 & 434 ts->ts_antenna = (txstat1 &
426 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1; 435 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
427 ts->ts_status = 0; 436 ts->ts_status = 0;
428 437
429 ts->ts_final_idx = AR5K_REG_MS(tx_status->tx_status_1, 438 ts->ts_final_idx = AR5K_REG_MS(txstat1,
430 AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212); 439 AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
431 440
432 /* The longretry counter has the number of un-acked retries
433 * for the final rate. To get the total number of retries
434 * we have to add the retry counters for the other rates
435 * as well
436 */
437 ts->ts_retry[ts->ts_final_idx] = ts->ts_longretry;
438 switch (ts->ts_final_idx) {
439 case 3:
440 ts->ts_rate[3] = AR5K_REG_MS(tx_ctl->tx_control_3,
441 AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
442
443 ts->ts_retry[2] = AR5K_REG_MS(tx_ctl->tx_control_2,
444 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
445 ts->ts_longretry += ts->ts_retry[2];
446 /* fall through */
447 case 2:
448 ts->ts_rate[2] = AR5K_REG_MS(tx_ctl->tx_control_3,
449 AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
450
451 ts->ts_retry[1] = AR5K_REG_MS(tx_ctl->tx_control_2,
452 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
453 ts->ts_longretry += ts->ts_retry[1];
454 /* fall through */
455 case 1:
456 ts->ts_rate[1] = AR5K_REG_MS(tx_ctl->tx_control_3,
457 AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
458
459 ts->ts_retry[0] = AR5K_REG_MS(tx_ctl->tx_control_2,
460 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
461 ts->ts_longretry += ts->ts_retry[0];
462 /* fall through */
463 case 0:
464 ts->ts_rate[0] = tx_ctl->tx_control_3 &
465 AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
466 break;
467 }
468
469 /* TX error */ 441 /* TX error */
470 if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) { 442 if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
471 if (tx_status->tx_status_0 & 443 if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
472 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
473 ts->ts_status |= AR5K_TXERR_XRETRY; 444 ts->ts_status |= AR5K_TXERR_XRETRY;
474 445
475 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN) 446 if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
476 ts->ts_status |= AR5K_TXERR_FIFO; 447 ts->ts_status |= AR5K_TXERR_FIFO;
477 448
478 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED) 449 if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED)
479 ts->ts_status |= AR5K_TXERR_FILT; 450 ts->ts_status |= AR5K_TXERR_FILT;
480 } 451 }
481 452
@@ -609,37 +580,37 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
609 struct ath5k_rx_status *rs) 580 struct ath5k_rx_status *rs)
610{ 581{
611 struct ath5k_hw_rx_status *rx_status; 582 struct ath5k_hw_rx_status *rx_status;
583 u32 rxstat0, rxstat1;
612 584
613 rx_status = &desc->ud.ds_rx.rx_stat; 585 rx_status = &desc->ud.ds_rx.rx_stat;
586 rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
614 587
615 /* No frame received / not ready */ 588 /* No frame received / not ready */
616 if (unlikely(!(rx_status->rx_status_1 & 589 if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
617 AR5K_5212_RX_DESC_STATUS1_DONE)))
618 return -EINPROGRESS; 590 return -EINPROGRESS;
619 591
620 memset(rs, 0, sizeof(struct ath5k_rx_status)); 592 memset(rs, 0, sizeof(struct ath5k_rx_status));
593 rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
621 594
622 /* 595 /*
623 * Frame receive status 596 * Frame receive status
624 */ 597 */
625 rs->rs_datalen = rx_status->rx_status_0 & 598 rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
626 AR5K_5212_RX_DESC_STATUS0_DATA_LEN; 599 rs->rs_rssi = AR5K_REG_MS(rxstat0,
627 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
628 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL); 600 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
629 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 601 rs->rs_rate = AR5K_REG_MS(rxstat0,
630 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE); 602 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
631 rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0, 603 rs->rs_antenna = AR5K_REG_MS(rxstat0,
632 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA); 604 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
633 rs->rs_more = !!(rx_status->rx_status_0 & 605 rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE);
634 AR5K_5212_RX_DESC_STATUS0_MORE); 606 rs->rs_tstamp = AR5K_REG_MS(rxstat1,
635 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
636 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 607 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
637 608
638 /* 609 /*
639 * Key table status 610 * Key table status
640 */ 611 */
641 if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID) 612 if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
642 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, 613 rs->rs_keyix = AR5K_REG_MS(rxstat1,
643 AR5K_5212_RX_DESC_STATUS1_KEY_INDEX); 614 AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
644 else 615 else
645 rs->rs_keyix = AR5K_RXKEYIX_INVALID; 616 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
@@ -647,27 +618,22 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
647 /* 618 /*
648 * Receive/descriptor errors 619 * Receive/descriptor errors
649 */ 620 */
650 if (!(rx_status->rx_status_1 & 621 if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
651 AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) { 622 if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
652 if (rx_status->rx_status_1 &
653 AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
654 rs->rs_status |= AR5K_RXERR_CRC; 623 rs->rs_status |= AR5K_RXERR_CRC;
655 624
656 if (rx_status->rx_status_1 & 625 if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
657 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
658 rs->rs_status |= AR5K_RXERR_PHY; 626 rs->rs_status |= AR5K_RXERR_PHY;
659 rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1, 627 rs->rs_phyerr = AR5K_REG_MS(rxstat1,
660 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE); 628 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
661 if (!ah->ah_capabilities.cap_has_phyerr_counters) 629 if (!ah->ah_capabilities.cap_has_phyerr_counters)
662 ath5k_ani_phy_error_report(ah, rs->rs_phyerr); 630 ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
663 } 631 }
664 632
665 if (rx_status->rx_status_1 & 633 if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
666 AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
667 rs->rs_status |= AR5K_RXERR_DECRYPT; 634 rs->rs_status |= AR5K_RXERR_DECRYPT;
668 635
669 if (rx_status->rx_status_1 & 636 if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
670 AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
671 rs->rs_status |= AR5K_RXERR_MIC; 637 rs->rs_status |= AR5K_RXERR_MIC;
672 } 638 }
673 return 0; 639 return 0;
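
The desc.c hunks above replace repeated dereferences of the TX/RX descriptor status words with a single read into a local variable (via ACCESS_ONCE) followed by mask/shift extraction on that cached copy, since the descriptors sit in uncached coherent memory and every access goes out to the bus. Below is a minimal userspace sketch of the same read-once-then-extract pattern; the field masks and layout are invented for illustration and are not the real AR5K descriptor format.

/* Sketch only: FIELD_* values are made up, not the AR5K definitions. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_DONE      0x00000001u
#define FIELD_SEQ_NUM   0x0ffff000u
#define FIELD_SEQ_NUM_S 12

/* Analogue of AR5K_REG_MS(): mask a field out of a word and shift it down. */
#define REG_MS(val, field) (((val) & (field)) >> field##_S)

/* Pretend this points at an uncached DMA descriptor: read it exactly once. */
static int parse_status(const volatile uint32_t *status_word,
			unsigned int *seqnum)
{
	uint32_t status = *status_word; /* single read, like ACCESS_ONCE() */

	if (!(status & FIELD_DONE))
		return -1;              /* hardware has not finished this one */

	*seqnum = REG_MS(status, FIELD_SEQ_NUM); /* extract from the local copy */
	return 0;
}

int main(void)
{
	volatile uint32_t fake_desc = (42u << FIELD_SEQ_NUM_S) | FIELD_DONE;
	unsigned int seq;

	if (!parse_status(&fake_desc, &seq))
		printf("seq=%u\n", seq);
	return 0;
}
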
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index efb672cb31e4..1fef84f87c78 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -660,6 +660,53 @@ ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
660 vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100; 660 vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
661} 661}
662 662
663static int
664ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
665{
666 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
667 struct ath5k_chan_pcal_info *chinfo;
668 u8 pier, pdg;
669
670 switch (mode) {
671 case AR5K_EEPROM_MODE_11A:
672 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
673 return 0;
674 chinfo = ee->ee_pwr_cal_a;
675 break;
676 case AR5K_EEPROM_MODE_11B:
677 if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
678 return 0;
679 chinfo = ee->ee_pwr_cal_b;
680 break;
681 case AR5K_EEPROM_MODE_11G:
682 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
683 return 0;
684 chinfo = ee->ee_pwr_cal_g;
685 break;
686 default:
687 return -EINVAL;
688 }
689
690 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
691 if (!chinfo[pier].pd_curves)
692 continue;
693
694 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
695 struct ath5k_pdgain_info *pd =
696 &chinfo[pier].pd_curves[pdg];
697
698 if (pd != NULL) {
699 kfree(pd->pd_step);
700 kfree(pd->pd_pwr);
701 }
702 }
703
704 kfree(chinfo[pier].pd_curves);
705 }
706
707 return 0;
708}
709
663/* Convert RF5111 specific data to generic raw data 710/* Convert RF5111 specific data to generic raw data
664 * used by interpolation code */ 711 * used by interpolation code */
665static int 712static int
@@ -684,7 +731,7 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
684 GFP_KERNEL); 731 GFP_KERNEL);
685 732
686 if (!chinfo[pier].pd_curves) 733 if (!chinfo[pier].pd_curves)
687 return -ENOMEM; 734 goto err_out;
688 735
689 /* Only one curve for RF5111 736 /* Only one curve for RF5111
690 * find out which one and place 737 * find out which one and place
@@ -708,12 +755,12 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
708 pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, 755 pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
709 sizeof(u8), GFP_KERNEL); 756 sizeof(u8), GFP_KERNEL);
710 if (!pd->pd_step) 757 if (!pd->pd_step)
711 return -ENOMEM; 758 goto err_out;
712 759
713 pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111, 760 pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
714 sizeof(s16), GFP_KERNEL); 761 sizeof(s16), GFP_KERNEL);
715 if (!pd->pd_pwr) 762 if (!pd->pd_pwr)
716 return -ENOMEM; 763 goto err_out;
717 764
718 /* Fill raw dataset 765 /* Fill raw dataset
719 * (convert power to 0.25dB units 766 * (convert power to 0.25dB units
@@ -734,6 +781,10 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
734 } 781 }
735 782
736 return 0; 783 return 0;
784
785err_out:
786 ath5k_eeprom_free_pcal_info(ah, mode);
787 return -ENOMEM;
737} 788}
738 789
739/* Parse EEPROM data */ 790/* Parse EEPROM data */
@@ -867,7 +918,7 @@ ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
867 GFP_KERNEL); 918 GFP_KERNEL);
868 919
869 if (!chinfo[pier].pd_curves) 920 if (!chinfo[pier].pd_curves)
870 return -ENOMEM; 921 goto err_out;
871 922
872 /* Fill pd_curves */ 923 /* Fill pd_curves */
873 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { 924 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
@@ -886,14 +937,13 @@ ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
886 sizeof(u8), GFP_KERNEL); 937 sizeof(u8), GFP_KERNEL);
887 938
888 if (!pd->pd_step) 939 if (!pd->pd_step)
889 return -ENOMEM; 940 goto err_out;
890 941
891 pd->pd_pwr = kcalloc(pd->pd_points, 942 pd->pd_pwr = kcalloc(pd->pd_points,
892 sizeof(s16), GFP_KERNEL); 943 sizeof(s16), GFP_KERNEL);
893 944
894 if (!pd->pd_pwr) 945 if (!pd->pd_pwr)
895 return -ENOMEM; 946 goto err_out;
896
897 947
898 /* Fill raw dataset 948 /* Fill raw dataset
899 * (all power levels are in 0.25dB units) */ 949 * (all power levels are in 0.25dB units) */
@@ -925,13 +975,13 @@ ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
925 sizeof(u8), GFP_KERNEL); 975 sizeof(u8), GFP_KERNEL);
926 976
927 if (!pd->pd_step) 977 if (!pd->pd_step)
928 return -ENOMEM; 978 goto err_out;
929 979
930 pd->pd_pwr = kcalloc(pd->pd_points, 980 pd->pd_pwr = kcalloc(pd->pd_points,
931 sizeof(s16), GFP_KERNEL); 981 sizeof(s16), GFP_KERNEL);
932 982
933 if (!pd->pd_pwr) 983 if (!pd->pd_pwr)
934 return -ENOMEM; 984 goto err_out;
935 985
936 /* Fill raw dataset 986 /* Fill raw dataset
937 * (all power levels are in 0.25dB units) */ 987 * (all power levels are in 0.25dB units) */
@@ -954,6 +1004,10 @@ ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
954 } 1004 }
955 1005
956 return 0; 1006 return 0;
1007
1008err_out:
1009 ath5k_eeprom_free_pcal_info(ah, mode);
1010 return -ENOMEM;
957} 1011}
958 1012
959/* Parse EEPROM data */ 1013/* Parse EEPROM data */
@@ -1156,7 +1210,7 @@ ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
1156 GFP_KERNEL); 1210 GFP_KERNEL);
1157 1211
1158 if (!chinfo[pier].pd_curves) 1212 if (!chinfo[pier].pd_curves)
1159 return -ENOMEM; 1213 goto err_out;
1160 1214
1161 /* Fill pd_curves */ 1215 /* Fill pd_curves */
1162 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) { 1216 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
@@ -1177,13 +1231,13 @@ ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
1177 sizeof(u8), GFP_KERNEL); 1231 sizeof(u8), GFP_KERNEL);
1178 1232
1179 if (!pd->pd_step) 1233 if (!pd->pd_step)
1180 return -ENOMEM; 1234 goto err_out;
1181 1235
1182 pd->pd_pwr = kcalloc(pd->pd_points, 1236 pd->pd_pwr = kcalloc(pd->pd_points,
1183 sizeof(s16), GFP_KERNEL); 1237 sizeof(s16), GFP_KERNEL);
1184 1238
1185 if (!pd->pd_pwr) 1239 if (!pd->pd_pwr)
1186 return -ENOMEM; 1240 goto err_out;
1187 1241
1188 /* Fill raw dataset 1242 /* Fill raw dataset
1189 * convert all pwr levels to 1243 * convert all pwr levels to
@@ -1213,6 +1267,10 @@ ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
1213 } 1267 }
1214 1268
1215 return 0; 1269 return 0;
1270
1271err_out:
1272 ath5k_eeprom_free_pcal_info(ah, mode);
1273 return -ENOMEM;
1216} 1274}
1217 1275
1218/* Parse EEPROM data */ 1276/* Parse EEPROM data */
@@ -1534,53 +1592,6 @@ ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1534 return 0; 1592 return 0;
1535} 1593}
1536 1594
1537static int
1538ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
1539{
1540 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1541 struct ath5k_chan_pcal_info *chinfo;
1542 u8 pier, pdg;
1543
1544 switch (mode) {
1545 case AR5K_EEPROM_MODE_11A:
1546 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
1547 return 0;
1548 chinfo = ee->ee_pwr_cal_a;
1549 break;
1550 case AR5K_EEPROM_MODE_11B:
1551 if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
1552 return 0;
1553 chinfo = ee->ee_pwr_cal_b;
1554 break;
1555 case AR5K_EEPROM_MODE_11G:
1556 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
1557 return 0;
1558 chinfo = ee->ee_pwr_cal_g;
1559 break;
1560 default:
1561 return -EINVAL;
1562 }
1563
1564 for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
1565 if (!chinfo[pier].pd_curves)
1566 continue;
1567
1568 for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
1569 struct ath5k_pdgain_info *pd =
1570 &chinfo[pier].pd_curves[pdg];
1571
1572 if (pd != NULL) {
1573 kfree(pd->pd_step);
1574 kfree(pd->pd_pwr);
1575 }
1576 }
1577
1578 kfree(chinfo[pier].pd_curves);
1579 }
1580
1581 return 0;
1582}
1583
1584/* Read conformance test limits used for regulatory control */ 1595/* Read conformance test limits used for regulatory control */
1585static int 1596static int
1586ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) 1597ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
@@ -1721,35 +1732,6 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
1721 return ret; 1732 return ret;
1722} 1733}
1723 1734
1724/*
1725 * Read the MAC address from eeprom
1726 */
1727int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
1728{
1729 u8 mac_d[ETH_ALEN] = {};
1730 u32 total, offset;
1731 u16 data;
1732 int octet;
1733
1734 AR5K_EEPROM_READ(0x20, data);
1735
1736 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
1737 AR5K_EEPROM_READ(offset, data);
1738
1739 total += data;
1740 mac_d[octet + 1] = data & 0xff;
1741 mac_d[octet] = data >> 8;
1742 octet += 2;
1743 }
1744
1745 if (!total || total == 3 * 0xffff)
1746 return -EINVAL;
1747
1748 memcpy(mac, mac_d, ETH_ALEN);
1749
1750 return 0;
1751}
1752
1753 1735
1754/***********************\ 1736/***********************\
1755* Init/Detach functions * 1737* Init/Detach functions *
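
The eeprom.c hunks above turn every mid-loop "return -ENOMEM" into "goto err_out", where ath5k_eeprom_free_pcal_info() (moved above its new callers) walks the per-pier pd_curves arrays and frees whatever pd_step/pd_pwr buffers were already allocated, instead of leaking them. A hedged userspace sketch of that allocate-in-a-loop, unwind-on-failure shape, using calloc/free in place of kcalloc/kfree and a simplified stand-in for the calibration structures:

/* Illustration only: not the real ath5k_chan_pcal_info layout. */
#include <stdlib.h>
#include <stdio.h>

struct curve {
	unsigned char *steps;
	short         *powers;
};

static void free_curves(struct curve *c, int n)
{
	/* Safe on a partially filled array: free(NULL) is a no-op. */
	for (int i = 0; i < n; i++) {
		free(c[i].steps);
		free(c[i].powers);
	}
}

static int alloc_curves(struct curve *c, int n, int points)
{
	for (int i = 0; i < n; i++) {
		c[i].steps = calloc(points, sizeof(*c[i].steps));
		if (!c[i].steps)
			goto err_out;
		c[i].powers = calloc(points, sizeof(*c[i].powers));
		if (!c[i].powers)
			goto err_out;
	}
	return 0;

err_out:
	free_curves(c, n);	/* release everything allocated so far */
	return -1;
}

int main(void)
{
	struct curve curves[4] = { 0 };

	if (alloc_curves(curves, 4, 11) == 0) {
		puts("allocated");
		free_curves(curves, 4);
	}
	return 0;
}
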
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 9be29b728b1c..807bd6440169 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -282,6 +282,15 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
282 if (changes & BSS_CHANGED_BEACON_INT) 282 if (changes & BSS_CHANGED_BEACON_INT)
283 sc->bintval = bss_conf->beacon_int; 283 sc->bintval = bss_conf->beacon_int;
284 284
285 if (changes & BSS_CHANGED_ERP_SLOT) {
286 int slot_time;
287
288 ah->ah_short_slot = bss_conf->use_short_slot;
289 slot_time = ath5k_hw_get_default_slottime(ah) +
290 3 * ah->ah_coverage_class;
291 ath5k_hw_set_ifs_intervals(ah, slot_time);
292 }
293
285 if (changes & BSS_CHANGED_ASSOC) { 294 if (changes & BSS_CHANGED_ASSOC) {
286 avf->assoc = bss_conf->assoc; 295 avf->assoc = bss_conf->assoc;
287 if (bss_conf->assoc) 296 if (bss_conf->assoc)
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 3c44689a700b..296c316a8341 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -17,6 +17,7 @@
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/etherdevice.h>
20#include "../ath.h" 21#include "../ath.h"
21#include "ath5k.h" 22#include "ath5k.h"
22#include "debug.h" 23#include "debug.h"
@@ -108,11 +109,42 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
108 return 0; 109 return 0;
109} 110}
110 111
112/*
113 * Read the MAC address from eeprom or platform_data
114 */
115static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
116{
117 u8 mac_d[ETH_ALEN] = {};
118 u32 total, offset;
119 u16 data;
120 int octet;
121
122 AR5K_EEPROM_READ(0x20, data);
123
124 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
125 AR5K_EEPROM_READ(offset, data);
126
127 total += data;
128 mac_d[octet + 1] = data & 0xff;
129 mac_d[octet] = data >> 8;
130 octet += 2;
131 }
132
133 if (!total || total == 3 * 0xffff)
134 return -EINVAL;
135
136 memcpy(mac, mac_d, ETH_ALEN);
137
138 return 0;
139}
140
141
111/* Common ath_bus_opts structure */ 142/* Common ath_bus_opts structure */
112static const struct ath_bus_ops ath_pci_bus_ops = { 143static const struct ath_bus_ops ath_pci_bus_ops = {
113 .ath_bus_type = ATH_PCI, 144 .ath_bus_type = ATH_PCI,
114 .read_cachesize = ath5k_pci_read_cachesize, 145 .read_cachesize = ath5k_pci_read_cachesize,
115 .eeprom_read = ath5k_pci_eeprom_read, 146 .eeprom_read = ath5k_pci_eeprom_read,
147 .eeprom_read_mac = ath5k_pci_eeprom_read_mac,
116}; 148};
117 149
118/********************\ 150/********************\
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index d9b3f828455a..712a9ac4000e 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -75,7 +75,7 @@ static const unsigned int ack_rates_high[] =
75 * bwmodes. 75 * bwmodes.
76 */ 76 */
77int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, 77int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
78 int len, struct ieee80211_rate *rate) 78 int len, struct ieee80211_rate *rate, bool shortpre)
79{ 79{
80 struct ath5k_softc *sc = ah->ah_sc; 80 struct ath5k_softc *sc = ah->ah_sc;
81 int sifs, preamble, plcp_bits, sym_time; 81 int sifs, preamble, plcp_bits, sym_time;
@@ -84,9 +84,15 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
84 84
85 /* Fallback */ 85 /* Fallback */
86 if (!ah->ah_bwmode) { 86 if (!ah->ah_bwmode) {
87 dur = ieee80211_generic_frame_duration(sc->hw, 87 __le16 raw_dur = ieee80211_generic_frame_duration(sc->hw,
88 NULL, len, rate); 88 NULL, len, rate);
89 return le16_to_cpu(dur); 89
90 /* subtract difference between long and short preamble */
91 dur = le16_to_cpu(raw_dur);
92 if (shortpre)
93 dur -= 96;
94
95 return dur;
90 } 96 }
91 97
92 bitrate = rate->bitrate; 98 bitrate = rate->bitrate;
@@ -145,9 +151,9 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
145 slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE; 151 slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE;
146 break; 152 break;
147 case AR5K_BWMODE_DEFAULT: 153 case AR5K_BWMODE_DEFAULT:
148 slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
149 default: 154 default:
150 if (channel->hw_value & CHANNEL_CCK) 155 slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
156 if ((channel->hw_value & CHANNEL_CCK) && !ah->ah_short_slot)
151 slot_time = AR5K_INIT_SLOT_TIME_B; 157 slot_time = AR5K_INIT_SLOT_TIME_B;
152 break; 158 break;
153 } 159 }
@@ -263,27 +269,14 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
263 * actual rate for this rate. See mac80211 tx.c 269 * actual rate for this rate. See mac80211 tx.c
264 * ieee80211_duration() for a brief description of 270 * ieee80211_duration() for a brief description of
265 * what rate we should choose to TX ACKs. */ 271 * what rate we should choose to TX ACKs. */
266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); 272 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
267 273
268 ath5k_hw_reg_write(ah, tx_time, reg); 274 ath5k_hw_reg_write(ah, tx_time, reg);
269 275
270 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) 276 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
271 continue; 277 continue;
272 278
273 /* 279 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true);
274 * We're not distinguishing short preamble here,
275 * This is true, all we'll get is a longer value here
276 * which is not necessarilly bad. We could use
277 * export ieee80211_frame_duration() but that needs to be
278 * fixed first to be properly used by mac802111 drivers:
279 *
280 * - remove erp stuff and let the routine figure ofdm
281 * erp rates
282 * - remove passing argument ieee80211_local as
283 * drivers don't have access to it
284 * - move drivers using ieee80211_generic_frame_duration()
285 * to this
286 */
287 ath5k_hw_reg_write(ah, tx_time, 280 ath5k_hw_reg_write(ah, tx_time,
288 reg + (AR5K_SET_SHORT_PREAMBLE << 2)); 281 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
289 } 282 }
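
In the pcu.c hunk above, ath5k_hw_get_frame_duration() gains a shortpre argument and, in the fallback path, subtracts 96 from the value returned by ieee80211_generic_frame_duration(), which assumes a long preamble: an 802.11b long preamble plus PLCP header takes 192 us while the short variant takes 96 us, so the short-preamble duration is simply 96 us less. A toy calculation under that assumption (the input value is hypothetical, not taken from the driver):

#include <stdio.h>

/* Long preamble + PLCP header = 192 us, short variant = 96 us, so a
 * short-preamble frame is 96 us quicker (802.11b timing). */
static int frame_duration_us(int long_preamble_duration_us, int shortpre)
{
	return long_preamble_duration_us - (shortpre ? 96 : 0);
}

int main(void)
{
	int dur = 304;	/* hypothetical long-preamble duration in us */

	printf("long preamble: %d us, short preamble: %d us\n",
	       frame_duration_us(dur, 0), frame_duration_us(dur, 1));
	return 0;
}
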
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 3343fb9e4940..b18c5021aac3 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -519,7 +519,7 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
519 return -EINVAL; 519 return -EINVAL;
520 520
521 sifs = ath5k_hw_get_default_sifs(ah); 521 sifs = ath5k_hw_get_default_sifs(ah);
522 sifs_clock = ath5k_hw_htoclock(ah, sifs); 522 sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
523 523
524 /* EIFS 524 /* EIFS
525 * Txtime of ack at lowest rate + SIFS + DIFS 525 * Txtime of ack at lowest rate + SIFS + DIFS
@@ -550,7 +550,7 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
550 else 550 else
551 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0]; 551 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
552 552
553 ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); 553 ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
554 554
555 /* ack_tx_time includes an SIFS already */ 555 /* ack_tx_time includes an SIFS already */
556 eifs = ack_tx_time + sifs + 2 * slot_time; 556 eifs = ack_tx_time + sifs + 2 * slot_time;
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 84206898f77d..3510de2cf622 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -159,6 +159,11 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
159 rxlat = AR5K_REG_MS(usec_reg, AR5K_USEC_RX_LATENCY_5211); 159 rxlat = AR5K_REG_MS(usec_reg, AR5K_USEC_RX_LATENCY_5211);
160 160
161 /* 161 /*
162 * Set default Tx frame to Tx data start delay
163 */
164 txf2txs = AR5K_INIT_TXF2TXD_START_DEFAULT;
165
166 /*
162 * 5210 initvals don't include usec settings 167 * 5210 initvals don't include usec settings
163 * so we need to use magic values here for 168 * so we need to use magic values here for
164 * tx/rx latencies 169 * tx/rx latencies
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index ad57a6d23110..d9ff8413ab9a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -5,7 +5,7 @@ config ATH9K_COMMON
5 5
6config ATH9K 6config ATH9K
7 tristate "Atheros 802.11n wireless cards support" 7 tristate "Atheros 802.11n wireless cards support"
8 depends on PCI && MAC80211 8 depends on MAC80211
9 select ATH9K_HW 9 select ATH9K_HW
10 select MAC80211_LEDS 10 select MAC80211_LEDS
11 select LEDS_CLASS 11 select LEDS_CLASS
@@ -23,6 +23,25 @@ config ATH9K
23 23
24 If you choose to build a module, it'll be called ath9k. 24 If you choose to build a module, it'll be called ath9k.
25 25
26config ATH9K_PCI
27 bool "Atheros ath9k PCI/PCIe bus support"
28 depends on ATH9K && PCI
29 default PCI
30 ---help---
31 This option enables the PCI bus support in ath9k.
32
33 Say Y, if you have a compatible PCI/PCIe wireless card.
34
35config ATH9K_AHB
36 bool "Atheros ath9k AHB bus support"
37 depends on ATH9K
38 default n
39 ---help---
40 This option enables the AHB bus support in ath9k.
41
42 Say Y, if you have a SoC with a compatible built-in
43 wireless MAC. Say N if unsure.
44
26config ATH9K_DEBUGFS 45config ATH9K_DEBUGFS
27 bool "Atheros ath9k debugging" 46 bool "Atheros ath9k debugging"
28 depends on ATH9K && DEBUG_FS 47 depends on ATH9K && DEBUG_FS
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4d66ca8042eb..05a6fade7b1c 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -6,8 +6,8 @@ ath9k-y += beacon.o \
6 xmit.o \ 6 xmit.o \
7 7
8ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 8ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
9ath9k-$(CONFIG_PCI) += pci.o 9ath9k-$(CONFIG_ATH9K_PCI) += pci.o
10ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o 10ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
11ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 11ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
12 12
13obj-$(CONFIG_ATH9K) += ath9k.o 13obj-$(CONFIG_ATH9K) += ath9k.o
@@ -48,4 +48,6 @@ ath9k_htc-y += htc_hst.o \
48 htc_drv_init.o \ 48 htc_drv_init.o \
49 htc_drv_gpio.o 49 htc_drv_gpio.o
50 50
51ath9k_htc-$(CONFIG_ATH9K_HTC_DEBUGFS) += htc_drv_debug.o
52
51obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o 53obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 9cb0efa9b4c0..61956392f2da 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -21,6 +21,18 @@
21#include <linux/ath9k_platform.h> 21#include <linux/ath9k_platform.h>
22#include "ath9k.h" 22#include "ath9k.h"
23 23
24static const struct platform_device_id ath9k_platform_id_table[] = {
25 {
26 .name = "ath9k",
27 .driver_data = AR5416_AR9100_DEVID,
28 },
29 {
30 .name = "ar934x_wmac",
31 .driver_data = AR9300_DEVID_AR9340,
32 },
33 {},
34};
35
24/* return bus cachesize in 4B word units */ 36/* return bus cachesize in 4B word units */
25static void ath_ahb_read_cachesize(struct ath_common *common, int *csz) 37static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
26{ 38{
@@ -57,6 +69,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
57 struct ath_softc *sc; 69 struct ath_softc *sc;
58 struct ieee80211_hw *hw; 70 struct ieee80211_hw *hw;
59 struct resource *res; 71 struct resource *res;
72 const struct platform_device_id *id = platform_get_device_id(pdev);
60 int irq; 73 int irq;
61 int ret = 0; 74 int ret = 0;
62 struct ath_hw *ah; 75 struct ath_hw *ah;
@@ -116,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
116 goto err_free_hw; 129 goto err_free_hw;
117 } 130 }
118 131
119 ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops); 132 ret = ath9k_init_device(id->driver_data, sc, 0x0, &ath_ahb_bus_ops);
120 if (ret) { 133 if (ret) {
121 dev_err(&pdev->dev, "failed to initialize device\n"); 134 dev_err(&pdev->dev, "failed to initialize device\n");
122 goto err_irq; 135 goto err_irq;
@@ -165,8 +178,11 @@ static struct platform_driver ath_ahb_driver = {
165 .name = "ath9k", 178 .name = "ath9k",
166 .owner = THIS_MODULE, 179 .owner = THIS_MODULE,
167 }, 180 },
181 .id_table = ath9k_platform_id_table,
168}; 182};
169 183
184MODULE_DEVICE_TABLE(platform, ath9k_platform_id_table);
185
170int ath_ahb_init(void) 186int ath_ahb_init(void)
171{ 187{
172 return platform_driver_register(&ath_ahb_driver); 188 return platform_driver_register(&ath_ahb_driver);
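
The ahb.c hunk above registers an ath9k_platform_id_table so the one platform driver can bind to both the "ath9k" and "ar934x_wmac" device names, and ath_ahb_probe() now picks the device ID out of id->driver_data instead of hard-coding AR5416_AR9100_DEVID. A userspace sketch of that name-to-driver_data lookup, the role platform_get_device_id() plays here; the numeric IDs below are illustrative stand-ins, not the real device IDs.

#include <stdio.h>
#include <string.h>

struct platform_id {
	const char    *name;
	unsigned long  driver_data;   /* device ID handed to the init path */
};

static const struct platform_id id_table[] = {
	{ "ath9k",       0x0027 },    /* stand-in for AR5416_AR9100_DEVID */
	{ "ar934x_wmac", 0x0030 },    /* stand-in for AR9300_DEVID_AR9340 */
	{ NULL,          0 },
};

static const struct platform_id *get_device_id(const char *dev_name)
{
	for (const struct platform_id *id = id_table; id->name; id++)
		if (!strcmp(id->name, dev_name))
			return id;
	return NULL;
}

int main(void)
{
	const struct platform_id *id = get_device_id("ar934x_wmac");

	if (id)
		printf("probing %s with device id 0x%04lx\n",
		       id->name, id->driver_data);
	return 0;
}
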
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2e31c775351f..5a1f4f511bc1 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -899,12 +899,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
899 * check here default level should not modify INI setting. 899 * check here default level should not modify INI setting.
900 */ 900 */
901 if (use_new_ani(ah)) { 901 if (use_new_ani(ah)) {
902 const struct ani_ofdm_level_entry *entry_ofdm;
903 const struct ani_cck_level_entry *entry_cck;
904
905 entry_ofdm = &ofdm_level_table[ATH9K_ANI_OFDM_DEF_LEVEL];
906 entry_cck = &cck_level_table[ATH9K_ANI_CCK_DEF_LEVEL];
907
908 ah->aniperiod = ATH9K_ANI_PERIOD_NEW; 902 ah->aniperiod = ATH9K_ANI_PERIOD_NEW;
909 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW; 903 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
910 } else { 904 } else {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 106c0b06cf55..4bf9dab4f2b3 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -44,6 +44,34 @@ static const int m1ThreshExt_off = 127;
44static const int m2ThreshExt_off = 127; 44static const int m2ThreshExt_off = 127;
45 45
46 46
47static void ar5008_rf_bank_setup(u32 *bank, struct ar5416IniArray *array,
48 int col)
49{
50 int i;
51
52 for (i = 0; i < array->ia_rows; i++)
53 bank[i] = INI_RA(array, i, col);
54}
55
56
57#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) \
58 ar5008_write_rf_array(ah, iniarray, regData, &(regWr))
59
60static void ar5008_write_rf_array(struct ath_hw *ah, struct ar5416IniArray *array,
61 u32 *data, unsigned int *writecnt)
62{
63 int r;
64
65 ENABLE_REGWRITE_BUFFER(ah);
66
67 for (r = 0; r < array->ia_rows; r++) {
68 REG_WRITE(ah, INI_RA(array, r, 0), data[r]);
69 DO_DELAY(*writecnt);
70 }
71
72 REGWRITE_BUFFER_FLUSH(ah);
73}
74
47/** 75/**
48 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters 76 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
49 * @rfbuf: 77 * @rfbuf:
@@ -530,16 +558,16 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
530 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); 558 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
531 559
532 /* Setup Bank 0 Write */ 560 /* Setup Bank 0 Write */
533 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1); 561 ar5008_rf_bank_setup(ah->analogBank0Data, &ah->iniBank0, 1);
534 562
535 /* Setup Bank 1 Write */ 563 /* Setup Bank 1 Write */
536 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1); 564 ar5008_rf_bank_setup(ah->analogBank1Data, &ah->iniBank1, 1);
537 565
538 /* Setup Bank 2 Write */ 566 /* Setup Bank 2 Write */
539 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1); 567 ar5008_rf_bank_setup(ah->analogBank2Data, &ah->iniBank2, 1);
540 568
541 /* Setup Bank 6 Write */ 569 /* Setup Bank 6 Write */
542 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3, 570 ar5008_rf_bank_setup(ah->analogBank3Data, &ah->iniBank3,
543 modesIndex); 571 modesIndex);
544 { 572 {
545 int i; 573 int i;
@@ -569,7 +597,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
569 } 597 }
570 598
571 /* Setup Bank 7 Setup */ 599 /* Setup Bank 7 Setup */
572 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1); 600 ar5008_rf_bank_setup(ah->analogBank7Data, &ah->iniBank7, 1);
573 601
574 /* Write Analog registers */ 602 /* Write Analog registers */
575 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data, 603 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
@@ -729,6 +757,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
729 struct ath9k_channel *chan) 757 struct ath9k_channel *chan)
730{ 758{
731 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 759 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
760 struct ath_common *common = ath9k_hw_common(ah);
732 int i, regWrites = 0; 761 int i, regWrites = 0;
733 struct ieee80211_channel *channel = chan->chan; 762 struct ieee80211_channel *channel = chan->chan;
734 u32 modesIndex, freqIndex; 763 u32 modesIndex, freqIndex;
@@ -805,7 +834,8 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
805 REG_WRITE(ah, reg, val); 834 REG_WRITE(ah, reg, val);
806 835
807 if (reg >= 0x7800 && reg < 0x78a0 836 if (reg >= 0x7800 && reg < 0x78a0
808 && ah->config.analog_shiftreg) { 837 && ah->config.analog_shiftreg
838 && (common->bus_ops->ath_bus_type != ATH_USB)) {
809 udelay(100); 839 udelay(100);
810 } 840 }
811 841
@@ -835,7 +865,8 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
835 REG_WRITE(ah, reg, val); 865 REG_WRITE(ah, reg, val);
836 866
837 if (reg >= 0x7800 && reg < 0x78a0 867 if (reg >= 0x7800 && reg < 0x78a0
838 && ah->config.analog_shiftreg) { 868 && ah->config.analog_shiftreg
869 && (common->bus_ops->ath_bus_type != ATH_USB)) {
839 udelay(100); 870 udelay(100);
840 } 871 }
841 872
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 76388c6d6692..cb611b287b35 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -26,6 +26,27 @@ enum ar9002_cal_types {
26 IQ_MISMATCH_CAL = BIT(2), 26 IQ_MISMATCH_CAL = BIT(2),
27}; 27};
28 28
29static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
30 struct ath9k_channel *chan,
31 enum ar9002_cal_types cal_type)
32{
33 bool supported = false;
34 switch (ah->supp_cals & cal_type) {
35 case IQ_MISMATCH_CAL:
36 /* Run IQ Mismatch for non-CCK only */
37 if (!IS_CHAN_B(chan))
38 supported = true;
39 break;
40 case ADC_GAIN_CAL:
41 case ADC_DC_CAL:
42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
43 if (!IS_CHAN_B(chan) &&
44 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
45 supported = true;
46 break;
47 }
48 return supported;
49}
29 50
30static void ar9002_hw_setup_calibration(struct ath_hw *ah, 51static void ar9002_hw_setup_calibration(struct ath_hw *ah,
31 struct ath9k_cal_list *currCal) 52 struct ath9k_cal_list *currCal)
@@ -858,26 +879,32 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
858 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) { 879 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
859 ah->supp_cals = IQ_MISMATCH_CAL; 880 ah->supp_cals = IQ_MISMATCH_CAL;
860 881
861 if (AR_SREV_9160_10_OR_LATER(ah) && 882 if (AR_SREV_9160_10_OR_LATER(ah))
862 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) {
863 ah->supp_cals |= ADC_GAIN_CAL | ADC_DC_CAL; 883 ah->supp_cals |= ADC_GAIN_CAL | ADC_DC_CAL;
864 884
885 if (AR_SREV_9287(ah))
886 ah->supp_cals &= ~ADC_GAIN_CAL;
865 887
888 if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) {
866 INIT_CAL(&ah->adcgain_caldata); 889 INIT_CAL(&ah->adcgain_caldata);
867 INSERT_CAL(ah, &ah->adcgain_caldata); 890 INSERT_CAL(ah, &ah->adcgain_caldata);
868 ath_dbg(common, ATH_DBG_CALIBRATE, 891 ath_dbg(common, ATH_DBG_CALIBRATE,
869 "enabling ADC Gain Calibration.\n"); 892 "enabling ADC Gain Calibration.\n");
893 }
870 894
895 if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) {
871 INIT_CAL(&ah->adcdc_caldata); 896 INIT_CAL(&ah->adcdc_caldata);
872 INSERT_CAL(ah, &ah->adcdc_caldata); 897 INSERT_CAL(ah, &ah->adcdc_caldata);
873 ath_dbg(common, ATH_DBG_CALIBRATE, 898 ath_dbg(common, ATH_DBG_CALIBRATE,
874 "enabling ADC DC Calibration.\n"); 899 "enabling ADC DC Calibration.\n");
875 } 900 }
876 901
877 INIT_CAL(&ah->iq_caldata); 902 if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) {
878 INSERT_CAL(ah, &ah->iq_caldata); 903 INIT_CAL(&ah->iq_caldata);
879 ath_dbg(common, ATH_DBG_CALIBRATE, 904 INSERT_CAL(ah, &ah->iq_caldata);
880 "enabling IQ Calibration.\n"); 905 ath_dbg(common, ATH_DBG_CALIBRATE,
906 "enabling IQ Calibration.\n");
907 }
881 908
882 ah->cal_list_curr = ah->cal_list; 909 ah->cal_list_curr = ah->cal_list;
883 910
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 399ab3bb299b..7a332f16b79a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -290,7 +290,6 @@ static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
290 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) 290 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
291 | SM(txPower, AR_XmitPower) 291 | SM(txPower, AR_XmitPower)
292 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) 292 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
293 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
294 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0) 293 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
295 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0); 294 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
296 295
@@ -311,6 +310,16 @@ static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
311 } 310 }
312} 311}
313 312
313static void ar9002_hw_set_clrdmask(struct ath_hw *ah, void *ds, bool val)
314{
315 struct ar5416_desc *ads = AR5416DESC(ds);
316
317 if (val)
318 ads->ds_ctl0 |= AR_ClrDestMask;
319 else
320 ads->ds_ctl0 &= ~AR_ClrDestMask;
321}
322
314static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds, 323static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
315 void *lastds, 324 void *lastds,
316 u32 durUpdateEn, u32 rtsctsRate, 325 u32 durUpdateEn, u32 rtsctsRate,
@@ -406,26 +415,6 @@ static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
406 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr); 415 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
407} 416}
408 417
409static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
410 u32 burstDuration)
411{
412 struct ar5416_desc *ads = AR5416DESC(ds);
413
414 ads->ds_ctl2 &= ~AR_BurstDur;
415 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
416}
417
418static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
419 u32 vmf)
420{
421 struct ar5416_desc *ads = AR5416DESC(ds);
422
423 if (vmf)
424 ads->ds_ctl0 |= AR_VirtMoreFrag;
425 else
426 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
427}
428
429void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, 418void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
430 u32 size, u32 flags) 419 u32 size, u32 flags)
431{ 420{
@@ -458,6 +447,5 @@ void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
458 ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle; 447 ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
459 ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last; 448 ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
460 ops->clr11n_aggr = ar9002_hw_clr11n_aggr; 449 ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
461 ops->set11n_burstduration = ar9002_hw_set11n_burstduration; 450 ops->set_clrdmask = ar9002_hw_set_clrdmask;
462 ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
463} 451}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 7d68d61e406b..a57e963cf0dc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -517,23 +517,7 @@ static void ar9002_hw_set_nf_limits(struct ath_hw *ah)
517 } 517 }
518} 518}
519 519
520void ar9002_hw_attach_phy_ops(struct ath_hw *ah) 520static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
521{
522 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
523
524 priv_ops->set_rf_regs = NULL;
525 priv_ops->rf_alloc_ext_banks = NULL;
526 priv_ops->rf_free_ext_banks = NULL;
527 priv_ops->rf_set_freq = ar9002_hw_set_channel;
528 priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
529 priv_ops->olc_init = ar9002_olc_init;
530 priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
531 priv_ops->do_getnf = ar9002_hw_do_getnf;
532
533 ar9002_hw_set_nf_limits(ah);
534}
535
536void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
537 struct ath_hw_antcomb_conf *antconf) 521 struct ath_hw_antcomb_conf *antconf)
538{ 522{
539 u32 regval; 523 u32 regval;
@@ -545,10 +529,11 @@ void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
545 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S; 529 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
546 antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >> 530 antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
547 AR_PHY_9285_FAST_DIV_BIAS_S; 531 AR_PHY_9285_FAST_DIV_BIAS_S;
532 antconf->lna1_lna2_delta = -3;
533 antconf->div_group = 0;
548} 534}
549EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_get);
550 535
551void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah, 536static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
552 struct ath_hw_antcomb_conf *antconf) 537 struct ath_hw_antcomb_conf *antconf)
553{ 538{
554 u32 regval; 539 u32 regval;
@@ -566,4 +551,23 @@ void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
566 551
567 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval); 552 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
568} 553}
569EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_set); 554
555void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
556{
557 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
558 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
559
560 priv_ops->set_rf_regs = NULL;
561 priv_ops->rf_alloc_ext_banks = NULL;
562 priv_ops->rf_free_ext_banks = NULL;
563 priv_ops->rf_set_freq = ar9002_hw_set_channel;
564 priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
565 priv_ops->olc_init = ar9002_olc_init;
566 priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
567 priv_ops->do_getnf = ar9002_hw_do_getnf;
568
569 ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get;
570 ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set;
571
572 ar9002_hw_set_nf_limits(ah);
573}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 37663dbbcf57..47780ef1c892 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -483,7 +483,11 @@
483#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000 483#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
484#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19 484#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
485 485
486#define AR_PHY_TX_PWRCTRL8 0xa278
487
486#define AR_PHY_TX_PWRCTRL9 0xa27C 488#define AR_PHY_TX_PWRCTRL9 0xa27C
489
490#define AR_PHY_TX_PWRCTRL10 0xa394
487#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00 491#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
488#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10 492#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
489#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000 493#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
@@ -495,6 +499,8 @@
495 499
496#define AR_PHY_CH0_TX_PWRCTRL11 0xa398 500#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
497#define AR_PHY_CH1_TX_PWRCTRL11 0xb398 501#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
502#define AR_PHY_CH0_TX_PWRCTRL12 0xa3dc
503#define AR_PHY_CH0_TX_PWRCTRL13 0xa3e0
498#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00 504#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
499#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10 505#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
500 506
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 9ecca93392e8..f915a3dbfcad 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -34,10 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
34 34
35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = { 35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
37 {0x0000a2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800}, 37 {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
38 {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000}, 38 {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
39 {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000}, 39 {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
40 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 40 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
41 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 41 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
42 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 42 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
43 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 43 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -119,14 +119,14 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
119 {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 119 {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
120 {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 120 {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
121 {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 121 {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
122 {0x0000b2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800}, 122 {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
123 {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000}, 123 {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
124 {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000}, 124 {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
125 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 125 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
126 {0x0000c2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800}, 126 {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
127 {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000}, 127 {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
128 {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000}, 128 {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
129 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 129 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
130 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 130 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
131 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, 131 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
132 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 132 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -835,10 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
835 835
836static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = { 836static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
838 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 838 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
839 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 839 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
840 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 840 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
841 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 841 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
842 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 842 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
843 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 843 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
844 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 844 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -920,14 +920,14 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
920 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, 920 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
921 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, 921 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
922 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, 922 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
923 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 923 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
924 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 924 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
925 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 925 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
926 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 926 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
927 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 927 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
928 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 928 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
929 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 929 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
930 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 930 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
931 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6}, 931 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
932 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001}, 932 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
933 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c}, 933 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@@ -941,10 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
941 941
942static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = { 942static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
943 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 943 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
944 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800}, 944 {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
945 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000}, 945 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
946 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000}, 946 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
947 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 947 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
948 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, 948 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
949 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 949 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
950 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, 950 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -1026,14 +1026,14 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
1026 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, 1026 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1027 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, 1027 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1028 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, 1028 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
1029 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800}, 1029 {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1030 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000}, 1030 {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1031 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000}, 1031 {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1032 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1032 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1033 {0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800}, 1033 {0x0000c2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
1034 {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000}, 1034 {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
1035 {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000}, 1035 {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
1036 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1036 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1037 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4}, 1037 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
1038 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001}, 1038 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
1039 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 1039 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@@ -1307,10 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
1307 1307
1308static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = { 1308static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
1309 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 1309 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1310 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 1310 {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
1311 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 1311 {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
1312 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 1312 {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
1313 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1313 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1314 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1314 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1315 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1315 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1316 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 1316 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1329,21 +1329,21 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
1329 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24}, 1329 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
1330 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640}, 1330 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
1331 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, 1331 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
1332 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861}, 1332 {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
1333 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81}, 1333 {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
1334 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83}, 1334 {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
1335 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84}, 1335 {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
1336 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3}, 1336 {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
1337 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5}, 1337 {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
1338 {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9}, 1338 {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
1339 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb}, 1339 {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
1340 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 1340 {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
1341 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 1341 {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
1342 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 1342 {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
1343 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 1343 {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
1344 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 1344 {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
1345 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 1345 {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
1346 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 1346 {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
1347 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 1347 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
1348 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 1348 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
1349 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 1349 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1361,45 +1361,45 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
1361 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24}, 1361 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
1362 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640}, 1362 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
1363 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660}, 1363 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
1364 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861}, 1364 {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
1365 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81}, 1365 {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
1366 {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83}, 1366 {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
1367 {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84}, 1367 {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
1368 {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3}, 1368 {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
1369 {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5}, 1369 {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
1370 {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9}, 1370 {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
1371 {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb}, 1371 {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
1372 {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1372 {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
1373 {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1373 {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
1374 {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1374 {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
1375 {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1375 {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
1376 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1376 {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
1377 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1377 {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
1378 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 1378 {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
1379 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1379 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1380 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1380 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1381 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1381 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1382 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1382 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1383 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1383 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1384 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000}, 1384 {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
1385 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501}, 1385 {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
1386 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501}, 1386 {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
1387 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03}, 1387 {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
1388 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04}, 1388 {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
1389 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04}, 1389 {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
1390 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005}, 1390 {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
1391 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 1391 {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
1392 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 1392 {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
1393 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 1393 {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
1394 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 1394 {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
1395 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 1395 {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
1396 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 1396 {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
1397 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 1397 {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
1398 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1398 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1399 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 1399 {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
1400 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 1400 {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
1401 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 1401 {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
1402 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1402 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
1403 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 1403 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
1404 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, 1404 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
1405 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c}, 1405 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 4a4cd88429c0..f276cb922b4d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,13 +18,13 @@
 #include "hw-ops.h"
 #include "ar9003_phy.h"
 
-#define MPASS	3
 #define MAX_MEASUREMENT	8
-#define MAX_DIFFERENCE	10
+#define MAX_MAG_DELTA	11
+#define MAX_PHS_DELTA	10
 
 struct coeff {
-	int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MPASS];
-	int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MPASS];
+	int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
+	int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
 	int iqc_coeff[2];
 };
 
@@ -185,17 +185,19 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
 
 	/* Accumulate IQ cal measures for active chains */
 	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
-		ah->totalPowerMeasI[i] +=
-			REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
-		ah->totalPowerMeasQ[i] +=
-			REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
-		ah->totalIqCorrMeas[i] +=
-			(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
-		ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
-			"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
-			ah->cal_samples, i, ah->totalPowerMeasI[i],
-			ah->totalPowerMeasQ[i],
-			ah->totalIqCorrMeas[i]);
+		if (ah->txchainmask & BIT(i)) {
+			ah->totalPowerMeasI[i] +=
+				REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+			ah->totalPowerMeasQ[i] +=
+				REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+			ah->totalIqCorrMeas[i] +=
+				(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+			ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+				"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+				ah->cal_samples, i, ah->totalPowerMeasI[i],
+				ah->totalPowerMeasQ[i],
+				ah->totalIqCorrMeas[i]);
+		}
 	}
 }
 
@@ -608,36 +610,48 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
 	return true;
 }
 
-static bool ar9003_hw_compute_closest_pass_and_avg(int *mp_coeff, int *mp_avg)
+static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
+				     int max_delta)
 {
-	int diff[MPASS];
-
-	diff[0] = abs(mp_coeff[0] - mp_coeff[1]);
-	diff[1] = abs(mp_coeff[1] - mp_coeff[2]);
-	diff[2] = abs(mp_coeff[2] - mp_coeff[0]);
-
-	if (diff[0] > MAX_DIFFERENCE &&
-	    diff[1] > MAX_DIFFERENCE &&
-	    diff[2] > MAX_DIFFERENCE)
-		return false;
+	int mp_max = -64, max_idx = 0;
+	int mp_min = 63, min_idx = 0;
+	int mp_avg = 0, i, outlier_idx = 0;
+
+	/* find min/max mismatch across all calibrated gains */
+	for (i = 0; i < nmeasurement; i++) {
+		mp_avg += mp_coeff[i];
+		if (mp_coeff[i] > mp_max) {
+			mp_max = mp_coeff[i];
+			max_idx = i;
+		} else if (mp_coeff[i] < mp_min) {
+			mp_min = mp_coeff[i];
+			min_idx = i;
+		}
+	}
 
-	if (diff[0] <= diff[1] && diff[0] <= diff[2])
-		*mp_avg = (mp_coeff[0] + mp_coeff[1]) / 2;
-	else if (diff[1] <= diff[2])
-		*mp_avg = (mp_coeff[1] + mp_coeff[2]) / 2;
-	else
-		*mp_avg = (mp_coeff[2] + mp_coeff[0]) / 2;
+	/* find average (exclude max abs value) */
+	for (i = 0; i < nmeasurement; i++) {
+		if ((abs(mp_coeff[i]) < abs(mp_max)) ||
+		    (abs(mp_coeff[i]) < abs(mp_min)))
+			mp_avg += mp_coeff[i];
+	}
+	mp_avg /= (nmeasurement - 1);
 
-	return true;
+	/* detect outlier */
+	if (abs(mp_max - mp_min) > max_delta) {
+		if (abs(mp_max - mp_avg) > abs(mp_min - mp_avg))
+			outlier_idx = max_idx;
+		else
+			outlier_idx = min_idx;
+	}
+	mp_coeff[outlier_idx] = mp_avg;
 }
 
 static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
 						 u8 num_chains,
 						 struct coeff *coeff)
 {
-	struct ath_common *common = ath9k_hw_common(ah);
 	int i, im, nmeasurement;
-	int magnitude, phase;
 	u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
 
 	memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
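
The new ar9003_hw_detect_outlier() above replaces the old closest-two-of-three-passes averaging: it scans the per-gain coefficients once and, if the min/max spread exceeds max_delta, overwrites the measurement farthest from the mean with the mean. A simplified standalone illustration of that idea (not a copy of the driver routine, which also tries to exclude the extreme values from its average):

#include <stdio.h>
#include <stdlib.h>

static void detect_outlier(int *coeff, int n, int max_delta)
{
	int max = coeff[0], max_idx = 0;
	int min = coeff[0], min_idx = 0;
	int sum = 0, avg, i;

	if (n < 2)
		return;

	for (i = 0; i < n; i++) {
		sum += coeff[i];
		if (coeff[i] > max) {
			max = coeff[i];
			max_idx = i;
		}
		if (coeff[i] < min) {
			min = coeff[i];
			min_idx = i;
		}
	}
	avg = sum / n;

	/* spread too large: replace the farther of min/max with the mean */
	if (abs(max - min) > max_delta) {
		int victim = (abs(max - avg) > abs(min - avg)) ? max_idx : min_idx;
		coeff[victim] = avg;
	}
}

int main(void)
{
	int mag[8] = { 5, 6, 4, 5, 40, 6, 5, 4 };	/* one obvious outlier */
	int i;

	detect_outlier(mag, 8, 11);			/* 11 == MAX_MAG_DELTA above */
	for (i = 0; i < 8; i++)
		printf("%d ", mag[i]);
	printf("\n");
	return 0;
}
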
@@ -657,37 +671,28 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
 
 	/* Load the average of 2 passes */
 	for (i = 0; i < num_chains; i++) {
-		if (AR_SREV_9485(ah))
-			nmeasurement = REG_READ_FIELD(ah,
-					AR_PHY_TX_IQCAL_STATUS_B0_9485,
-					AR_PHY_CALIBRATED_GAINS_0);
-		else
-			nmeasurement = REG_READ_FIELD(ah,
-					AR_PHY_TX_IQCAL_STATUS_B0,
-					AR_PHY_CALIBRATED_GAINS_0);
+		nmeasurement = REG_READ_FIELD(ah,
+				AR_PHY_TX_IQCAL_STATUS_B0,
+				AR_PHY_CALIBRATED_GAINS_0);
 
 		if (nmeasurement > MAX_MEASUREMENT)
 			nmeasurement = MAX_MEASUREMENT;
 
-		for (im = 0; im < nmeasurement; im++) {
-			/*
-			 * Determine which 2 passes are closest and compute avg
-			 * magnitude
-			 */
-			if (!ar9003_hw_compute_closest_pass_and_avg(coeff->mag_coeff[i][im],
-								    &magnitude))
-				goto disable_txiqcal;
+		/* detect outlier only if nmeasurement > 1 */
+		if (nmeasurement > 1) {
+			/* Detect magnitude outlier */
+			ar9003_hw_detect_outlier(coeff->mag_coeff[i],
+					nmeasurement, MAX_MAG_DELTA);
 
-			/*
-			 * Determine which 2 passes are closest and compute avg
-			 * phase
-			 */
-			if (!ar9003_hw_compute_closest_pass_and_avg(coeff->phs_coeff[i][im],
-								    &phase))
-				goto disable_txiqcal;
+			/* Detect phase outlier */
+			ar9003_hw_detect_outlier(coeff->phs_coeff[i],
+					nmeasurement, MAX_PHS_DELTA);
+		}
+
+		for (im = 0; im < nmeasurement; im++) {
 
-			coeff->iqc_coeff[0] = (magnitude & 0x7f) |
-					      ((phase & 0x7f) << 7);
+			coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) |
+				((coeff->phs_coeff[i][im] & 0x7f) << 7);
 
 			if ((im % 2) == 0)
 				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
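
The iqc_coeff packing above stores magnitude and phase as two signed 7-bit fields in one word; values above 63 are folded back by subtracting 128 in the post-processing hunk further down. A minimal sketch of that pack/unpack with example values:

#include <stdio.h>

static unsigned int pack_iq(int mag, int phs)
{
	return (mag & 0x7f) | ((phs & 0x7f) << 7);
}

static int sext7(unsigned int v)
{
	return (v > 63) ? (int)v - 128 : (int)v;	/* 7-bit two's complement */
}

int main(void)
{
	int mag = -5, phs = 17;
	unsigned int packed = pack_iq(mag, phs);

	printf("packed = 0x%04x, mag = %d, phs = %d\n",
	       packed, sext7(packed & 0x7f), sext7((packed >> 7) & 0x7f));
	return 0;
}
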
@@ -707,141 +712,37 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
707 712
708 return; 713 return;
709 714
710disable_txiqcal:
711 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
712 AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x0);
713 REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
714 AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x0);
715
716 ath_dbg(common, ATH_DBG_CALIBRATE, "TX IQ Cal disabled\n");
717} 715}
718 716
719static void ar9003_hw_tx_iq_cal(struct ath_hw *ah) 717static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
720{ 718{
721 struct ath_common *common = ath9k_hw_common(ah); 719 struct ath_common *common = ath9k_hw_common(ah);
722 static const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
723 AR_PHY_TX_IQCAL_STATUS_B0,
724 AR_PHY_TX_IQCAL_STATUS_B1,
725 AR_PHY_TX_IQCAL_STATUS_B2,
726 };
727 static const u32 chan_info_tab[] = {
728 AR_PHY_CHAN_INFO_TAB_0,
729 AR_PHY_CHAN_INFO_TAB_1,
730 AR_PHY_CHAN_INFO_TAB_2,
731 };
732 struct coeff coeff;
733 s32 iq_res[6];
734 s32 i, j, ip, im, nmeasurement;
735 u8 nchains = get_streams(common->tx_chainmask);
736
737 for (ip = 0; ip < MPASS; ip++) {
738 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
739 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
740 DELPT);
741 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
742 AR_PHY_TX_IQCAL_START_DO_CAL,
743 AR_PHY_TX_IQCAL_START_DO_CAL);
744
745 if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
746 AR_PHY_TX_IQCAL_START_DO_CAL,
747 0, AH_WAIT_TIMEOUT)) {
748 ath_dbg(common, ATH_DBG_CALIBRATE,
749 "Tx IQ Cal not complete.\n");
750 goto TX_IQ_CAL_FAILED;
751 }
752
753 nmeasurement = REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_STATUS_B0,
754 AR_PHY_CALIBRATED_GAINS_0);
755 if (nmeasurement > MAX_MEASUREMENT)
756 nmeasurement = MAX_MEASUREMENT;
757
758 for (i = 0; i < nchains; i++) {
759 ath_dbg(common, ATH_DBG_CALIBRATE,
760 "Doing Tx IQ Cal for chain %d.\n", i);
761 for (im = 0; im < nmeasurement; im++) {
762 if (REG_READ(ah, txiqcal_status[i]) &
763 AR_PHY_TX_IQCAL_STATUS_FAILED) {
764 ath_dbg(common, ATH_DBG_CALIBRATE,
765 "Tx IQ Cal failed for chain %d.\n", i);
766 goto TX_IQ_CAL_FAILED;
767 }
768
769 for (j = 0; j < 3; j++) {
770 u8 idx = 2 * j,
771 offset = 4 * (3 * im + j);
772
773 REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
774 AR_PHY_CHAN_INFO_TAB_S2_READ,
775 0);
776
777 /* 32 bits */
778 iq_res[idx] = REG_READ(ah,
779 chan_info_tab[i] +
780 offset);
781
782 REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
783 AR_PHY_CHAN_INFO_TAB_S2_READ,
784 1);
785
786 /* 16 bits */
787 iq_res[idx+1] = 0xffff & REG_READ(ah,
788 chan_info_tab[i] +
789 offset);
790
791 ath_dbg(common, ATH_DBG_CALIBRATE,
792 "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
793 idx, iq_res[idx], idx+1, iq_res[idx+1]);
794 }
795
796 if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
797 coeff.iqc_coeff)) {
798 ath_dbg(common, ATH_DBG_CALIBRATE,
799 "Failed in calculation of IQ correction.\n");
800 goto TX_IQ_CAL_FAILED;
801 }
802 coeff.mag_coeff[i][im][ip] =
803 coeff.iqc_coeff[0] & 0x7f;
804 coeff.phs_coeff[i][im][ip] =
805 (coeff.iqc_coeff[0] >> 7) & 0x7f;
806
807 if (coeff.mag_coeff[i][im][ip] > 63)
808 coeff.mag_coeff[i][im][ip] -= 128;
809 if (coeff.phs_coeff[i][im][ip] > 63)
810 coeff.phs_coeff[i][im][ip] -= 128;
811
812 }
813 }
814 }
815
816 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, nchains, &coeff);
817
818 return;
819
820TX_IQ_CAL_FAILED:
821 ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
822}
823
824static void ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
825{
826 u8 tx_gain_forced; 720 u8 tx_gain_forced;
827 721
828 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1_9485,
829 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT, DELPT);
830 tx_gain_forced = REG_READ_FIELD(ah, AR_PHY_TX_FORCED_GAIN, 722 tx_gain_forced = REG_READ_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
831 AR_PHY_TXGAIN_FORCE); 723 AR_PHY_TXGAIN_FORCE);
832 if (tx_gain_forced) 724 if (tx_gain_forced)
833 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN, 725 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
834 AR_PHY_TXGAIN_FORCE, 0); 726 AR_PHY_TXGAIN_FORCE, 0);
835 727
836 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START_9485, 728 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
837 AR_PHY_TX_IQCAL_START_DO_CAL_9485, 1); 729 AR_PHY_TX_IQCAL_START_DO_CAL, 1);
730
731 if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
732 AR_PHY_TX_IQCAL_START_DO_CAL, 0,
733 AH_WAIT_TIMEOUT)) {
734 ath_dbg(common, ATH_DBG_CALIBRATE,
735 "Tx IQ Cal is not completed.\n");
736 return false;
737 }
738 return true;
838} 739}
839 740
840static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah) 741static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
841{ 742{
842 struct ath_common *common = ath9k_hw_common(ah); 743 struct ath_common *common = ath9k_hw_common(ah);
843 const u32 txiqcal_status[AR9300_MAX_CHAINS] = { 744 const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
844 AR_PHY_TX_IQCAL_STATUS_B0_9485, 745 AR_PHY_TX_IQCAL_STATUS_B0,
845 AR_PHY_TX_IQCAL_STATUS_B1, 746 AR_PHY_TX_IQCAL_STATUS_B1,
846 AR_PHY_TX_IQCAL_STATUS_B2, 747 AR_PHY_TX_IQCAL_STATUS_B2,
847 }; 748 };
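
The reworked ar9003_hw_tx_iq_cal_run() above follows the usual kick-and-poll shape: write the self-clearing DO_CAL bit, then use ath9k_hw_wait() to poll until the hardware clears it (or a timeout expires) before the results are read back. A generic userspace sketch of that pattern with a mocked register; the names, bit value, and "finishes after three polls" behaviour here are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DO_CAL_BIT 0x00000001u

static uint32_t mock_reg;		/* stands in for the MMIO register */
static int countdown = 3;

static uint32_t reg_read(void)
{
	if (countdown-- == 0)		/* pretend hw finishes after a few polls */
		mock_reg &= ~DO_CAL_BIT;
	return mock_reg;
}

static bool wait_bit_clear(uint32_t mask, int max_polls)
{
	while (max_polls--) {
		if (!(reg_read() & mask))
			return true;	/* hardware cleared the bit: done */
	}
	return false;			/* timed out */
}

int main(void)
{
	mock_reg |= DO_CAL_BIT;		/* "start calibration" */
	printf("cal %s\n", wait_bit_clear(DO_CAL_BIT, 10) ? "done" : "timed out");
	return 0;
}
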
@@ -853,7 +754,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
853 struct coeff coeff; 754 struct coeff coeff;
854 s32 iq_res[6]; 755 s32 iq_res[6];
855 u8 num_chains = 0; 756 u8 num_chains = 0;
856 int i, ip, im, j; 757 int i, im, j;
857 int nmeasurement; 758 int nmeasurement;
858 759
859 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 760 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
@@ -861,71 +762,69 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
861 num_chains++; 762 num_chains++;
862 } 763 }
863 764
864 for (ip = 0; ip < MPASS; ip++) { 765 for (i = 0; i < num_chains; i++) {
865 for (i = 0; i < num_chains; i++) { 766 nmeasurement = REG_READ_FIELD(ah,
866 nmeasurement = REG_READ_FIELD(ah, 767 AR_PHY_TX_IQCAL_STATUS_B0,
867 AR_PHY_TX_IQCAL_STATUS_B0_9485, 768 AR_PHY_CALIBRATED_GAINS_0);
868 AR_PHY_CALIBRATED_GAINS_0); 769 if (nmeasurement > MAX_MEASUREMENT)
869 if (nmeasurement > MAX_MEASUREMENT) 770 nmeasurement = MAX_MEASUREMENT;
870 nmeasurement = MAX_MEASUREMENT;
871 771
872 for (im = 0; im < nmeasurement; im++) { 772 for (im = 0; im < nmeasurement; im++) {
873 ath_dbg(common, ATH_DBG_CALIBRATE, 773 ath_dbg(common, ATH_DBG_CALIBRATE,
874 "Doing Tx IQ Cal for chain %d.\n", i); 774 "Doing Tx IQ Cal for chain %d.\n", i);
875 775
876 if (REG_READ(ah, txiqcal_status[i]) & 776 if (REG_READ(ah, txiqcal_status[i]) &
877 AR_PHY_TX_IQCAL_STATUS_FAILED) { 777 AR_PHY_TX_IQCAL_STATUS_FAILED) {
878 ath_dbg(common, ATH_DBG_CALIBRATE, 778 ath_dbg(common, ATH_DBG_CALIBRATE,
879 "Tx IQ Cal failed for chain %d.\n", i); 779 "Tx IQ Cal failed for chain %d.\n", i);
880 goto tx_iqcal_fail; 780 goto tx_iqcal_fail;
881 } 781 }
882 782
883 for (j = 0; j < 3; j++) { 783 for (j = 0; j < 3; j++) {
884 u32 idx = 2 * j, offset = 4 * (3 * im + j); 784 u32 idx = 2 * j, offset = 4 * (3 * im + j);
885 785
886 REG_RMW_FIELD(ah, 786 REG_RMW_FIELD(ah,
887 AR_PHY_CHAN_INFO_MEMORY, 787 AR_PHY_CHAN_INFO_MEMORY,
888 AR_PHY_CHAN_INFO_TAB_S2_READ, 788 AR_PHY_CHAN_INFO_TAB_S2_READ,
889 0); 789 0);
890 790
891 /* 32 bits */ 791 /* 32 bits */
892 iq_res[idx] = REG_READ(ah, 792 iq_res[idx] = REG_READ(ah,
893 chan_info_tab[i] + 793 chan_info_tab[i] +
894 offset); 794 offset);
895 795
896 REG_RMW_FIELD(ah, 796 REG_RMW_FIELD(ah,
897 AR_PHY_CHAN_INFO_MEMORY, 797 AR_PHY_CHAN_INFO_MEMORY,
898 AR_PHY_CHAN_INFO_TAB_S2_READ, 798 AR_PHY_CHAN_INFO_TAB_S2_READ,
899 1); 799 1);
900 800
901 /* 16 bits */ 801 /* 16 bits */
902 iq_res[idx + 1] = 0xffff & REG_READ(ah, 802 iq_res[idx + 1] = 0xffff & REG_READ(ah,
903 chan_info_tab[i] + offset); 803 chan_info_tab[i] + offset);
904 804
905 ath_dbg(common, ATH_DBG_CALIBRATE, 805 ath_dbg(common, ATH_DBG_CALIBRATE,
906 "IQ RES[%d]=0x%x" 806 "IQ RES[%d]=0x%x"
907 "IQ_RES[%d]=0x%x\n", 807 "IQ_RES[%d]=0x%x\n",
908 idx, iq_res[idx], idx + 1, 808 idx, iq_res[idx], idx + 1,
909 iq_res[idx + 1]); 809 iq_res[idx + 1]);
910 } 810 }
911 811
912 if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, 812 if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
913 coeff.iqc_coeff)) { 813 coeff.iqc_coeff)) {
914 ath_dbg(common, ATH_DBG_CALIBRATE, 814 ath_dbg(common, ATH_DBG_CALIBRATE,
915 "Failed in calculation of IQ correction.\n"); 815 "Failed in calculation of \
916 goto tx_iqcal_fail; 816 IQ correction.\n");
917 } 817 goto tx_iqcal_fail;
818 }
918 819
919 coeff.mag_coeff[i][im][ip] = 820 coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f;
920 coeff.iqc_coeff[0] & 0x7f; 821 coeff.phs_coeff[i][im] =
921 coeff.phs_coeff[i][im][ip] = 822 (coeff.iqc_coeff[0] >> 7) & 0x7f;
922 (coeff.iqc_coeff[0] >> 7) & 0x7f;
923 823
924 if (coeff.mag_coeff[i][im][ip] > 63) 824 if (coeff.mag_coeff[i][im] > 63)
925 coeff.mag_coeff[i][im][ip] -= 128; 825 coeff.mag_coeff[i][im] -= 128;
926 if (coeff.phs_coeff[i][im][ip] > 63) 826 if (coeff.phs_coeff[i][im] > 63)
927 coeff.phs_coeff[i][im][ip] -= 128; 827 coeff.phs_coeff[i][im] -= 128;
928 }
929 } 828 }
930 } 829 }
931 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains, &coeff); 830 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains, &coeff);
@@ -940,31 +839,37 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 			       struct ath9k_channel *chan)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
+	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	int val;
+	bool txiqcal_done = false;
 
 	val = REG_READ(ah, AR_ENT_OTP);
 	ath_dbg(common, ATH_DBG_CALIBRATE, "ath9k: AR_ENT_OTP 0x%x\n", val);
 
-	if (AR_SREV_9485(ah))
-		ar9003_hw_set_chain_masks(ah, 0x1, 0x1);
-	else if (val & AR_ENT_OTP_CHAIN2_DISABLE)
+	/* Configure rx/tx chains before running AGC/TxiQ cals */
+	if (val & AR_ENT_OTP_CHAIN2_DISABLE)
 		ar9003_hw_set_chain_masks(ah, 0x3, 0x3);
 	else
-		/*
-		 * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain
-		 * mode before running AGC/TxIQ cals
-		 */
-		ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+		ar9003_hw_set_chain_masks(ah, pCap->rx_chainmask,
+					  pCap->tx_chainmask);
 
 	/* Do Tx IQ Calibration */
-	if (AR_SREV_9485(ah))
-		ar9003_hw_tx_iq_cal_run(ah);
-	else
-		ar9003_hw_tx_iq_cal(ah);
+	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+		      AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+		      DELPT);
 
-	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
-	udelay(5);
-	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+	/*
+	 * For AR9485 or later chips, TxIQ cal runs as part of
+	 * AGC calibration
+	 */
+	if (AR_SREV_9485_OR_LATER(ah))
+		txiqcal_done = true;
+	else {
+		txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+		udelay(5);
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+	}
 
 	/* Calibrate the AGC */
 	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -979,7 +884,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 		return false;
 	}
 
-	if (AR_SREV_9485(ah))
+	if (txiqcal_done)
 		ar9003_hw_tx_iq_cal_post_proc(ah);
 
 	/* Revert chainmasks to their original values before NF cal */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 6eadf975ae48..1e220354e4be 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -652,7 +652,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
 		.regDmn = { LE16(0), LE16(0x1f) },
 		.txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
 		.opCapFlags = {
-			.opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+			.opFlags = AR5416_OPFLAGS_11A,
 			.eepMisc = 0,
 		},
 		.rfSilent = 0,
@@ -922,7 +922,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
 		.db_stage2 = {3, 3, 3}, /* 3 chain */
 		.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
 		.db_stage4 = {3, 3, 3}, /* don't exist for 2G */
-		.xpaBiasLvl = 0,
+		.xpaBiasLvl = 0xf,
 		.txFrameToDataStart = 0x0e,
 		.txFrameToPaOn = 0x0e,
 		.txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
@@ -3217,7 +3217,6 @@ static int ar9300_compress_decision(struct ath_hw *ah,
 			   u8 *word, int length, int mdata_size)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	u8 *dptr;
 	const struct ar9300_eeprom *eep = NULL;
 
 	switch (code) {
@@ -3235,7 +3234,6 @@ static int ar9300_compress_decision(struct ath_hw *ah,
 		break;
 	case _CompressBlock:
 		if (reference == 0) {
-			dptr = mptr;
 		} else {
 			eep = ar9003_eeprom_struct_find_by_id(reference);
 			if (eep == NULL) {
@@ -3444,13 +3442,15 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
 {
 	int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
 
-	if (AR_SREV_9485(ah))
+	if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
 		REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
 	else {
 		REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
-		REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB,
-			      bias >> 2);
-		REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1);
+		REG_RMW_FIELD(ah, AR_CH0_THERM,
+			      AR_CH0_THERM_XPABIASLVL_MSB,
+			      bias >> 2);
+		REG_RMW_FIELD(ah, AR_CH0_THERM,
+			      AR_CH0_THERM_XPASHORT2GND, 1);
 	}
 }
 
@@ -3497,34 +3497,77 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
3497 3497
3498static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz) 3498static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3499{ 3499{
3500 int chain;
3501 u32 regval;
3502 u32 ant_div_ctl1;
3503 static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
3504 AR_PHY_SWITCH_CHAIN_0,
3505 AR_PHY_SWITCH_CHAIN_1,
3506 AR_PHY_SWITCH_CHAIN_2,
3507 };
3508
3500 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz); 3509 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
3510
3501 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value); 3511 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
3502 3512
3503 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); 3513 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
3504 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value); 3514 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
3505 3515
3506 value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz); 3516 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
3507 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value); 3517 if ((ah->rxchainmask & BIT(chain)) ||
3508 3518 (ah->txchainmask & BIT(chain))) {
3509 if (!AR_SREV_9485(ah)) { 3519 value = ar9003_hw_ant_ctrl_chain_get(ah, chain,
3510 value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz); 3520 is2ghz);
3511 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, 3521 REG_RMW_FIELD(ah, switch_chain_reg[chain],
3512 value); 3522 AR_SWITCH_TABLE_ALL, value);
3513 3523 }
3514 value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
3515 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL,
3516 value);
3517 } 3524 }
3518 3525
3519 if (AR_SREV_9485(ah)) { 3526 if (AR_SREV_9485(ah)) {
3520 value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1); 3527 value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
3521 REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_CTRL_ALL, 3528 /*
3522 value); 3529 * main_lnaconf, alt_lnaconf, main_tb, alt_tb
3523 REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE, 3530 * are the fields present
3524 value >> 6); 3531 */
3525 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE, 3532 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
3526 value >> 7); 3533 regval &= (~AR_ANT_DIV_CTRL_ALL);
3534 regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
3535 /* enable_lnadiv */
3536 regval &= (~AR_PHY_9485_ANT_DIV_LNADIV);
3537 regval |= ((value >> 6) & 0x1) <<
3538 AR_PHY_9485_ANT_DIV_LNADIV_S;
3539 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3540
3541 /*enable fast_div */
3542 regval = REG_READ(ah, AR_PHY_CCK_DETECT);
3543 regval &= (~AR_FAST_DIV_ENABLE);
3544 regval |= ((value >> 7) & 0x1) <<
3545 AR_FAST_DIV_ENABLE_S;
3546 REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
3547 ant_div_ctl1 =
3548 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
3549 /* check whether antenna diversity is enabled */
3550 if ((ant_div_ctl1 >> 0x6) == 0x3) {
3551 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
3552 /*
3553 * clear bits 25-30 main_lnaconf, alt_lnaconf,
3554 * main_tb, alt_tb
3555 */
3556 regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
3557 AR_PHY_9485_ANT_DIV_ALT_LNACONF |
3558 AR_PHY_9485_ANT_DIV_ALT_GAINTB |
3559 AR_PHY_9485_ANT_DIV_MAIN_GAINTB));
3560 /* by default use LNA1 for the main antenna */
3561 regval |= (AR_PHY_9485_ANT_DIV_LNA1 <<
3562 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S);
3563 regval |= (AR_PHY_9485_ANT_DIV_LNA2 <<
3564 AR_PHY_9485_ANT_DIV_ALT_LNACONF_S);
3565 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
3566 }
3567
3568
3527 } 3569 }
3570
3528} 3571}
3529 3572
3530static void ar9003_hw_drive_strength_apply(struct ath_hw *ah) 3573static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
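
The reworked ar9003_hw_ant_ctrl_apply() above walks the three switch-table registers and programs only the chains present in the rx or tx chainmask, instead of unconditionally writing chains 1 and 2. A small standalone sketch of that chainmask-guarded loop, with mock values rather than the real switch-table contents:

#include <stdio.h>

#define MAX_CHAINS 3
#define BIT(n) (1u << (n))

static void apply_switch_tables(unsigned int rxmask, unsigned int txmask,
				const unsigned int *per_chain_value)
{
	int chain;

	for (chain = 0; chain < MAX_CHAINS; chain++) {
		if (!((rxmask | txmask) & BIT(chain)))
			continue;	/* chain not wired up: skip it */
		/* in the driver this is a REG_RMW_FIELD() on switch_chain_reg[chain] */
		printf("chain %d: program 0x%08x\n", chain, per_chain_value[chain]);
	}
}

int main(void)
{
	const unsigned int values[MAX_CHAINS] = { 0x10, 0x11, 0x12 };

	apply_switch_tables(0x1, 0x1, values);	/* e.g. a single-chain chip */
	apply_switch_tables(0x7, 0x7, values);	/* e.g. a three-chain chip */
	return 0;
}
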
@@ -3634,13 +3677,16 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
 
 	/* Test value. if 0 then attenuation is unused. Don't load anything. */
 	for (i = 0; i < 3; i++) {
-		value = ar9003_hw_atten_chain_get(ah, i, chan);
-		REG_RMW_FIELD(ah, ext_atten_reg[i],
-			      AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
-
-		value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
-		REG_RMW_FIELD(ah, ext_atten_reg[i],
-			      AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value);
+		if (ah->txchainmask & BIT(i)) {
+			value = ar9003_hw_atten_chain_get(ah, i, chan);
+			REG_RMW_FIELD(ah, ext_atten_reg[i],
+				      AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
+
+			value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+			REG_RMW_FIELD(ah, ext_atten_reg[i],
+				      AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
+				      value);
+		}
 	}
 }
 
@@ -3749,8 +3795,9 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
 	ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
 	ar9003_hw_drive_strength_apply(ah);
 	ar9003_hw_atten_apply(ah, chan);
-	ar9003_hw_internal_regulator_apply(ah);
-	if (AR_SREV_9485(ah))
+	if (!AR_SREV_9340(ah))
+		ar9003_hw_internal_regulator_apply(ah);
+	if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
 		ar9003_hw_apply_tuning_caps(ah);
 }
 
@@ -3994,6 +4041,16 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
 		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
 	);
 
+	/* Write the power for duplicated frames - HT40 */
+
+	/* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
+	REG_WRITE(ah, 0xa3e0,
+		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+	);
+
 	/* Write the HT20 power per rate set */
 
 	/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
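
The new 0xa3e0 write above packs four per-rate target powers into one 32-bit register, one field per byte lane, via POW_SM(). A standalone sketch of that packing; POW_SM here is a local stand-in assumed to mask the value to its low 6 bits, which may differ from the driver's exact definition:

#include <stdio.h>

#define POW_SM(r, s) ((unsigned int)((r) & 0x3f) << (s))

int main(void)
{
	unsigned int legacy_6_24 = 30;	/* example target power, 0.5 dB units */
	unsigned int legacy_1l_5l = 34;

	/* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
	unsigned int regval = POW_SM(legacy_6_24, 24) |
			      POW_SM(legacy_1l_5l, 16) |
			      POW_SM(legacy_6_24, 8) |
			      POW_SM(legacy_1l_5l, 0);

	printf("0xa3e0 <- 0x%08x\n", regval);
	return 0;
}
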
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 7f5de6e4448b..a55eddbb2589 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -18,6 +18,7 @@
 #include "ar9003_mac.h"
 #include "ar9003_2p2_initvals.h"
 #include "ar9485_initvals.h"
+#include "ar9340_initvals.h"
 
 /* General hardware code for the AR9003 hadware family */
 
@@ -28,109 +29,105 @@
28 */ 29 */
29static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 30static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
30{ 31{
31 if (AR_SREV_9485_11(ah)) { 32 if (AR_SREV_9340(ah)) {
32 /* mac */ 33 /* mac */
33 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 34 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
34 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 35 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
35 ar9485_1_1_mac_core, 36 ar9340_1p0_mac_core,
36 ARRAY_SIZE(ar9485_1_1_mac_core), 2); 37 ARRAY_SIZE(ar9340_1p0_mac_core), 2);
37 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST], 38 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
38 ar9485_1_1_mac_postamble, 39 ar9340_1p0_mac_postamble,
39 ARRAY_SIZE(ar9485_1_1_mac_postamble), 5); 40 ARRAY_SIZE(ar9340_1p0_mac_postamble), 5);
40 41
41 /* bb */ 42 /* bb */
42 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1, 43 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
43 ARRAY_SIZE(ar9485_1_1), 2);
44 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE], 44 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
45 ar9485_1_1_baseband_core, 45 ar9340_1p0_baseband_core,
46 ARRAY_SIZE(ar9485_1_1_baseband_core), 2); 46 ARRAY_SIZE(ar9340_1p0_baseband_core), 2);
47 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST], 47 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
48 ar9485_1_1_baseband_postamble, 48 ar9340_1p0_baseband_postamble,
49 ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5); 49 ARRAY_SIZE(ar9340_1p0_baseband_postamble), 5);
50 50
51 /* radio */ 51 /* radio */
52 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0); 52 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
53 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE], 53 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
54 ar9485_1_1_radio_core, 54 ar9340_1p0_radio_core,
55 ARRAY_SIZE(ar9485_1_1_radio_core), 2); 55 ARRAY_SIZE(ar9340_1p0_radio_core), 2);
56 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], 56 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
57 ar9485_1_1_radio_postamble, 57 ar9340_1p0_radio_postamble,
58 ARRAY_SIZE(ar9485_1_1_radio_postamble), 2); 58 ARRAY_SIZE(ar9340_1p0_radio_postamble), 5);
59 59
60 /* soc */ 60 /* soc */
61 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE], 61 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
62 ar9485_1_1_soc_preamble, 62 ar9340_1p0_soc_preamble,
63 ARRAY_SIZE(ar9485_1_1_soc_preamble), 2); 63 ARRAY_SIZE(ar9340_1p0_soc_preamble), 2);
64 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0); 64 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
65 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0); 65 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
66 ar9340_1p0_soc_postamble,
67 ARRAY_SIZE(ar9340_1p0_soc_postamble), 5);
66 68
67 /* rx/tx gain */ 69 /* rx/tx gain */
68 INIT_INI_ARRAY(&ah->iniModesRxGain, 70 INIT_INI_ARRAY(&ah->iniModesRxGain,
69 ar9485_common_rx_gain_1_1, 71 ar9340Common_wo_xlna_rx_gain_table_1p0,
70 ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2); 72 ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
73 5);
71 INIT_INI_ARRAY(&ah->iniModesTxGain, 74 INIT_INI_ARRAY(&ah->iniModesTxGain,
72 ar9485_modes_lowest_ob_db_tx_gain_1_1, 75 ar9340Modes_high_ob_db_tx_gain_table_1p0,
73 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1), 76 ARRAY_SIZE(ar9340Modes_high_ob_db_tx_gain_table_1p0),
74 5); 77 5);
75 78
76 /* Load PCIE SERDES settings from INI */ 79 INIT_INI_ARRAY(&ah->iniModesAdditional,
77 80 ar9340Modes_fast_clock_1p0,
78 /* Awake Setting */ 81 ARRAY_SIZE(ar9340Modes_fast_clock_1p0),
79 82 3);
80 INIT_INI_ARRAY(&ah->iniPcieSerdes,
81 ar9485_1_1_pcie_phy_clkreq_disable_L1,
82 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
83 2);
84
85 /* Sleep Setting */
86 83
87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 84 INIT_INI_ARRAY(&ah->iniModesAdditional_40M,
88 ar9485_1_1_pcie_phy_clkreq_disable_L1, 85 ar9340_1p0_radio_core_40M,
89 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1), 86 ARRAY_SIZE(ar9340_1p0_radio_core_40M),
90 2); 87 2);
91 } else if (AR_SREV_9485(ah)) { 88 } else if (AR_SREV_9485_11(ah)) {
92 /* mac */ 89 /* mac */
93 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 90 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
94 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 91 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
95 ar9485_1_0_mac_core, 92 ar9485_1_1_mac_core,
96 ARRAY_SIZE(ar9485_1_0_mac_core), 2); 93 ARRAY_SIZE(ar9485_1_1_mac_core), 2);
97 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST], 94 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
98 ar9485_1_0_mac_postamble, 95 ar9485_1_1_mac_postamble,
99 ARRAY_SIZE(ar9485_1_0_mac_postamble), 5); 96 ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
100 97
101 /* bb */ 98 /* bb */
102 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_0, 99 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
103 ARRAY_SIZE(ar9485_1_0), 2); 100 ARRAY_SIZE(ar9485_1_1), 2);
104 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE], 101 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
105 ar9485_1_0_baseband_core, 102 ar9485_1_1_baseband_core,
106 ARRAY_SIZE(ar9485_1_0_baseband_core), 2); 103 ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
107 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST], 104 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
108 ar9485_1_0_baseband_postamble, 105 ar9485_1_1_baseband_postamble,
109 ARRAY_SIZE(ar9485_1_0_baseband_postamble), 5); 106 ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
110 107
111 /* radio */ 108 /* radio */
112 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0); 109 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
113 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE], 110 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
114 ar9485_1_0_radio_core, 111 ar9485_1_1_radio_core,
115 ARRAY_SIZE(ar9485_1_0_radio_core), 2); 112 ARRAY_SIZE(ar9485_1_1_radio_core), 2);
116 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], 113 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
117 ar9485_1_0_radio_postamble, 114 ar9485_1_1_radio_postamble,
118 ARRAY_SIZE(ar9485_1_0_radio_postamble), 2); 115 ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
119 116
120 /* soc */ 117 /* soc */
121 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE], 118 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
122 ar9485_1_0_soc_preamble, 119 ar9485_1_1_soc_preamble,
123 ARRAY_SIZE(ar9485_1_0_soc_preamble), 2); 120 ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
124 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0); 121 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
125 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0); 122 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
126 123
127 /* rx/tx gain */ 124 /* rx/tx gain */
128 INIT_INI_ARRAY(&ah->iniModesRxGain, 125 INIT_INI_ARRAY(&ah->iniModesRxGain,
129 ar9485Common_rx_gain_1_0, 126 ar9485Common_wo_xlna_rx_gain_1_1,
130 ARRAY_SIZE(ar9485Common_rx_gain_1_0), 2); 127 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1), 2);
131 INIT_INI_ARRAY(&ah->iniModesTxGain, 128 INIT_INI_ARRAY(&ah->iniModesTxGain,
132 ar9485Modes_lowest_ob_db_tx_gain_1_0, 129 ar9485_modes_lowest_ob_db_tx_gain_1_1,
133 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 130 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
134 5); 131 5);
135 132
136 /* Load PCIE SERDES settings from INI */ 133 /* Load PCIE SERDES settings from INI */
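
The AR9340 branch above reuses the existing INIT_INI_ARRAY() plumbing and only swaps in a different set of (table, rows, columns) triples per chip family. A minimal sketch of that per-chip table selection, with invented data rather than the real initvals:

#include <stddef.h>
#include <stdio.h>

struct ini_array {
	const unsigned int (*data)[2];	/* rows of { addr, value } here */
	size_t rows;
};

static const unsigned int fake_ar9340_core[][2] = {
	{ 0x00008000, 0x00000000 },
	{ 0x00008004, 0x00000001 },
};

static const unsigned int fake_ar9485_core[][2] = {
	{ 0x00008000, 0x000000ff },
};

static void init_mode_regs(struct ini_array *mac_core, int is_ar9340)
{
	if (is_ar9340) {
		mac_core->data = fake_ar9340_core;
		mac_core->rows = sizeof(fake_ar9340_core) / sizeof(fake_ar9340_core[0]);
	} else {
		mac_core->data = fake_ar9485_core;
		mac_core->rows = sizeof(fake_ar9485_core) / sizeof(fake_ar9485_core[0]);
	}
}

int main(void)
{
	struct ini_array mac_core;
	size_t i;

	init_mode_regs(&mac_core, 1);
	for (i = 0; i < mac_core.rows; i++)
		printf("0x%08x <- 0x%08x\n", mac_core.data[i][0], mac_core.data[i][1]);
	return 0;
}
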
@@ -138,15 +135,15 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
138 /* Awake Setting */ 135 /* Awake Setting */
139 136
140 INIT_INI_ARRAY(&ah->iniPcieSerdes, 137 INIT_INI_ARRAY(&ah->iniPcieSerdes,
141 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1, 138 ar9485_1_1_pcie_phy_clkreq_disable_L1,
142 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1), 139 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
143 2); 140 2);
144 141
145 /* Sleep Setting */ 142 /* Sleep Setting */
146 143
147 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 144 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
148 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1, 145 ar9485_1_1_pcie_phy_clkreq_disable_L1,
149 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1), 146 ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
150 2); 147 2);
151 } else { 148 } else {
152 /* mac */ 149 /* mac */
@@ -223,15 +220,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
223 switch (ar9003_hw_get_tx_gain_idx(ah)) { 220 switch (ar9003_hw_get_tx_gain_idx(ah)) {
224 case 0: 221 case 0:
225 default: 222 default:
226 if (AR_SREV_9485_11(ah)) 223 if (AR_SREV_9340(ah))
227 INIT_INI_ARRAY(&ah->iniModesTxGain, 224 INIT_INI_ARRAY(&ah->iniModesTxGain,
228 ar9485_modes_lowest_ob_db_tx_gain_1_1, 225 ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
229 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1), 226 ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
230 5); 227 5);
231 else if (AR_SREV_9485(ah)) 228 else if (AR_SREV_9485_11(ah))
232 INIT_INI_ARRAY(&ah->iniModesTxGain, 229 INIT_INI_ARRAY(&ah->iniModesTxGain,
233 ar9485Modes_lowest_ob_db_tx_gain_1_0, 230 ar9485_modes_lowest_ob_db_tx_gain_1_1,
234 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 231 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
235 5); 232 5);
236 else 233 else
237 INIT_INI_ARRAY(&ah->iniModesTxGain, 234 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -240,15 +237,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
240 5); 237 5);
241 break; 238 break;
242 case 1: 239 case 1:
243 if (AR_SREV_9485_11(ah)) 240 if (AR_SREV_9340(ah))
244 INIT_INI_ARRAY(&ah->iniModesTxGain, 241 INIT_INI_ARRAY(&ah->iniModesTxGain,
245 ar9485Modes_high_ob_db_tx_gain_1_1, 242 ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
246 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1), 243 ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
247 5); 244 5);
248 else if (AR_SREV_9485(ah)) 245 else if (AR_SREV_9485_11(ah))
249 INIT_INI_ARRAY(&ah->iniModesTxGain, 246 INIT_INI_ARRAY(&ah->iniModesTxGain,
250 ar9485Modes_high_ob_db_tx_gain_1_0, 247 ar9485Modes_high_ob_db_tx_gain_1_1,
251 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0), 248 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
252 5); 249 5);
253 else 250 else
254 INIT_INI_ARRAY(&ah->iniModesTxGain, 251 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -257,15 +254,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
257 5); 254 5);
258 break; 255 break;
259 case 2: 256 case 2:
260 if (AR_SREV_9485_11(ah)) 257 if (AR_SREV_9340(ah))
261 INIT_INI_ARRAY(&ah->iniModesTxGain, 258 INIT_INI_ARRAY(&ah->iniModesTxGain,
262 ar9485Modes_low_ob_db_tx_gain_1_1, 259 ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
263 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1), 260 ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
264 5); 261 5);
265 else if (AR_SREV_9485(ah)) 262 else if (AR_SREV_9485_11(ah))
266 INIT_INI_ARRAY(&ah->iniModesTxGain, 263 INIT_INI_ARRAY(&ah->iniModesTxGain,
267 ar9485Modes_low_ob_db_tx_gain_1_0, 264 ar9485Modes_low_ob_db_tx_gain_1_1,
268 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0), 265 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
269 5); 266 5);
270 else 267 else
271 INIT_INI_ARRAY(&ah->iniModesTxGain, 268 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -274,15 +271,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
274 5); 271 5);
275 break; 272 break;
276 case 3: 273 case 3:
277 if (AR_SREV_9485_11(ah)) 274 if (AR_SREV_9340(ah))
278 INIT_INI_ARRAY(&ah->iniModesTxGain, 275 INIT_INI_ARRAY(&ah->iniModesTxGain,
279 ar9485Modes_high_power_tx_gain_1_1, 276 ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
280 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1), 277 ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
281 5); 278 5);
282 else if (AR_SREV_9485(ah)) 279 else if (AR_SREV_9485_11(ah))
283 INIT_INI_ARRAY(&ah->iniModesTxGain, 280 INIT_INI_ARRAY(&ah->iniModesTxGain,
284 ar9485Modes_high_power_tx_gain_1_0, 281 ar9485Modes_high_power_tx_gain_1_1,
285 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0), 282 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
286 5); 283 5);
287 else 284 else
288 INIT_INI_ARRAY(&ah->iniModesTxGain, 285 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -298,15 +295,15 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
298 switch (ar9003_hw_get_rx_gain_idx(ah)) { 295 switch (ar9003_hw_get_rx_gain_idx(ah)) {
299 case 0: 296 case 0:
300 default: 297 default:
301 if (AR_SREV_9485_11(ah)) 298 if (AR_SREV_9340(ah))
302 INIT_INI_ARRAY(&ah->iniModesRxGain, 299 INIT_INI_ARRAY(&ah->iniModesRxGain,
303 ar9485_common_rx_gain_1_1, 300 ar9340Common_rx_gain_table_1p0,
304 ARRAY_SIZE(ar9485_common_rx_gain_1_1), 301 ARRAY_SIZE(ar9340Common_rx_gain_table_1p0),
305 2); 302 2);
306 else if (AR_SREV_9485(ah)) 303 else if (AR_SREV_9485_11(ah))
307 INIT_INI_ARRAY(&ah->iniModesRxGain, 304 INIT_INI_ARRAY(&ah->iniModesRxGain,
308 ar9485Common_rx_gain_1_0, 305 ar9485Common_wo_xlna_rx_gain_1_1,
309 ARRAY_SIZE(ar9485Common_rx_gain_1_0), 306 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
310 2); 307 2);
311 else 308 else
312 INIT_INI_ARRAY(&ah->iniModesRxGain, 309 INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -315,15 +312,15 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
315 2); 312 2);
316 break; 313 break;
317 case 1: 314 case 1:
318 if (AR_SREV_9485_11(ah)) 315 if (AR_SREV_9340(ah))
319 INIT_INI_ARRAY(&ah->iniModesRxGain, 316 INIT_INI_ARRAY(&ah->iniModesRxGain,
320 ar9485Common_wo_xlna_rx_gain_1_1, 317 ar9340Common_wo_xlna_rx_gain_table_1p0,
321 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1), 318 ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
322 2); 319 2);
323 else if (AR_SREV_9485(ah)) 320 else if (AR_SREV_9485_11(ah))
324 INIT_INI_ARRAY(&ah->iniModesRxGain, 321 INIT_INI_ARRAY(&ah->iniModesRxGain,
325 ar9485Common_wo_xlna_rx_gain_1_0, 322 ar9485Common_wo_xlna_rx_gain_1_1,
326 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0), 323 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
327 2); 324 2);
328 else 325 else
329 INIT_INI_ARRAY(&ah->iniModesRxGain, 326 INIT_INI_ARRAY(&ah->iniModesRxGain,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 038a0cbfc6e7..be6adec33ddb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -329,7 +329,6 @@ static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
329 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) 329 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
330 | SM(txpower, AR_XmitPower) 330 | SM(txpower, AR_XmitPower)
331 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) 331 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
332 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
333 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0) 332 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
334 | (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0); 333 | (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);
335 334
@@ -350,6 +349,16 @@ static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
350 ads->ctl22 = 0; 349 ads->ctl22 = 0;
351} 350}
352 351
352static void ar9003_hw_set_clrdmask(struct ath_hw *ah, void *ds, bool val)
353{
354 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
355
356 if (val)
357 ads->ctl11 |= AR_ClrDestMask;
358 else
359 ads->ctl11 &= ~AR_ClrDestMask;
360}
361
353static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds, 362static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
354 void *lastds, 363 void *lastds,
355 u32 durUpdateEn, u32 rtsctsRate, 364 u32 durUpdateEn, u32 rtsctsRate,
@@ -475,27 +484,6 @@ static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
475 ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr); 484 ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
476} 485}
477 486
478static void ar9003_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
479 u32 burstDuration)
480{
481 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
482
483 ads->ctl13 &= ~AR_BurstDur;
484 ads->ctl13 |= SM(burstDuration, AR_BurstDur);
485
486}
487
488static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
489 u32 vmf)
490{
491 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
492
493 if (vmf)
494 ads->ctl11 |= AR_VirtMoreFrag;
495 else
496 ads->ctl11 &= ~AR_VirtMoreFrag;
497}
498
499void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains) 487void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains)
500{ 488{
501 struct ar9003_txc *ads = ds; 489 struct ar9003_txc *ads = ds;
@@ -520,8 +508,7 @@ void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
520 ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle; 508 ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
521 ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last; 509 ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
522 ops->clr11n_aggr = ar9003_hw_clr11n_aggr; 510 ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
523 ops->set11n_burstduration = ar9003_hw_set11n_burstduration; 511 ops->set_clrdmask = ar9003_hw_set_clrdmask;
524 ops->set11n_virtualmorefrag = ar9003_hw_set11n_virtualmorefrag;
525} 512}
526 513
527void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size) 514void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
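The hunks above drop the per-descriptor burst-duration and virtual-more-frag callbacks and wire the new ar9003_hw_set_clrdmask helper into the MAC ops table instead, so callers toggle AR_ClrDestMask in ctl11 through a function pointer rather than at descriptor-setup time. The sketch below is a minimal, self-contained illustration of that ops-table pattern; the struct layout, the helper names and the bit value are local stand-ins, not the real ath9k definitions.

/* Minimal illustration of the callback-table pattern used above; every
 * type, name and constant here is a stand-in, not ath9k's real API. */
#include <stdbool.h>
#include <stdint.h>

#define CLR_DEST_MASK 0x00000040u  /* placeholder bit, not AR_ClrDestMask's real value */

struct txc {
        uint32_t ctl11;
};

struct mac_ops {
        void (*set_clrdmask)(struct txc *ds, bool val);
};

static void ar9003_style_set_clrdmask(struct txc *ds, bool val)
{
        if (val)
                ds->ctl11 |= CLR_DEST_MASK;
        else
                ds->ctl11 &= ~CLR_DEST_MASK;
}

static void attach_mac_ops(struct mac_ops *ops)
{
        /* The chip-specific implementation is attached once at init time;
         * callers only ever go through ops->set_clrdmask(). */
        ops->set_clrdmask = ar9003_style_set_clrdmask;
}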
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index eb250d6b8038..25f3c2fdf2bc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -75,16 +75,42 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
75 freq = centers.synth_center; 75 freq = centers.synth_center;
76 76
77 if (freq < 4800) { /* 2 GHz, fractional mode */ 77 if (freq < 4800) { /* 2 GHz, fractional mode */
78 if (AR_SREV_9485(ah)) 78 if (AR_SREV_9485(ah)) {
79 channelSel = CHANSEL_2G_9485(freq); 79 u32 chan_frac;
80 else 80
81 /*
82 * freq_ref = 40 / (refdiva >> amoderefsel); where refdiva=1 and amoderefsel=0
83 * ndiv = ((chan_mhz * 4) / 3) / freq_ref;
84 * chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000
85 */
86 channelSel = (freq * 4) / 120;
87 chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
88 channelSel = (channelSel << 17) | chan_frac;
89 } else if (AR_SREV_9340(ah)) {
90 if (ah->is_clk_25mhz) {
91 u32 chan_frac;
92
93 channelSel = (freq * 2) / 75;
94 chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
95 channelSel = (channelSel << 17) | chan_frac;
96 } else
97 channelSel = CHANSEL_2G(freq) >> 1;
98 } else
81 channelSel = CHANSEL_2G(freq); 99 channelSel = CHANSEL_2G(freq);
82 /* Set to 2G mode */ 100 /* Set to 2G mode */
83 bMode = 1; 101 bMode = 1;
84 } else { 102 } else {
85 channelSel = CHANSEL_5G(freq); 103 if (AR_SREV_9340(ah) && ah->is_clk_25mhz) {
86 /* Doubler is ON, so, divide channelSel by 2. */ 104 u32 chan_frac;
87 channelSel >>= 1; 105
106 channelSel = (freq * 2) / 75;
107 chan_frac = ((freq % 75) * 0x20000) / 75;
108 channelSel = (channelSel << 17) | chan_frac;
109 } else {
110 channelSel = CHANSEL_5G(freq);
111 /* Doubler is ON, so, divide channelSel by 2. */
112 channelSel >>= 1;
113 }
88 /* Set to 5G mode */ 114 /* Set to 5G mode */
89 bMode = 0; 115 bMode = 0;
90 } 116 }
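The comment block in the hunk above spells out the 2 GHz fractional-mode math for AR9485: with a 40 MHz reference, ndiv = (chan_mhz * 4 / 3) / 40 = chan_mhz * 4 / 120, the integer part becomes chansel, and the remaining fraction is scaled by 0x20000 before both are packed into one word. The sketch below reproduces that arithmetic for a single channel in a self-contained program; the function name and the program around it are illustrative only, not driver code.

/* Standalone reproduction of the AR9485 2 GHz fractional-mode channel-select
 * math shown above; only the arithmetic mirrors the hunk, the names are local. */
#include <stdio.h>
#include <stdint.h>

static uint32_t chansel_2g_frac(uint32_t freq_mhz)
{
        uint32_t chansel  = (freq_mhz * 4) / 120;                      /* int(ndiv) */
        uint32_t chanfrac = (((freq_mhz * 4) % 120) * 0x20000) / 120;  /* fraction  */

        return (chansel << 17) | chanfrac;
}

int main(void)
{
        /* Channel 1 (2412 MHz): ndiv = 80.4, so chansel = 80, chanfrac = 0xcccc,
         * and the packed word is 0x00a0cccc. */
        printf("0x%08x\n", chansel_2g_frac(2412));
        return 0;
}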
@@ -142,7 +168,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
142 * is out-of-band and can be ignored. 168 * is out-of-band and can be ignored.
143 */ 169 */
144 170
145 if (AR_SREV_9485(ah)) { 171 if (AR_SREV_9485(ah) || AR_SREV_9340(ah)) {
146 spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, 172 spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
147 IS_CHAN_2GHZ(chan)); 173 IS_CHAN_2GHZ(chan));
148 if (spur_fbin_ptr[0] == 0) /* No spur */ 174 if (spur_fbin_ptr[0] == 0) /* No spur */
@@ -167,7 +193,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
167 193
168 for (i = 0; i < max_spur_cnts; i++) { 194 for (i = 0; i < max_spur_cnts; i++) {
169 negative = 0; 195 negative = 0;
170 if (AR_SREV_9485(ah)) 196 if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
171 cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i], 197 cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
172 IS_CHAN_2GHZ(chan)) - synth_freq; 198 IS_CHAN_2GHZ(chan)) - synth_freq;
173 else 199 else
@@ -401,7 +427,7 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
401 427
402 ar9003_hw_spur_ofdm_clear(ah); 428 ar9003_hw_spur_ofdm_clear(ah);
403 429
404 for (i = 0; spurChansPtr[i] && i < 5; i++) { 430 for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
405 freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq; 431 freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
406 if (abs(freq_offset) < range) { 432 if (abs(freq_offset) < range) {
407 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset); 433 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
@@ -590,29 +616,25 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
590 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 616 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
591 unsigned int regWrites = 0, i; 617 unsigned int regWrites = 0, i;
592 struct ieee80211_channel *channel = chan->chan; 618 struct ieee80211_channel *channel = chan->chan;
593 u32 modesIndex, freqIndex; 619 u32 modesIndex;
594 620
595 switch (chan->chanmode) { 621 switch (chan->chanmode) {
596 case CHANNEL_A: 622 case CHANNEL_A:
597 case CHANNEL_A_HT20: 623 case CHANNEL_A_HT20:
598 modesIndex = 1; 624 modesIndex = 1;
599 freqIndex = 1;
600 break; 625 break;
601 case CHANNEL_A_HT40PLUS: 626 case CHANNEL_A_HT40PLUS:
602 case CHANNEL_A_HT40MINUS: 627 case CHANNEL_A_HT40MINUS:
603 modesIndex = 2; 628 modesIndex = 2;
604 freqIndex = 1;
605 break; 629 break;
606 case CHANNEL_G: 630 case CHANNEL_G:
607 case CHANNEL_G_HT20: 631 case CHANNEL_G_HT20:
608 case CHANNEL_B: 632 case CHANNEL_B:
609 modesIndex = 4; 633 modesIndex = 4;
610 freqIndex = 2;
611 break; 634 break;
612 case CHANNEL_G_HT40PLUS: 635 case CHANNEL_G_HT40PLUS:
613 case CHANNEL_G_HT40MINUS: 636 case CHANNEL_G_HT40MINUS:
614 modesIndex = 3; 637 modesIndex = 3;
615 freqIndex = 2;
616 break; 638 break;
617 639
618 default: 640 default:
@@ -637,6 +659,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
637 REG_WRITE_ARRAY(&ah->iniModesAdditional, 659 REG_WRITE_ARRAY(&ah->iniModesAdditional,
638 modesIndex, regWrites); 660 modesIndex, regWrites);
639 661
662 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
663 REG_WRITE_ARRAY(&ah->iniModesAdditional_40M, 1, regWrites);
664
640 ar9003_hw_override_ini(ah); 665 ar9003_hw_override_ini(ah);
641 ar9003_hw_set_channel_regs(ah, chan); 666 ar9003_hw_set_channel_regs(ah, chan);
642 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); 667 ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
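Across the two hunks above, freqIndex disappears and the mode-specific register programming is driven by modesIndex alone (1 and 2 for 5 GHz HT20/HT40, 4 and 3 for 2 GHz HT20/HT40), with an extra iniModesAdditional_40M pass for AR9340 when it is not running from a 25 MHz clock. modesIndex selects a column of the five-column initvals tables added later in this patch (address plus 5G_HT20, 5G_HT40, 2G_HT40 and 2G_HT20 values). The loop below is a minimal sketch of that column-indexed write, assuming that row layout; reg_write() and the table name are local stand-ins for the driver's REG_WRITE_ARRAY machinery.

/* Sketch of programming one column of a 5-column initvals table, as selected
 * by modesIndex above; reg_write() and the table are illustrative stand-ins. */
#include <stdint.h>
#include <stddef.h>

static void reg_write(uint32_t addr, uint32_t val)
{
        (void)addr;     /* real code would touch the hardware here */
        (void)val;
}

/* Column 0 is the register address; columns 1..4 carry the per-mode values
 * (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20), matching the table comments. */
static const uint32_t example_soc_postamble[][5] = {
        {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
};

static void write_ini_column(const uint32_t (*tbl)[5], size_t rows,
                             unsigned int modes_index)
{
        size_t i;

        for (i = 0; i < rows; i++)
                reg_write(tbl[i][0], tbl[i][modes_index]);
}

Calling write_ini_column(example_soc_postamble, 1, 4) would program the 2G_HT20 value, which corresponds to modesIndex = 4 for CHANNEL_G / CHANNEL_G_HT20 / CHANNEL_B in the hunk above.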
@@ -1159,9 +1184,52 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
1159 conf->radar_inband = 8; 1184 conf->radar_inband = 8;
1160} 1185}
1161 1186
1187static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
1188 struct ath_hw_antcomb_conf *antconf)
1189{
1190 u32 regval;
1191
1192 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1193 antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >>
1194 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S;
1195 antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >>
1196 AR_PHY_9485_ANT_DIV_ALT_LNACONF_S;
1197 antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >>
1198 AR_PHY_9485_ANT_FAST_DIV_BIAS_S;
1199 antconf->lna1_lna2_delta = -9;
1200 antconf->div_group = 2;
1201}
1202
1203static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
1204 struct ath_hw_antcomb_conf *antconf)
1205{
1206 u32 regval;
1207
1208 regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
1209 regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
1210 AR_PHY_9485_ANT_DIV_ALT_LNACONF |
1211 AR_PHY_9485_ANT_FAST_DIV_BIAS |
1212 AR_PHY_9485_ANT_DIV_MAIN_GAINTB |
1213 AR_PHY_9485_ANT_DIV_ALT_GAINTB);
1214 regval |= ((antconf->main_lna_conf <<
1215 AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S)
1216 & AR_PHY_9485_ANT_DIV_MAIN_LNACONF);
1217 regval |= ((antconf->alt_lna_conf << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S)
1218 & AR_PHY_9485_ANT_DIV_ALT_LNACONF);
1219 regval |= ((antconf->fast_div_bias << AR_PHY_9485_ANT_FAST_DIV_BIAS_S)
1220 & AR_PHY_9485_ANT_FAST_DIV_BIAS);
1221 regval |= ((antconf->main_gaintb << AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S)
1222 & AR_PHY_9485_ANT_DIV_MAIN_GAINTB);
1223 regval |= ((antconf->alt_gaintb << AR_PHY_9485_ANT_DIV_ALT_GAINTB_S)
1224 & AR_PHY_9485_ANT_DIV_ALT_GAINTB);
1225
1226 REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
1227}
1228
1162void ar9003_hw_attach_phy_ops(struct ath_hw *ah) 1229void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1163{ 1230{
1164 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1231 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1232 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
1165 static const u32 ar9300_cca_regs[6] = { 1233 static const u32 ar9300_cca_regs[6] = {
1166 AR_PHY_CCA_0, 1234 AR_PHY_CCA_0,
1167 AR_PHY_CCA_1, 1235 AR_PHY_CCA_1,
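ar9003_hw_antdiv_comb_conf_set above follows the usual read-modify-write idiom with the AR_PHY_9485_ANT_DIV_* mask/shift pairs added to ar9003_phy.h further down in this patch: read AR_PHY_MC_GAIN_CTRL, clear every diversity field at once, then OR each new value back in, shifted into position and clipped by its mask. The helpers below are a compact, generic illustration of that idiom; their names and the cached register image are local to the sketch, not part of the driver.

/* Generic field extract/insert helpers mirroring the mask/shift idiom used
 * by ar9003_hw_antdiv_comb_conf_get/set; the names here are illustrative only. */
#include <stdint.h>

#define ANT_DIV_ALT_LNACONF     0x06000000u     /* same mask/shift values as the new defines */
#define ANT_DIV_ALT_LNACONF_S   25

static uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
        return (reg & mask) >> shift;
}

static uint32_t field_set(uint32_t reg, uint32_t mask, unsigned int shift,
                          uint32_t val)
{
        reg &= ~mask;                   /* clear the old field          */
        reg |= (val << shift) & mask;   /* insert, clipped to the field */
        return reg;
}

/* Example: selecting LNA1 (0x2, per the new defines) on the ALT chain of a
 * cached register image:
 *      regval = field_set(regval, ANT_DIV_ALT_LNACONF,
 *                         ANT_DIV_ALT_LNACONF_S, 0x2);
 * field_get() reverses the operation, as conf_get does above.
 */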
@@ -1188,6 +1256,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1188 priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs; 1256 priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
1189 priv_ops->set_radar_params = ar9003_hw_set_radar_params; 1257 priv_ops->set_radar_params = ar9003_hw_set_radar_params;
1190 1258
1259 ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
1260 ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
1261
1191 ar9003_hw_set_nf_limits(ah); 1262 ar9003_hw_set_nf_limits(ah);
1192 ar9003_hw_set_radar_conf(ah); 1263 ar9003_hw_set_radar_conf(ah);
1193 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs)); 1264 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 8bdda2cf9dd7..c7505b48e5c0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -261,12 +261,34 @@
261#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20) 261#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
262#define AR_PHY_RESTART (AR_AGC_BASE + 0x24) 262#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
263 263
264/*
265 * Antenna Diversity settings
266 */
264#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28) 267#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
265#define AR_ANT_DIV_CTRL_ALL 0x7e000000 268#define AR_ANT_DIV_CTRL_ALL 0x7e000000
266#define AR_ANT_DIV_CTRL_ALL_S 25 269#define AR_ANT_DIV_CTRL_ALL_S 25
267#define AR_ANT_DIV_ENABLE 0x1000000 270#define AR_ANT_DIV_ENABLE 0x1000000
268#define AR_ANT_DIV_ENABLE_S 24 271#define AR_ANT_DIV_ENABLE_S 24
269 272
273
274#define AR_PHY_9485_ANT_FAST_DIV_BIAS 0x00007e00
275#define AR_PHY_9485_ANT_FAST_DIV_BIAS_S 9
276#define AR_PHY_9485_ANT_DIV_LNADIV 0x01000000
277#define AR_PHY_9485_ANT_DIV_LNADIV_S 24
278#define AR_PHY_9485_ANT_DIV_ALT_LNACONF 0x06000000
279#define AR_PHY_9485_ANT_DIV_ALT_LNACONF_S 25
280#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF 0x18000000
281#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S 27
282#define AR_PHY_9485_ANT_DIV_ALT_GAINTB 0x20000000
283#define AR_PHY_9485_ANT_DIV_ALT_GAINTB_S 29
284#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB 0x40000000
285#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S 30
286
287#define AR_PHY_9485_ANT_DIV_LNA1_MINUS_LNA2 0x0
288#define AR_PHY_9485_ANT_DIV_LNA2 0x1
289#define AR_PHY_9485_ANT_DIV_LNA1 0x2
290#define AR_PHY_9485_ANT_DIV_LNA1_PLUS_LNA2 0x3
291
270#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c) 292#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
271#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30) 293#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
272#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34) 294#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
@@ -548,15 +570,12 @@
548 570
549#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) 571#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
550 572
551#define AR_PHY_TX_IQCAL_START_9485 (AR_SM_BASE + 0x3c4) 573#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + AR_SREV_9485(ah) ? \
552#define AR_PHY_TX_IQCAL_START_DO_CAL_9485 0x80000000 574 0x3c8 : 0x448)
553#define AR_PHY_TX_IQCAL_START_DO_CAL_9485_S 31 575#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + AR_SREV_9485(ah) ? \
554#define AR_PHY_TX_IQCAL_CONTROL_1_9485 (AR_SM_BASE + 0x3c8) 576 0x3c4 : 0x440)
555#define AR_PHY_TX_IQCAL_STATUS_B0_9485 (AR_SM_BASE + 0x3f0) 577#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + AR_SREV_9485(ah) ? \
556 578 0x3f0 : 0x48c)
557#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
558#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
559#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
560#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \ 579#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
561 (AR_SREV_9485(ah) ? \ 580 (AR_SREV_9485(ah) ? \
562 0x3d0 : 0x450) + ((_i) << 2)) 581 0x3d0 : 0x450) + ((_i) << 2))
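One detail worth noting about the three rewritten macros above: unlike AR_PHY_TX_IQCAL_CORR_COEFF_B0 immediately below them, the conditional is not wrapped in its own parentheses, and in C the conditional operator binds more loosely than '+', so 'AR_SM_BASE + AR_SREV_9485(ah) ? 0x3c8 : 0x448' parses as '(AR_SM_BASE + AR_SREV_9485(ah)) ? 0x3c8 : 0x448' rather than as a base-plus-offset expression. The snippet below only demonstrates that precedence point with stand-in values; it says nothing about the chips' actual register maps.

/* Demonstrates why the conditional wants its own parentheses, as in the
 * AR_PHY_TX_IQCAL_CORR_COEFF_B0 macro above; BASE and IS_9485 are stand-ins. */
#include <stdio.h>

#define BASE    0x0000a200      /* stand-in for AR_SM_BASE       */
#define IS_9485 1               /* stand-in for AR_SREV_9485(ah) */

#define ADDR_UNPARENTHESIZED    (BASE + IS_9485 ? 0x3c8 : 0x448)
#define ADDR_PARENTHESIZED      (BASE + (IS_9485 ? 0x3c8 : 0x448))

int main(void)
{
        /* Prints 0x000003c8 0x0000a5c8: '+' binds tighter than '?:', so the
         * unparenthesized form evaluates the sum as the condition and drops
         * the base address from the result. */
        printf("0x%08x 0x%08x\n", ADDR_UNPARENTHESIZED, ADDR_PARENTHESIZED);
        return 0;
}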
@@ -588,7 +607,7 @@
588#define AR_PHY_65NM_CH0_BIAS2 0x160c4 607#define AR_PHY_65NM_CH0_BIAS2 0x160c4
589#define AR_PHY_65NM_CH0_BIAS4 0x160cc 608#define AR_PHY_65NM_CH0_BIAS4 0x160cc
590#define AR_PHY_65NM_CH0_RXTX4 0x1610c 609#define AR_PHY_65NM_CH0_RXTX4 0x1610c
591#define AR_PHY_65NM_CH0_THERM (AR_SREV_9485(ah) ? 0x1628c : 0x16290) 610#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : 0x1628c)
592 611
593#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 612#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
594#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 613#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -758,10 +777,10 @@
758#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 777#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
759#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 778#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
760#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004 779#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
761#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000 780#define AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
762#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18 781#define AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
763#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001 782#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
764#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0 783#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
765 784
766#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001 785#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
767#define AR_PHY_CALIBRATED_GAINS_0 0x3e 786#define AR_PHY_CALIBRATED_GAINS_0 0x3e
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
new file mode 100644
index 000000000000..815a8af1beef
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -0,0 +1,1525 @@
1/*
2 * Copyright (c) 2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_9340_H
18#define INITVALS_9340_H
19
20static const u32 ar9340_1p0_radio_postamble[][5] = {
21 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
22 {0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
23 {0x0001610c, 0x08000000, 0x08000000, 0x00000000, 0x00000000},
24 {0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
25 {0x0001650c, 0x08000000, 0x08000000, 0x00000000, 0x00000000},
26 {0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
27};
28
29static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
30 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
31 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
32 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
33 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
34 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
35 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
36 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
37 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
38 {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
39 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
40 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
41 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
42 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
43 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
44 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
45 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
46 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
47 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
48 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
49 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
50 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
51 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
52 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
53 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
54 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
55 {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
56 {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
57 {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
58 {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
59 {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
60 {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
61 {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
62 {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
63 {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
64 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
65 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
66 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
67 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
68 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
69 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
70 {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
71 {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
72 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
73 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
74 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
75 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
76 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
77 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
78 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
79 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
80 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
81 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
82 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
83 {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
84 {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
85 {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
86 {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
87 {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
88 {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
89 {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
90 {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
91 {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
92 {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
93 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
94 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
95 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
96 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
97 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
98 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
99 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
100};
101
102static const u32 ar9340Modes_fast_clock_1p0[][3] = {
103 /* Addr 5G_HT20 5G_HT40 */
104 {0x00001030, 0x00000268, 0x000004d0},
105 {0x00001070, 0x0000018c, 0x00000318},
106 {0x000010b0, 0x00000fd0, 0x00001fa0},
107 {0x00008014, 0x044c044c, 0x08980898},
108 {0x0000801c, 0x148ec02b, 0x148ec057},
109 {0x00008318, 0x000044c0, 0x00008980},
110 {0x00009e00, 0x03721821, 0x03721821},
111 {0x0000a230, 0x0000000b, 0x00000016},
112 {0x0000a254, 0x00000898, 0x00001130},
113};
114
115static const u32 ar9340_1p0_radio_core[][2] = {
116 /* Addr allmodes */
117 {0x00016000, 0x36db6db6},
118 {0x00016004, 0x6db6db40},
119 {0x00016008, 0x73f00000},
120 {0x0001600c, 0x00000000},
121 {0x00016040, 0x7f80fff8},
122 {0x00016044, 0x03b6d2db},
123 {0x00016048, 0x24925266},
124 {0x0001604c, 0x000f0278},
125 {0x00016050, 0x6db6db6c},
126 {0x00016054, 0x6db60000},
127 {0x00016080, 0x00080000},
128 {0x00016084, 0x0e48048c},
129 {0x00016088, 0x14214514},
130 {0x0001608c, 0x119f081c},
131 {0x00016090, 0x24926490},
132 {0x00016094, 0x00000000},
133 {0x00016098, 0xd411eb84},
134 {0x0001609c, 0x03e47f32},
135 {0x000160a0, 0xc2108ffe},
136 {0x000160a4, 0x812fc370},
137 {0x000160a8, 0x423c8000},
138 {0x000160ac, 0xa4646800},
139 {0x000160b0, 0x00fe7f46},
140 {0x000160b4, 0x92480000},
141 {0x000160c0, 0x006db6db},
142 {0x000160c4, 0x6db6db60},
143 {0x000160c8, 0x6db6db6c},
144 {0x000160cc, 0x6de6db6c},
145 {0x000160d0, 0xb6da4924},
146 {0x00016100, 0x04cb0001},
147 {0x00016104, 0xfff80000},
148 {0x00016108, 0x00080010},
149 {0x0001610c, 0x00000000},
150 {0x00016140, 0x50804008},
151 {0x00016144, 0x01884080},
152 {0x00016148, 0x000080c0},
153 {0x00016280, 0x01000015},
154 {0x00016284, 0x05530000},
155 {0x00016288, 0x00318000},
156 {0x0001628c, 0x50000000},
157 {0x00016290, 0x4080294f},
158 {0x00016380, 0x00000000},
159 {0x00016384, 0x00000000},
160 {0x00016388, 0x00800700},
161 {0x0001638c, 0x00800700},
162 {0x00016390, 0x00800700},
163 {0x00016394, 0x00000000},
164 {0x00016398, 0x00000000},
165 {0x0001639c, 0x00000000},
166 {0x000163a0, 0x00000001},
167 {0x000163a4, 0x00000001},
168 {0x000163a8, 0x00000000},
169 {0x000163ac, 0x00000000},
170 {0x000163b0, 0x00000000},
171 {0x000163b4, 0x00000000},
172 {0x000163b8, 0x00000000},
173 {0x000163bc, 0x00000000},
174 {0x000163c0, 0x000000a0},
175 {0x000163c4, 0x000c0000},
176 {0x000163c8, 0x14021402},
177 {0x000163cc, 0x00001402},
178 {0x000163d0, 0x00000000},
179 {0x000163d4, 0x00000000},
180 {0x00016400, 0x36db6db6},
181 {0x00016404, 0x6db6db40},
182 {0x00016408, 0x73f00000},
183 {0x0001640c, 0x00000000},
184 {0x00016440, 0x7f80fff8},
185 {0x00016444, 0x03b6d2db},
186 {0x00016448, 0x24927266},
187 {0x0001644c, 0x000f0278},
188 {0x00016450, 0x6db6db6c},
189 {0x00016454, 0x6db60000},
190 {0x00016500, 0x04cb0001},
191 {0x00016504, 0xfff80000},
192 {0x00016508, 0x00080010},
193 {0x0001650c, 0x00000000},
194 {0x00016540, 0x50804008},
195 {0x00016544, 0x01884080},
196 {0x00016548, 0x000080c0},
197 {0x00016780, 0x00000000},
198 {0x00016784, 0x00000000},
199 {0x00016788, 0x00800700},
200 {0x0001678c, 0x00800700},
201 {0x00016790, 0x00800700},
202 {0x00016794, 0x00000000},
203 {0x00016798, 0x00000000},
204 {0x0001679c, 0x00000000},
205 {0x000167a0, 0x00000001},
206 {0x000167a4, 0x00000001},
207 {0x000167a8, 0x00000000},
208 {0x000167ac, 0x00000000},
209 {0x000167b0, 0x00000000},
210 {0x000167b4, 0x00000000},
211 {0x000167b8, 0x00000000},
212 {0x000167bc, 0x00000000},
213 {0x000167c0, 0x000000a0},
214 {0x000167c4, 0x000c0000},
215 {0x000167c8, 0x14021402},
216 {0x000167cc, 0x00001402},
217 {0x000167d0, 0x00000000},
218 {0x000167d4, 0x00000000},
219};
220
221static const u32 ar9340_1p0_radio_core_40M[][2] = {
222 {0x0001609c, 0x02566f3a},
223 {0x000160ac, 0xa4647c00},
224 {0x000160b0, 0x01885f5a},
225};
226
227static const u32 ar9340_1p0_mac_postamble[][5] = {
228 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
229 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
230 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
231 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
232 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
233 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
234 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
235 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
236 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
237};
238
239static const u32 ar9340_1p0_soc_postamble[][5] = {
240 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
241 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
242};
243
244static const u32 ar9340_1p0_baseband_postamble[][5] = {
245 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
246 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
247 {0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
248 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
249 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
250 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
251 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
252 {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
253 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
254 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
255 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
256 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e},
257 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
258 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
259 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
260 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
261 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
262 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
263 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
264 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
265 {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
266 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
267 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
268 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
269 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
270 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
271 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
272 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
273 {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
274 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
275 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
276 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
277 {0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
278 {0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
279 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
280 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
281 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
282 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
283 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
284 {0x0000ae04, 0x00180000, 0x00180000, 0x00180000, 0x00180000},
285 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
286 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
287 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
288 {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
289};
290
291static const u32 ar9340_1p0_baseband_core[][2] = {
292 /* Addr allmodes */
293 {0x00009800, 0xafe68e30},
294 {0x00009804, 0xfd14e000},
295 {0x00009808, 0x9c0a9f6b},
296 {0x0000980c, 0x04900000},
297 {0x00009814, 0xb280c00a},
298 {0x00009818, 0x00000000},
299 {0x0000981c, 0x00020028},
300 {0x00009834, 0x5f3ca3de},
301 {0x00009838, 0x0108ecff},
302 {0x0000983c, 0x14750600},
303 {0x00009880, 0x201fff00},
304 {0x00009884, 0x00001042},
305 {0x000098a4, 0x00200400},
306 {0x000098b0, 0x52440bbe},
307 {0x000098d0, 0x004b6a8e},
308 {0x000098d4, 0x00000820},
309 {0x000098dc, 0x00000000},
310 {0x000098f0, 0x00000000},
311 {0x000098f4, 0x00000000},
312 {0x00009c04, 0xff55ff55},
313 {0x00009c08, 0x0320ff55},
314 {0x00009c0c, 0x00000000},
315 {0x00009c10, 0x00000000},
316 {0x00009c14, 0x00046384},
317 {0x00009c18, 0x05b6b440},
318 {0x00009c1c, 0x00b6b440},
319 {0x00009d00, 0xc080a333},
320 {0x00009d04, 0x40206c10},
321 {0x00009d08, 0x009c4060},
322 {0x00009d0c, 0x9883800a},
323 {0x00009d10, 0x01834061},
324 {0x00009d14, 0x00c0040b},
325 {0x00009d18, 0x00000000},
326 {0x00009e08, 0x0038230c},
327 {0x00009e24, 0x990bb515},
328 {0x00009e28, 0x0c6f0000},
329 {0x00009e30, 0x06336f77},
330 {0x00009e34, 0x6af6532f},
331 {0x00009e38, 0x0cc80c00},
332 {0x00009e3c, 0xcf946222},
333 {0x00009e40, 0x0d261820},
334 {0x00009e4c, 0x00001004},
335 {0x00009e50, 0x00ff03f1},
336 {0x00009e54, 0x00000000},
337 {0x00009fc0, 0x803e4788},
338 {0x00009fc4, 0x0001efb5},
339 {0x00009fcc, 0x40000014},
340 {0x00009fd0, 0x01193b93},
341 {0x0000a20c, 0x00000000},
342 {0x0000a220, 0x00000000},
343 {0x0000a224, 0x00000000},
344 {0x0000a228, 0x10002310},
345 {0x0000a22c, 0x01036a1e},
346 {0x0000a234, 0x10000fff},
347 {0x0000a23c, 0x00000000},
348 {0x0000a244, 0x0c000000},
349 {0x0000a2a0, 0x00000001},
350 {0x0000a2c0, 0x00000001},
351 {0x0000a2c8, 0x00000000},
352 {0x0000a2cc, 0x18c43433},
353 {0x0000a2d4, 0x00000000},
354 {0x0000a2dc, 0x00000000},
355 {0x0000a2e0, 0x00000000},
356 {0x0000a2e4, 0x00000000},
357 {0x0000a2e8, 0x00000000},
358 {0x0000a2ec, 0x00000000},
359 {0x0000a2f0, 0x00000000},
360 {0x0000a2f4, 0x00000000},
361 {0x0000a2f8, 0x00000000},
362 {0x0000a344, 0x00000000},
363 {0x0000a34c, 0x00000000},
364 {0x0000a350, 0x0000a000},
365 {0x0000a364, 0x00000000},
366 {0x0000a370, 0x00000000},
367 {0x0000a390, 0x00000001},
368 {0x0000a394, 0x00000444},
369 {0x0000a398, 0x001f0e0f},
370 {0x0000a39c, 0x0075393f},
371 {0x0000a3a0, 0xb79f6427},
372 {0x0000a3a4, 0x00000000},
373 {0x0000a3a8, 0xaaaaaaaa},
374 {0x0000a3ac, 0x3c466478},
375 {0x0000a3c0, 0x20202020},
376 {0x0000a3c4, 0x22222220},
377 {0x0000a3c8, 0x20200020},
378 {0x0000a3cc, 0x20202020},
379 {0x0000a3d0, 0x20202020},
380 {0x0000a3d4, 0x20202020},
381 {0x0000a3d8, 0x20202020},
382 {0x0000a3dc, 0x20202020},
383 {0x0000a3e0, 0x20202020},
384 {0x0000a3e4, 0x20202020},
385 {0x0000a3e8, 0x20202020},
386 {0x0000a3ec, 0x20202020},
387 {0x0000a3f0, 0x00000000},
388 {0x0000a3f4, 0x00000246},
389 {0x0000a3f8, 0x0cdbd380},
390 {0x0000a3fc, 0x000f0f01},
391 {0x0000a400, 0x8fa91f01},
392 {0x0000a404, 0x00000000},
393 {0x0000a408, 0x0e79e5c6},
394 {0x0000a40c, 0x00820820},
395 {0x0000a414, 0x1ce739ce},
396 {0x0000a418, 0x2d001dce},
397 {0x0000a41c, 0x1ce739ce},
398 {0x0000a420, 0x000001ce},
399 {0x0000a424, 0x1ce739ce},
400 {0x0000a428, 0x000001ce},
401 {0x0000a42c, 0x1ce739ce},
402 {0x0000a430, 0x1ce739ce},
403 {0x0000a434, 0x00000000},
404 {0x0000a438, 0x00001801},
405 {0x0000a43c, 0x00000000},
406 {0x0000a440, 0x00000000},
407 {0x0000a444, 0x00000000},
408 {0x0000a448, 0x04000080},
409 {0x0000a44c, 0x00000001},
410 {0x0000a450, 0x00010000},
411 {0x0000a458, 0x00000000},
412 {0x0000a600, 0x00000000},
413 {0x0000a604, 0x00000000},
414 {0x0000a608, 0x00000000},
415 {0x0000a60c, 0x00000000},
416 {0x0000a610, 0x00000000},
417 {0x0000a614, 0x00000000},
418 {0x0000a618, 0x00000000},
419 {0x0000a61c, 0x00000000},
420 {0x0000a620, 0x00000000},
421 {0x0000a624, 0x00000000},
422 {0x0000a628, 0x00000000},
423 {0x0000a62c, 0x00000000},
424 {0x0000a630, 0x00000000},
425 {0x0000a634, 0x00000000},
426 {0x0000a638, 0x00000000},
427 {0x0000a63c, 0x00000000},
428 {0x0000a640, 0x00000000},
429 {0x0000a644, 0x3fad9d74},
430 {0x0000a648, 0x0048060a},
431 {0x0000a64c, 0x00000637},
432 {0x0000a670, 0x03020100},
433 {0x0000a674, 0x09080504},
434 {0x0000a678, 0x0d0c0b0a},
435 {0x0000a67c, 0x13121110},
436 {0x0000a680, 0x31301514},
437 {0x0000a684, 0x35343332},
438 {0x0000a688, 0x00000036},
439 {0x0000a690, 0x00000838},
440 {0x0000a7c0, 0x00000000},
441 {0x0000a7c4, 0xfffffffc},
442 {0x0000a7c8, 0x00000000},
443 {0x0000a7cc, 0x00000000},
444 {0x0000a7d0, 0x00000000},
445 {0x0000a7d4, 0x00000004},
446 {0x0000a7dc, 0x00000000},
447 {0x0000a8d0, 0x004b6a8e},
448 {0x0000a8d4, 0x00000820},
449 {0x0000a8dc, 0x00000000},
450 {0x0000a8f0, 0x00000000},
451 {0x0000a8f4, 0x00000000},
452 {0x0000b2d0, 0x00000080},
453 {0x0000b2d4, 0x00000000},
454 {0x0000b2dc, 0x00000000},
455 {0x0000b2e0, 0x00000000},
456 {0x0000b2e4, 0x00000000},
457 {0x0000b2e8, 0x00000000},
458 {0x0000b2ec, 0x00000000},
459 {0x0000b2f0, 0x00000000},
460 {0x0000b2f4, 0x00000000},
461 {0x0000b2f8, 0x00000000},
462 {0x0000b408, 0x0e79e5c0},
463 {0x0000b40c, 0x00820820},
464 {0x0000b420, 0x00000000},
465};
466
467static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = {
468 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
469 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
470 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
471 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
472 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
473 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
474 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
475 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
476 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
477 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
478 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
479 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
480 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
481 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
482 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
483 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
484 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
485 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
486 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
487 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
488 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
489 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
490 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
491 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
492 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
493 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
494 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
495 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
496 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
497 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
498 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
499 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
500 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
501 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
502 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
503 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
504 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
505 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
506 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
507 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
508 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
509 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
510 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
511 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
512 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
513 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
514 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
515 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
516 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
517 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
518 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
519 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
520 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
521 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
522 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
523 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
524 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
525 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
526 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
527 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
528 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
529 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
530 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
531 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
532 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
533 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
534 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
535 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
536 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
537 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
538};
539
540static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
541 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
542 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
543 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
544 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
545 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
546 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
547 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
548 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
549 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
550 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
551 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
552 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
553 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
554 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
555 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
556 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
557 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
558 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
559 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
560 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
561 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
562 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
563 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
564 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
565 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
566 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
567 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
568 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
569 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
570 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
571 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
572 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
573 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
574 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
575 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
576 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
577 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
578 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
579 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
580 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
581 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
582 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
583 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
584 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
585 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
586 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
587 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
588 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
589 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
590 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
591 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
592 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
593 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
594 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
595 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
596 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
597 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
598 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
599 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
600 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
601 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
602 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
603 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
604 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
605 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
606 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
607 {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
608 {0x00016048, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266},
609 {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
610 {0x00016448, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266},
611};
612static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
613 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
614 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
615 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
616 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
617 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
618 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
619 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
620 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
621 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
622 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
623 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
624 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
625 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
626 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
627 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
628 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
629 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
630 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
631 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
632 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
633 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
634 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
635 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
636 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
637 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
638 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
639 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
640 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
641 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
642 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
643 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
644 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
645 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
646 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
647 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
648 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
649 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
650 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
651 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
652 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
653 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
654 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
655 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
656 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
657 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
658 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
659 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
660 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
661 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
662 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
663 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
664 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
665 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
666 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
667 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
668 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
669 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
670 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
671 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
672 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
673 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
674 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
675 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
676 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
677 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
678 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
679 {0x00016044, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db},
680 {0x00016048, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266},
681 {0x00016444, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db},
682 {0x00016448, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266},
683};
684
685
686static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
687 /* Addr allmodes */
688 {0x0000a000, 0x00010000},
689 {0x0000a004, 0x00030002},
690 {0x0000a008, 0x00050004},
691 {0x0000a00c, 0x00810080},
692 {0x0000a010, 0x00830082},
693 {0x0000a014, 0x01810180},
694 {0x0000a018, 0x01830182},
695 {0x0000a01c, 0x01850184},
696 {0x0000a020, 0x01890188},
697 {0x0000a024, 0x018b018a},
698 {0x0000a028, 0x018d018c},
699 {0x0000a02c, 0x01910190},
700 {0x0000a030, 0x01930192},
701 {0x0000a034, 0x01950194},
702 {0x0000a038, 0x038a0196},
703 {0x0000a03c, 0x038c038b},
704 {0x0000a040, 0x0390038d},
705 {0x0000a044, 0x03920391},
706 {0x0000a048, 0x03940393},
707 {0x0000a04c, 0x03960395},
708 {0x0000a050, 0x00000000},
709 {0x0000a054, 0x00000000},
710 {0x0000a058, 0x00000000},
711 {0x0000a05c, 0x00000000},
712 {0x0000a060, 0x00000000},
713 {0x0000a064, 0x00000000},
714 {0x0000a068, 0x00000000},
715 {0x0000a06c, 0x00000000},
716 {0x0000a070, 0x00000000},
717 {0x0000a074, 0x00000000},
718 {0x0000a078, 0x00000000},
719 {0x0000a07c, 0x00000000},
720 {0x0000a080, 0x22222229},
721 {0x0000a084, 0x1d1d1d1d},
722 {0x0000a088, 0x1d1d1d1d},
723 {0x0000a08c, 0x1d1d1d1d},
724 {0x0000a090, 0x171d1d1d},
725 {0x0000a094, 0x11111717},
726 {0x0000a098, 0x00030311},
727 {0x0000a09c, 0x00000000},
728 {0x0000a0a0, 0x00000000},
729 {0x0000a0a4, 0x00000000},
730 {0x0000a0a8, 0x00000000},
731 {0x0000a0ac, 0x00000000},
732 {0x0000a0b0, 0x00000000},
733 {0x0000a0b4, 0x00000000},
734 {0x0000a0b8, 0x00000000},
735 {0x0000a0bc, 0x00000000},
736 {0x0000a0c0, 0x001f0000},
737 {0x0000a0c4, 0x01000101},
738 {0x0000a0c8, 0x011e011f},
739 {0x0000a0cc, 0x011c011d},
740 {0x0000a0d0, 0x02030204},
741 {0x0000a0d4, 0x02010202},
742 {0x0000a0d8, 0x021f0200},
743 {0x0000a0dc, 0x0302021e},
744 {0x0000a0e0, 0x03000301},
745 {0x0000a0e4, 0x031e031f},
746 {0x0000a0e8, 0x0402031d},
747 {0x0000a0ec, 0x04000401},
748 {0x0000a0f0, 0x041e041f},
749 {0x0000a0f4, 0x0502041d},
750 {0x0000a0f8, 0x05000501},
751 {0x0000a0fc, 0x051e051f},
752 {0x0000a100, 0x06010602},
753 {0x0000a104, 0x061f0600},
754 {0x0000a108, 0x061d061e},
755 {0x0000a10c, 0x07020703},
756 {0x0000a110, 0x07000701},
757 {0x0000a114, 0x00000000},
758 {0x0000a118, 0x00000000},
759 {0x0000a11c, 0x00000000},
760 {0x0000a120, 0x00000000},
761 {0x0000a124, 0x00000000},
762 {0x0000a128, 0x00000000},
763 {0x0000a12c, 0x00000000},
764 {0x0000a130, 0x00000000},
765 {0x0000a134, 0x00000000},
766 {0x0000a138, 0x00000000},
767 {0x0000a13c, 0x00000000},
768 {0x0000a140, 0x001f0000},
769 {0x0000a144, 0x01000101},
770 {0x0000a148, 0x011e011f},
771 {0x0000a14c, 0x011c011d},
772 {0x0000a150, 0x02030204},
773 {0x0000a154, 0x02010202},
774 {0x0000a158, 0x021f0200},
775 {0x0000a15c, 0x0302021e},
776 {0x0000a160, 0x03000301},
777 {0x0000a164, 0x031e031f},
778 {0x0000a168, 0x0402031d},
779 {0x0000a16c, 0x04000401},
780 {0x0000a170, 0x041e041f},
781 {0x0000a174, 0x0502041d},
782 {0x0000a178, 0x05000501},
783 {0x0000a17c, 0x051e051f},
784 {0x0000a180, 0x06010602},
785 {0x0000a184, 0x061f0600},
786 {0x0000a188, 0x061d061e},
787 {0x0000a18c, 0x07020703},
788 {0x0000a190, 0x07000701},
789 {0x0000a194, 0x00000000},
790 {0x0000a198, 0x00000000},
791 {0x0000a19c, 0x00000000},
792 {0x0000a1a0, 0x00000000},
793 {0x0000a1a4, 0x00000000},
794 {0x0000a1a8, 0x00000000},
795 {0x0000a1ac, 0x00000000},
796 {0x0000a1b0, 0x00000000},
797 {0x0000a1b4, 0x00000000},
798 {0x0000a1b8, 0x00000000},
799 {0x0000a1bc, 0x00000000},
800 {0x0000a1c0, 0x00000000},
801 {0x0000a1c4, 0x00000000},
802 {0x0000a1c8, 0x00000000},
803 {0x0000a1cc, 0x00000000},
804 {0x0000a1d0, 0x00000000},
805 {0x0000a1d4, 0x00000000},
806 {0x0000a1d8, 0x00000000},
807 {0x0000a1dc, 0x00000000},
808 {0x0000a1e0, 0x00000000},
809 {0x0000a1e4, 0x00000000},
810 {0x0000a1e8, 0x00000000},
811 {0x0000a1ec, 0x00000000},
812 {0x0000a1f0, 0x00000396},
813 {0x0000a1f4, 0x00000396},
814 {0x0000a1f8, 0x00000396},
815 {0x0000a1fc, 0x00000196},
816 {0x0000b000, 0x00010000},
817 {0x0000b004, 0x00030002},
818 {0x0000b008, 0x00050004},
819 {0x0000b00c, 0x00810080},
820 {0x0000b010, 0x00830082},
821 {0x0000b014, 0x01810180},
822 {0x0000b018, 0x01830182},
823 {0x0000b01c, 0x01850184},
824 {0x0000b020, 0x02810280},
825 {0x0000b024, 0x02830282},
826 {0x0000b028, 0x02850284},
827 {0x0000b02c, 0x02890288},
828 {0x0000b030, 0x028b028a},
829 {0x0000b034, 0x0388028c},
830 {0x0000b038, 0x038a0389},
831 {0x0000b03c, 0x038c038b},
832 {0x0000b040, 0x0390038d},
833 {0x0000b044, 0x03920391},
834 {0x0000b048, 0x03940393},
835 {0x0000b04c, 0x03960395},
836 {0x0000b050, 0x00000000},
837 {0x0000b054, 0x00000000},
838 {0x0000b058, 0x00000000},
839 {0x0000b05c, 0x00000000},
840 {0x0000b060, 0x00000000},
841 {0x0000b064, 0x00000000},
842 {0x0000b068, 0x00000000},
843 {0x0000b06c, 0x00000000},
844 {0x0000b070, 0x00000000},
845 {0x0000b074, 0x00000000},
846 {0x0000b078, 0x00000000},
847 {0x0000b07c, 0x00000000},
848 {0x0000b080, 0x32323232},
849 {0x0000b084, 0x2f2f3232},
850 {0x0000b088, 0x23282a2d},
851 {0x0000b08c, 0x1c1e2123},
852 {0x0000b090, 0x14171919},
853 {0x0000b094, 0x0e0e1214},
854 {0x0000b098, 0x03050707},
855 {0x0000b09c, 0x00030303},
856 {0x0000b0a0, 0x00000000},
857 {0x0000b0a4, 0x00000000},
858 {0x0000b0a8, 0x00000000},
859 {0x0000b0ac, 0x00000000},
860 {0x0000b0b0, 0x00000000},
861 {0x0000b0b4, 0x00000000},
862 {0x0000b0b8, 0x00000000},
863 {0x0000b0bc, 0x00000000},
864 {0x0000b0c0, 0x003f0020},
865 {0x0000b0c4, 0x00400041},
866 {0x0000b0c8, 0x0140005f},
867 {0x0000b0cc, 0x0160015f},
868 {0x0000b0d0, 0x017e017f},
869 {0x0000b0d4, 0x02410242},
870 {0x0000b0d8, 0x025f0240},
871 {0x0000b0dc, 0x027f0260},
872 {0x0000b0e0, 0x0341027e},
873 {0x0000b0e4, 0x035f0340},
874 {0x0000b0e8, 0x037f0360},
875 {0x0000b0ec, 0x04400441},
876 {0x0000b0f0, 0x0460045f},
877 {0x0000b0f4, 0x0541047f},
878 {0x0000b0f8, 0x055f0540},
879 {0x0000b0fc, 0x057f0560},
880 {0x0000b100, 0x06400641},
881 {0x0000b104, 0x0660065f},
882 {0x0000b108, 0x067e067f},
883 {0x0000b10c, 0x07410742},
884 {0x0000b110, 0x075f0740},
885 {0x0000b114, 0x077f0760},
886 {0x0000b118, 0x07800781},
887 {0x0000b11c, 0x07a0079f},
888 {0x0000b120, 0x07c107bf},
889 {0x0000b124, 0x000007c0},
890 {0x0000b128, 0x00000000},
891 {0x0000b12c, 0x00000000},
892 {0x0000b130, 0x00000000},
893 {0x0000b134, 0x00000000},
894 {0x0000b138, 0x00000000},
895 {0x0000b13c, 0x00000000},
896 {0x0000b140, 0x003f0020},
897 {0x0000b144, 0x00400041},
898 {0x0000b148, 0x0140005f},
899 {0x0000b14c, 0x0160015f},
900 {0x0000b150, 0x017e017f},
901 {0x0000b154, 0x02410242},
902 {0x0000b158, 0x025f0240},
903 {0x0000b15c, 0x027f0260},
904 {0x0000b160, 0x0341027e},
905 {0x0000b164, 0x035f0340},
906 {0x0000b168, 0x037f0360},
907 {0x0000b16c, 0x04400441},
908 {0x0000b170, 0x0460045f},
909 {0x0000b174, 0x0541047f},
910 {0x0000b178, 0x055f0540},
911 {0x0000b17c, 0x057f0560},
912 {0x0000b180, 0x06400641},
913 {0x0000b184, 0x0660065f},
914 {0x0000b188, 0x067e067f},
915 {0x0000b18c, 0x07410742},
916 {0x0000b190, 0x075f0740},
917 {0x0000b194, 0x077f0760},
918 {0x0000b198, 0x07800781},
919 {0x0000b19c, 0x07a0079f},
920 {0x0000b1a0, 0x07c107bf},
921 {0x0000b1a4, 0x000007c0},
922 {0x0000b1a8, 0x00000000},
923 {0x0000b1ac, 0x00000000},
924 {0x0000b1b0, 0x00000000},
925 {0x0000b1b4, 0x00000000},
926 {0x0000b1b8, 0x00000000},
927 {0x0000b1bc, 0x00000000},
928 {0x0000b1c0, 0x00000000},
929 {0x0000b1c4, 0x00000000},
930 {0x0000b1c8, 0x00000000},
931 {0x0000b1cc, 0x00000000},
932 {0x0000b1d0, 0x00000000},
933 {0x0000b1d4, 0x00000000},
934 {0x0000b1d8, 0x00000000},
935 {0x0000b1dc, 0x00000000},
936 {0x0000b1e0, 0x00000000},
937 {0x0000b1e4, 0x00000000},
938 {0x0000b1e8, 0x00000000},
939 {0x0000b1ec, 0x00000000},
940 {0x0000b1f0, 0x00000396},
941 {0x0000b1f4, 0x00000396},
942 {0x0000b1f8, 0x00000396},
943 {0x0000b1fc, 0x00000196},
944};
945
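For readers skimming these dumps: a two-column "allmodes" table such as ar9340Common_rx_gain_table_1p0 above is simply a list of {register address, value} pairs that the driver writes out once at hardware initialization, independent of band or channel width. The fragment below is a standalone illustrative sketch of that pattern only, not ath9k driver code; reg_write() and apply_allmodes_table() are hypothetical stand-ins for the driver's real register accessor and table-walk helper.

/* Standalone sketch, assuming the {addr, value} row layout shown above. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Hypothetical stand-in for the driver's MMIO register write. */
static void reg_write(u32 addr, u32 val)
{
	printf("REG 0x%08lx <= 0x%08lx\n",
	       (unsigned long)addr, (unsigned long)val);
}

/* Walk a two-column "allmodes" table and write every {addr, value} pair. */
static void apply_allmodes_table(const u32 (*tbl)[2], size_t rows)
{
	for (size_t i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][1]);
}

int main(void)
{
	/* First rows of the rx gain table above, reproduced for the demo. */
	static const u32 demo_tbl[][2] = {
		{0x0000a000, 0x00010000},
		{0x0000a004, 0x00030002},
	};

	apply_allmodes_table(demo_tbl,
			     sizeof(demo_tbl) / sizeof(demo_tbl[0]));
	return 0;
}
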
946static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
947 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
948 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
949 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
950 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
951 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
952 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
953 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
954 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
955 {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
956 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
957 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
958 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
959 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
960 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
961 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
962 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
963 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
964 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
965 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
966 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
967 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
968 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
969 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
970 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
971 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
972 {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
973 {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
974 {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
975 {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
976 {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
977 {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
978 {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
979 {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
980 {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
981 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
982 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
983 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
984 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
985 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
986 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
987 {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
988 {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
989 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
990 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
991 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
992 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
993 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
994 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
995 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
996 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
997 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
998 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
999 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
1000 {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
1001 {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
1002 {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
1003 {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
1004 {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
1005 {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
1006 {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1007 {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1008 {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1009 {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1010 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1011 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1012 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1013 {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
1014 {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
1015 {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
1016 {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
1017};
1018
1019static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
1020 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1021 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1022 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1023 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
1024 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
1025 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
1026 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
1027 {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
1028 {0x0000a518, 0x21020220, 0x21020220, 0x15000402, 0x15000402},
1029 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
1030 {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
1031 {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
1032 {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
1033 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20},
1034 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20},
1035 {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
1036 {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
1037 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
1038 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
1039 {0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
1040 {0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
1041 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x42001a83, 0x42001a83},
1042 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x44001c84, 0x44001c84},
1043 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x48001ce3, 0x48001ce3},
1044 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x4c001ce5, 0x4c001ce5},
1045 {0x0000a55c, 0x7006308c, 0x7006308c, 0x50001ce9, 0x50001ce9},
1046 {0x0000a560, 0x730a308a, 0x730a308a, 0x54001ceb, 0x54001ceb},
1047 {0x0000a564, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
1048 {0x0000a568, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
1049 {0x0000a56c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
1050 {0x0000a570, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
1051 {0x0000a574, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
1052 {0x0000a578, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
1053 {0x0000a57c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
1054 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
1055 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
1056 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
1057 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
1058 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
1059 {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
1060 {0x0000a598, 0x21820220, 0x21820220, 0x15800402, 0x15800402},
1061 {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
1062 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
1063 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
1064 {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
1065 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
1066 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
1067 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
1068 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
1069 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
1070 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
1071 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b801861, 0x3b801861},
1072 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e801a81, 0x3e801a81},
1073 {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42801a83, 0x42801a83},
1074 {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x44801c84, 0x44801c84},
1075 {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x48801ce3, 0x48801ce3},
1076 {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c801ce5, 0x4c801ce5},
1077 {0x0000a5dc, 0x7086308c, 0x7086308c, 0x50801ce9, 0x50801ce9},
1078 {0x0000a5e0, 0x738a308a, 0x738a308a, 0x54801ceb, 0x54801ceb},
1079 {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1080 {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1081 {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1082 {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1083 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1084 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1085 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
1086 {0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
1087 {0x00016048, 0x24927266, 0x24927266, 0x8e483266, 0x8e483266},
1088 {0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
1089 {0x00016448, 0x24927266, 0x24927266, 0x8e482266, 0x8e482266},
1090};
1091
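The five-column "Modes" tables above differ only in that each register row carries one value per operating mode (see the 5G_HT20/5G_HT40/2G_HT40/2G_HT20 header comment); at init time a single column is selected for the current band and bandwidth and the resulting {address, value} pairs are written. The sketch below illustrates that selection under the same assumptions as the previous example; it is not ath9k code, and the enum and helper names are hypothetical.

/* Standalone sketch: pick one value column of a modal initvals table. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Column indices mirror the table header comment; column 0 is the address. */
enum ini_mode { MODE_5G_HT20 = 1, MODE_5G_HT40, MODE_2G_HT40, MODE_2G_HT20 };

/* Hypothetical stand-in for the driver's MMIO register write. */
static void reg_write(u32 addr, u32 val)
{
	printf("REG 0x%08lx <= 0x%08lx\n",
	       (unsigned long)addr, (unsigned long)val);
}

/* Write {addr, value[col]} for every row of a five-column modal table. */
static void apply_modal_table(const u32 (*tbl)[5], size_t rows,
			      enum ini_mode col)
{
	for (size_t i = 0; i < rows; i++)
		reg_write(tbl[i][0], tbl[i][col]);
}

int main(void)
{
	/* First rows of the mixed_ob_db tx gain table above. */
	static const u32 demo[][5] = {
		{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
		{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	};

	apply_modal_table(demo, sizeof(demo) / sizeof(demo[0]), MODE_2G_HT20);
	return 0;
}
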
1092static const u32 ar9340_1p0_mac_core[][2] = {
1093 /* Addr allmodes */
1094 {0x00000008, 0x00000000},
1095 {0x00000030, 0x00020085},
1096 {0x00000034, 0x00000005},
1097 {0x00000040, 0x00000000},
1098 {0x00000044, 0x00000000},
1099 {0x00000048, 0x00000008},
1100 {0x0000004c, 0x00000010},
1101 {0x00000050, 0x00000000},
1102 {0x00001040, 0x002ffc0f},
1103 {0x00001044, 0x002ffc0f},
1104 {0x00001048, 0x002ffc0f},
1105 {0x0000104c, 0x002ffc0f},
1106 {0x00001050, 0x002ffc0f},
1107 {0x00001054, 0x002ffc0f},
1108 {0x00001058, 0x002ffc0f},
1109 {0x0000105c, 0x002ffc0f},
1110 {0x00001060, 0x002ffc0f},
1111 {0x00001064, 0x002ffc0f},
1112 {0x000010f0, 0x00000100},
1113 {0x00001270, 0x00000000},
1114 {0x000012b0, 0x00000000},
1115 {0x000012f0, 0x00000000},
1116 {0x0000143c, 0x00000000},
1117 {0x0000147c, 0x00000000},
1118 {0x00008000, 0x00000000},
1119 {0x00008004, 0x00000000},
1120 {0x00008008, 0x00000000},
1121 {0x0000800c, 0x00000000},
1122 {0x00008018, 0x00000000},
1123 {0x00008020, 0x00000000},
1124 {0x00008038, 0x00000000},
1125 {0x0000803c, 0x00000000},
1126 {0x00008040, 0x00000000},
1127 {0x00008044, 0x00000000},
1128 {0x00008048, 0x00000000},
1129 {0x0000804c, 0xffffffff},
1130 {0x00008054, 0x00000000},
1131 {0x00008058, 0x00000000},
1132 {0x0000805c, 0x000fc78f},
1133 {0x00008060, 0x0000000f},
1134 {0x00008064, 0x00000000},
1135 {0x00008070, 0x00000310},
1136 {0x00008074, 0x00000020},
1137 {0x00008078, 0x00000000},
1138 {0x0000809c, 0x0000000f},
1139 {0x000080a0, 0x00000000},
1140 {0x000080a4, 0x02ff0000},
1141 {0x000080a8, 0x0e070605},
1142 {0x000080ac, 0x0000000d},
1143 {0x000080b0, 0x00000000},
1144 {0x000080b4, 0x00000000},
1145 {0x000080b8, 0x00000000},
1146 {0x000080bc, 0x00000000},
1147 {0x000080c0, 0x2a800000},
1148 {0x000080c4, 0x06900168},
1149 {0x000080c8, 0x13881c20},
1150 {0x000080cc, 0x01f40000},
1151 {0x000080d0, 0x00252500},
1152 {0x000080d4, 0x00a00000},
1153 {0x000080d8, 0x00400000},
1154 {0x000080dc, 0x00000000},
1155 {0x000080e0, 0xffffffff},
1156 {0x000080e4, 0x0000ffff},
1157 {0x000080e8, 0x3f3f3f3f},
1158 {0x000080ec, 0x00000000},
1159 {0x000080f0, 0x00000000},
1160 {0x000080f4, 0x00000000},
1161 {0x000080fc, 0x00020000},
1162 {0x00008100, 0x00000000},
1163 {0x00008108, 0x00000052},
1164 {0x0000810c, 0x00000000},
1165 {0x00008110, 0x00000000},
1166 {0x00008114, 0x000007ff},
1167 {0x00008118, 0x000000aa},
1168 {0x0000811c, 0x00003210},
1169 {0x00008124, 0x00000000},
1170 {0x00008128, 0x00000000},
1171 {0x0000812c, 0x00000000},
1172 {0x00008130, 0x00000000},
1173 {0x00008134, 0x00000000},
1174 {0x00008138, 0x00000000},
1175 {0x0000813c, 0x0000ffff},
1176 {0x00008144, 0xffffffff},
1177 {0x00008168, 0x00000000},
1178 {0x0000816c, 0x00000000},
1179 {0x00008170, 0x18486200},
1180 {0x00008174, 0x33332210},
1181 {0x00008178, 0x00000000},
1182 {0x0000817c, 0x00020000},
1183 {0x000081c0, 0x00000000},
1184 {0x000081c4, 0x33332210},
1185 {0x000081c8, 0x00000000},
1186 {0x000081cc, 0x00000000},
1187 {0x000081d4, 0x00000000},
1188 {0x000081ec, 0x00000000},
1189 {0x000081f0, 0x00000000},
1190 {0x000081f4, 0x00000000},
1191 {0x000081f8, 0x00000000},
1192 {0x000081fc, 0x00000000},
1193 {0x00008240, 0x00100000},
1194 {0x00008244, 0x0010f424},
1195 {0x00008248, 0x00000800},
1196 {0x0000824c, 0x0001e848},
1197 {0x00008250, 0x00000000},
1198 {0x00008254, 0x00000000},
1199 {0x00008258, 0x00000000},
1200 {0x0000825c, 0x40000000},
1201 {0x00008260, 0x00080922},
1202 {0x00008264, 0x9d400010},
1203 {0x00008268, 0xffffffff},
1204 {0x0000826c, 0x0000ffff},
1205 {0x00008270, 0x00000000},
1206 {0x00008274, 0x40000000},
1207 {0x00008278, 0x003e4180},
1208 {0x0000827c, 0x00000004},
1209 {0x00008284, 0x0000002c},
1210 {0x00008288, 0x0000002c},
1211 {0x0000828c, 0x000000ff},
1212 {0x00008294, 0x00000000},
1213 {0x00008298, 0x00000000},
1214 {0x0000829c, 0x00000000},
1215 {0x00008300, 0x00000140},
1216 {0x00008314, 0x00000000},
1217 {0x0000831c, 0x0000010d},
1218 {0x00008328, 0x00000000},
1219 {0x0000832c, 0x00000007},
1220 {0x00008330, 0x00000302},
1221 {0x00008334, 0x00000700},
1222 {0x00008338, 0x00ff0000},
1223 {0x0000833c, 0x02400000},
1224 {0x00008340, 0x000107ff},
1225 {0x00008344, 0xaa48105b},
1226 {0x00008348, 0x008f0000},
1227 {0x0000835c, 0x00000000},
1228 {0x00008360, 0xffffffff},
1229 {0x00008364, 0xffffffff},
1230 {0x00008368, 0x00000000},
1231 {0x00008370, 0x00000000},
1232 {0x00008374, 0x000000ff},
1233 {0x00008378, 0x00000000},
1234 {0x0000837c, 0x00000000},
1235 {0x00008380, 0xffffffff},
1236 {0x00008384, 0xffffffff},
1237 {0x00008390, 0xffffffff},
1238 {0x00008394, 0xffffffff},
1239 {0x00008398, 0x00000000},
1240 {0x0000839c, 0x00000000},
1241 {0x000083a0, 0x00000000},
1242 {0x000083a4, 0x0000fa14},
1243 {0x000083a8, 0x000f0c00},
1244 {0x000083ac, 0x33332210},
1245 {0x000083b0, 0x33332210},
1246 {0x000083b4, 0x33332210},
1247 {0x000083b8, 0x33332210},
1248 {0x000083bc, 0x00000000},
1249 {0x000083c0, 0x00000000},
1250 {0x000083c4, 0x00000000},
1251 {0x000083c8, 0x00000000},
1252 {0x000083cc, 0x00000200},
1253 {0x000083d0, 0x000301ff},
1254};
1255
1256static const u32 ar9340Common_wo_xlna_rx_gain_table_1p0[][2] = {
1257 /* Addr allmodes */
1258 {0x0000a000, 0x00010000},
1259 {0x0000a004, 0x00030002},
1260 {0x0000a008, 0x00050004},
1261 {0x0000a00c, 0x00810080},
1262 {0x0000a010, 0x00830082},
1263 {0x0000a014, 0x01810180},
1264 {0x0000a018, 0x01830182},
1265 {0x0000a01c, 0x01850184},
1266 {0x0000a020, 0x01890188},
1267 {0x0000a024, 0x018b018a},
1268 {0x0000a028, 0x018d018c},
1269 {0x0000a02c, 0x03820190},
1270 {0x0000a030, 0x03840383},
1271 {0x0000a034, 0x03880385},
1272 {0x0000a038, 0x038a0389},
1273 {0x0000a03c, 0x038c038b},
1274 {0x0000a040, 0x0390038d},
1275 {0x0000a044, 0x03920391},
1276 {0x0000a048, 0x03940393},
1277 {0x0000a04c, 0x03960395},
1278 {0x0000a050, 0x00000000},
1279 {0x0000a054, 0x00000000},
1280 {0x0000a058, 0x00000000},
1281 {0x0000a05c, 0x00000000},
1282 {0x0000a060, 0x00000000},
1283 {0x0000a064, 0x00000000},
1284 {0x0000a068, 0x00000000},
1285 {0x0000a06c, 0x00000000},
1286 {0x0000a070, 0x00000000},
1287 {0x0000a074, 0x00000000},
1288 {0x0000a078, 0x00000000},
1289 {0x0000a07c, 0x00000000},
1290 {0x0000a080, 0x29292929},
1291 {0x0000a084, 0x29292929},
1292 {0x0000a088, 0x29292929},
1293 {0x0000a08c, 0x29292929},
1294 {0x0000a090, 0x22292929},
1295 {0x0000a094, 0x1d1d2222},
1296 {0x0000a098, 0x0c111117},
1297 {0x0000a09c, 0x00030303},
1298 {0x0000a0a0, 0x00000000},
1299 {0x0000a0a4, 0x00000000},
1300 {0x0000a0a8, 0x00000000},
1301 {0x0000a0ac, 0x00000000},
1302 {0x0000a0b0, 0x00000000},
1303 {0x0000a0b4, 0x00000000},
1304 {0x0000a0b8, 0x00000000},
1305 {0x0000a0bc, 0x00000000},
1306 {0x0000a0c0, 0x001f0000},
1307 {0x0000a0c4, 0x01000101},
1308 {0x0000a0c8, 0x011e011f},
1309 {0x0000a0cc, 0x011c011d},
1310 {0x0000a0d0, 0x02030204},
1311 {0x0000a0d4, 0x02010202},
1312 {0x0000a0d8, 0x021f0200},
1313 {0x0000a0dc, 0x0302021e},
1314 {0x0000a0e0, 0x03000301},
1315 {0x0000a0e4, 0x031e031f},
1316 {0x0000a0e8, 0x0402031d},
1317 {0x0000a0ec, 0x04000401},
1318 {0x0000a0f0, 0x041e041f},
1319 {0x0000a0f4, 0x0502041d},
1320 {0x0000a0f8, 0x05000501},
1321 {0x0000a0fc, 0x051e051f},
1322 {0x0000a100, 0x06010602},
1323 {0x0000a104, 0x061f0600},
1324 {0x0000a108, 0x061d061e},
1325 {0x0000a10c, 0x07020703},
1326 {0x0000a110, 0x07000701},
1327 {0x0000a114, 0x00000000},
1328 {0x0000a118, 0x00000000},
1329 {0x0000a11c, 0x00000000},
1330 {0x0000a120, 0x00000000},
1331 {0x0000a124, 0x00000000},
1332 {0x0000a128, 0x00000000},
1333 {0x0000a12c, 0x00000000},
1334 {0x0000a130, 0x00000000},
1335 {0x0000a134, 0x00000000},
1336 {0x0000a138, 0x00000000},
1337 {0x0000a13c, 0x00000000},
1338 {0x0000a140, 0x001f0000},
1339 {0x0000a144, 0x01000101},
1340 {0x0000a148, 0x011e011f},
1341 {0x0000a14c, 0x011c011d},
1342 {0x0000a150, 0x02030204},
1343 {0x0000a154, 0x02010202},
1344 {0x0000a158, 0x021f0200},
1345 {0x0000a15c, 0x0302021e},
1346 {0x0000a160, 0x03000301},
1347 {0x0000a164, 0x031e031f},
1348 {0x0000a168, 0x0402031d},
1349 {0x0000a16c, 0x04000401},
1350 {0x0000a170, 0x041e041f},
1351 {0x0000a174, 0x0502041d},
1352 {0x0000a178, 0x05000501},
1353 {0x0000a17c, 0x051e051f},
1354 {0x0000a180, 0x06010602},
1355 {0x0000a184, 0x061f0600},
1356 {0x0000a188, 0x061d061e},
1357 {0x0000a18c, 0x07020703},
1358 {0x0000a190, 0x07000701},
1359 {0x0000a194, 0x00000000},
1360 {0x0000a198, 0x00000000},
1361 {0x0000a19c, 0x00000000},
1362 {0x0000a1a0, 0x00000000},
1363 {0x0000a1a4, 0x00000000},
1364 {0x0000a1a8, 0x00000000},
1365 {0x0000a1ac, 0x00000000},
1366 {0x0000a1b0, 0x00000000},
1367 {0x0000a1b4, 0x00000000},
1368 {0x0000a1b8, 0x00000000},
1369 {0x0000a1bc, 0x00000000},
1370 {0x0000a1c0, 0x00000000},
1371 {0x0000a1c4, 0x00000000},
1372 {0x0000a1c8, 0x00000000},
1373 {0x0000a1cc, 0x00000000},
1374 {0x0000a1d0, 0x00000000},
1375 {0x0000a1d4, 0x00000000},
1376 {0x0000a1d8, 0x00000000},
1377 {0x0000a1dc, 0x00000000},
1378 {0x0000a1e0, 0x00000000},
1379 {0x0000a1e4, 0x00000000},
1380 {0x0000a1e8, 0x00000000},
1381 {0x0000a1ec, 0x00000000},
1382 {0x0000a1f0, 0x00000396},
1383 {0x0000a1f4, 0x00000396},
1384 {0x0000a1f8, 0x00000396},
1385 {0x0000a1fc, 0x00000196},
1386 {0x0000b000, 0x00010000},
1387 {0x0000b004, 0x00030002},
1388 {0x0000b008, 0x00050004},
1389 {0x0000b00c, 0x00810080},
1390 {0x0000b010, 0x00830082},
1391 {0x0000b014, 0x01810180},
1392 {0x0000b018, 0x01830182},
1393 {0x0000b01c, 0x01850184},
1394 {0x0000b020, 0x02810280},
1395 {0x0000b024, 0x02830282},
1396 {0x0000b028, 0x02850284},
1397 {0x0000b02c, 0x02890288},
1398 {0x0000b030, 0x028b028a},
1399 {0x0000b034, 0x0388028c},
1400 {0x0000b038, 0x038a0389},
1401 {0x0000b03c, 0x038c038b},
1402 {0x0000b040, 0x0390038d},
1403 {0x0000b044, 0x03920391},
1404 {0x0000b048, 0x03940393},
1405 {0x0000b04c, 0x03960395},
1406 {0x0000b050, 0x00000000},
1407 {0x0000b054, 0x00000000},
1408 {0x0000b058, 0x00000000},
1409 {0x0000b05c, 0x00000000},
1410 {0x0000b060, 0x00000000},
1411 {0x0000b064, 0x00000000},
1412 {0x0000b068, 0x00000000},
1413 {0x0000b06c, 0x00000000},
1414 {0x0000b070, 0x00000000},
1415 {0x0000b074, 0x00000000},
1416 {0x0000b078, 0x00000000},
1417 {0x0000b07c, 0x00000000},
1418 {0x0000b080, 0x32323232},
1419 {0x0000b084, 0x2f2f3232},
1420 {0x0000b088, 0x23282a2d},
1421 {0x0000b08c, 0x1c1e2123},
1422 {0x0000b090, 0x14171919},
1423 {0x0000b094, 0x0e0e1214},
1424 {0x0000b098, 0x03050707},
1425 {0x0000b09c, 0x00030303},
1426 {0x0000b0a0, 0x00000000},
1427 {0x0000b0a4, 0x00000000},
1428 {0x0000b0a8, 0x00000000},
1429 {0x0000b0ac, 0x00000000},
1430 {0x0000b0b0, 0x00000000},
1431 {0x0000b0b4, 0x00000000},
1432 {0x0000b0b8, 0x00000000},
1433 {0x0000b0bc, 0x00000000},
1434 {0x0000b0c0, 0x003f0020},
1435 {0x0000b0c4, 0x00400041},
1436 {0x0000b0c8, 0x0140005f},
1437 {0x0000b0cc, 0x0160015f},
1438 {0x0000b0d0, 0x017e017f},
1439 {0x0000b0d4, 0x02410242},
1440 {0x0000b0d8, 0x025f0240},
1441 {0x0000b0dc, 0x027f0260},
1442 {0x0000b0e0, 0x0341027e},
1443 {0x0000b0e4, 0x035f0340},
1444 {0x0000b0e8, 0x037f0360},
1445 {0x0000b0ec, 0x04400441},
1446 {0x0000b0f0, 0x0460045f},
1447 {0x0000b0f4, 0x0541047f},
1448 {0x0000b0f8, 0x055f0540},
1449 {0x0000b0fc, 0x057f0560},
1450 {0x0000b100, 0x06400641},
1451 {0x0000b104, 0x0660065f},
1452 {0x0000b108, 0x067e067f},
1453 {0x0000b10c, 0x07410742},
1454 {0x0000b110, 0x075f0740},
1455 {0x0000b114, 0x077f0760},
1456 {0x0000b118, 0x07800781},
1457 {0x0000b11c, 0x07a0079f},
1458 {0x0000b120, 0x07c107bf},
1459 {0x0000b124, 0x000007c0},
1460 {0x0000b128, 0x00000000},
1461 {0x0000b12c, 0x00000000},
1462 {0x0000b130, 0x00000000},
1463 {0x0000b134, 0x00000000},
1464 {0x0000b138, 0x00000000},
1465 {0x0000b13c, 0x00000000},
1466 {0x0000b140, 0x003f0020},
1467 {0x0000b144, 0x00400041},
1468 {0x0000b148, 0x0140005f},
1469 {0x0000b14c, 0x0160015f},
1470 {0x0000b150, 0x017e017f},
1471 {0x0000b154, 0x02410242},
1472 {0x0000b158, 0x025f0240},
1473 {0x0000b15c, 0x027f0260},
1474 {0x0000b160, 0x0341027e},
1475 {0x0000b164, 0x035f0340},
1476 {0x0000b168, 0x037f0360},
1477 {0x0000b16c, 0x04400441},
1478 {0x0000b170, 0x0460045f},
1479 {0x0000b174, 0x0541047f},
1480 {0x0000b178, 0x055f0540},
1481 {0x0000b17c, 0x057f0560},
1482 {0x0000b180, 0x06400641},
1483 {0x0000b184, 0x0660065f},
1484 {0x0000b188, 0x067e067f},
1485 {0x0000b18c, 0x07410742},
1486 {0x0000b190, 0x075f0740},
1487 {0x0000b194, 0x077f0760},
1488 {0x0000b198, 0x07800781},
1489 {0x0000b19c, 0x07a0079f},
1490 {0x0000b1a0, 0x07c107bf},
1491 {0x0000b1a4, 0x000007c0},
1492 {0x0000b1a8, 0x00000000},
1493 {0x0000b1ac, 0x00000000},
1494 {0x0000b1b0, 0x00000000},
1495 {0x0000b1b4, 0x00000000},
1496 {0x0000b1b8, 0x00000000},
1497 {0x0000b1bc, 0x00000000},
1498 {0x0000b1c0, 0x00000000},
1499 {0x0000b1c4, 0x00000000},
1500 {0x0000b1c8, 0x00000000},
1501 {0x0000b1cc, 0x00000000},
1502 {0x0000b1d0, 0x00000000},
1503 {0x0000b1d4, 0x00000000},
1504 {0x0000b1d8, 0x00000000},
1505 {0x0000b1dc, 0x00000000},
1506 {0x0000b1e0, 0x00000000},
1507 {0x0000b1e4, 0x00000000},
1508 {0x0000b1e8, 0x00000000},
1509 {0x0000b1ec, 0x00000000},
1510 {0x0000b1f0, 0x00000396},
1511 {0x0000b1f4, 0x00000396},
1512 {0x0000b1f8, 0x00000396},
1513 {0x0000b1fc, 0x00000196},
1514};
1515
1516static const u32 ar9340_1p0_soc_preamble[][2] = {
1517 /* Addr allmodes */
1518 {0x000040a4, 0x00a0c1c9},
1519 {0x00007008, 0x00000000},
1520 {0x00007020, 0x00000000},
1521 {0x00007034, 0x00000002},
1522 {0x00007038, 0x000004c2},
1523};
1524
1525#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 71cc0a3a29fb..fbdde29f0ab8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -17,931 +17,6 @@
17 #ifndef INITVALS_9485_H
18 #define INITVALS_9485_H
19
20static const u32 ar9485Common_1_0[][2] = {
21 /* Addr allmodes */
22 {0x00007010, 0x00000022},
23 {0x00007020, 0x00000000},
24 {0x00007034, 0x00000002},
25 {0x00007038, 0x000004c2},
26};
27
28static const u32 ar9485_1_0_mac_postamble[][5] = {
29 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
30 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
31 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
32 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
33 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
34 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
35 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
36 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
37 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
38};
39
40static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
41 /* Addr allmodes */
42 {0x00018c00, 0x10212e5e},
43 {0x00018c04, 0x000801d8},
44 {0x00018c08, 0x0000580c},
45};
46
47static const u32 ar9485Common_wo_xlna_rx_gain_1_0[][2] = {
48 /* Addr allmodes */
49 {0x0000a000, 0x00010000},
50 {0x0000a004, 0x00030002},
51 {0x0000a008, 0x00050004},
52 {0x0000a00c, 0x00810080},
53 {0x0000a010, 0x01800082},
54 {0x0000a014, 0x01820181},
55 {0x0000a018, 0x01840183},
56 {0x0000a01c, 0x01880185},
57 {0x0000a020, 0x018a0189},
58 {0x0000a024, 0x02850284},
59 {0x0000a028, 0x02890288},
60 {0x0000a02c, 0x03850384},
61 {0x0000a030, 0x03890388},
62 {0x0000a034, 0x038b038a},
63 {0x0000a038, 0x038d038c},
64 {0x0000a03c, 0x03910390},
65 {0x0000a040, 0x03930392},
66 {0x0000a044, 0x03950394},
67 {0x0000a048, 0x00000396},
68 {0x0000a04c, 0x00000000},
69 {0x0000a050, 0x00000000},
70 {0x0000a054, 0x00000000},
71 {0x0000a058, 0x00000000},
72 {0x0000a05c, 0x00000000},
73 {0x0000a060, 0x00000000},
74 {0x0000a064, 0x00000000},
75 {0x0000a068, 0x00000000},
76 {0x0000a06c, 0x00000000},
77 {0x0000a070, 0x00000000},
78 {0x0000a074, 0x00000000},
79 {0x0000a078, 0x00000000},
80 {0x0000a07c, 0x00000000},
81 {0x0000a080, 0x28282828},
82 {0x0000a084, 0x28282828},
83 {0x0000a088, 0x28282828},
84 {0x0000a08c, 0x28282828},
85 {0x0000a090, 0x28282828},
86 {0x0000a094, 0x21212128},
87 {0x0000a098, 0x171c1c1c},
88 {0x0000a09c, 0x02020212},
89 {0x0000a0a0, 0x00000202},
90 {0x0000a0a4, 0x00000000},
91 {0x0000a0a8, 0x00000000},
92 {0x0000a0ac, 0x00000000},
93 {0x0000a0b0, 0x00000000},
94 {0x0000a0b4, 0x00000000},
95 {0x0000a0b8, 0x00000000},
96 {0x0000a0bc, 0x00000000},
97 {0x0000a0c0, 0x001f0000},
98 {0x0000a0c4, 0x111f1100},
99 {0x0000a0c8, 0x111d111e},
100 {0x0000a0cc, 0x111b111c},
101 {0x0000a0d0, 0x22032204},
102 {0x0000a0d4, 0x22012202},
103 {0x0000a0d8, 0x221f2200},
104 {0x0000a0dc, 0x221d221e},
105 {0x0000a0e0, 0x33013302},
106 {0x0000a0e4, 0x331f3300},
107 {0x0000a0e8, 0x4402331e},
108 {0x0000a0ec, 0x44004401},
109 {0x0000a0f0, 0x441e441f},
110 {0x0000a0f4, 0x55015502},
111 {0x0000a0f8, 0x551f5500},
112 {0x0000a0fc, 0x6602551e},
113 {0x0000a100, 0x66006601},
114 {0x0000a104, 0x661e661f},
115 {0x0000a108, 0x7703661d},
116 {0x0000a10c, 0x77017702},
117 {0x0000a110, 0x00007700},
118 {0x0000a114, 0x00000000},
119 {0x0000a118, 0x00000000},
120 {0x0000a11c, 0x00000000},
121 {0x0000a120, 0x00000000},
122 {0x0000a124, 0x00000000},
123 {0x0000a128, 0x00000000},
124 {0x0000a12c, 0x00000000},
125 {0x0000a130, 0x00000000},
126 {0x0000a134, 0x00000000},
127 {0x0000a138, 0x00000000},
128 {0x0000a13c, 0x00000000},
129 {0x0000a140, 0x001f0000},
130 {0x0000a144, 0x111f1100},
131 {0x0000a148, 0x111d111e},
132 {0x0000a14c, 0x111b111c},
133 {0x0000a150, 0x22032204},
134 {0x0000a154, 0x22012202},
135 {0x0000a158, 0x221f2200},
136 {0x0000a15c, 0x221d221e},
137 {0x0000a160, 0x33013302},
138 {0x0000a164, 0x331f3300},
139 {0x0000a168, 0x4402331e},
140 {0x0000a16c, 0x44004401},
141 {0x0000a170, 0x441e441f},
142 {0x0000a174, 0x55015502},
143 {0x0000a178, 0x551f5500},
144 {0x0000a17c, 0x6602551e},
145 {0x0000a180, 0x66006601},
146 {0x0000a184, 0x661e661f},
147 {0x0000a188, 0x7703661d},
148 {0x0000a18c, 0x77017702},
149 {0x0000a190, 0x00007700},
150 {0x0000a194, 0x00000000},
151 {0x0000a198, 0x00000000},
152 {0x0000a19c, 0x00000000},
153 {0x0000a1a0, 0x00000000},
154 {0x0000a1a4, 0x00000000},
155 {0x0000a1a8, 0x00000000},
156 {0x0000a1ac, 0x00000000},
157 {0x0000a1b0, 0x00000000},
158 {0x0000a1b4, 0x00000000},
159 {0x0000a1b8, 0x00000000},
160 {0x0000a1bc, 0x00000000},
161 {0x0000a1c0, 0x00000000},
162 {0x0000a1c4, 0x00000000},
163 {0x0000a1c8, 0x00000000},
164 {0x0000a1cc, 0x00000000},
165 {0x0000a1d0, 0x00000000},
166 {0x0000a1d4, 0x00000000},
167 {0x0000a1d8, 0x00000000},
168 {0x0000a1dc, 0x00000000},
169 {0x0000a1e0, 0x00000000},
170 {0x0000a1e4, 0x00000000},
171 {0x0000a1e8, 0x00000000},
172 {0x0000a1ec, 0x00000000},
173 {0x0000a1f0, 0x00000396},
174 {0x0000a1f4, 0x00000396},
175 {0x0000a1f8, 0x00000396},
176 {0x0000a1fc, 0x00000296},
177};
178
179static const u32 ar9485Modes_high_power_tx_gain_1_0[][5] = {
180 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
181 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
182 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
183 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
184 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
185 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
186 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
187 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
188 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
189 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
190 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
191 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
192 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
193 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
194 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
195 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
196 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
197 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
198 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
199 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
200 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
201 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
202 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
203 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
204 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
205 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
206 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
207 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
208 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
209 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
210 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
211 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
212 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
213 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
214 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
215};
216
217static const u32 ar9485_1_0[][2] = {
218 /* Addr allmodes */
219 {0x0000a580, 0x00000000},
220 {0x0000a584, 0x00000000},
221 {0x0000a588, 0x00000000},
222 {0x0000a58c, 0x00000000},
223 {0x0000a590, 0x00000000},
224 {0x0000a594, 0x00000000},
225 {0x0000a598, 0x00000000},
226 {0x0000a59c, 0x00000000},
227 {0x0000a5a0, 0x00000000},
228 {0x0000a5a4, 0x00000000},
229 {0x0000a5a8, 0x00000000},
230 {0x0000a5ac, 0x00000000},
231 {0x0000a5b0, 0x00000000},
232 {0x0000a5b4, 0x00000000},
233 {0x0000a5b8, 0x00000000},
234 {0x0000a5bc, 0x00000000},
235};
236
237static const u32 ar9485_1_0_radio_core[][2] = {
238 /* Addr allmodes */
239 {0x00016000, 0x36db6db6},
240 {0x00016004, 0x6db6db40},
241 {0x00016008, 0x73800000},
242 {0x0001600c, 0x00000000},
243 {0x00016040, 0x7f80fff8},
244 {0x00016048, 0x6c92426e},
245 {0x0001604c, 0x000f0278},
246 {0x00016050, 0x6db6db6c},
247 {0x00016054, 0x6db60000},
248 {0x00016080, 0x00080000},
249 {0x00016084, 0x0e48048c},
250 {0x00016088, 0x14214514},
251 {0x0001608c, 0x119f081e},
252 {0x00016090, 0x24926490},
253 {0x00016098, 0xd28b3330},
254 {0x000160a0, 0xc2108ffe},
255 {0x000160a4, 0x812fc370},
256 {0x000160a8, 0x423c8000},
257 {0x000160b4, 0x92480040},
258 {0x000160c0, 0x006db6db},
259 {0x000160c4, 0x0186db60},
260 {0x000160c8, 0x6db6db6c},
261 {0x000160cc, 0x6de6fbe0},
262 {0x000160d0, 0xf7dfcf3c},
263 {0x00016100, 0x04cb0001},
264 {0x00016104, 0xfff80015},
265 {0x00016108, 0x00080010},
266 {0x00016144, 0x01884080},
267 {0x00016148, 0x00008040},
268 {0x00016180, 0x08453333},
269 {0x00016184, 0x18e82f01},
270 {0x00016188, 0x00000000},
271 {0x0001618c, 0x00000000},
272 {0x00016240, 0x08400000},
273 {0x00016244, 0x1bf90f00},
274 {0x00016248, 0x00000000},
275 {0x0001624c, 0x00000000},
276 {0x00016280, 0x01000015},
277 {0x00016284, 0x00d30000},
278 {0x00016288, 0x00318000},
279 {0x0001628c, 0x50000000},
280 {0x00016290, 0x4b96210f},
281 {0x00016380, 0x00000000},
282 {0x00016384, 0x00000000},
283 {0x00016388, 0x00800700},
284 {0x0001638c, 0x00800700},
285 {0x00016390, 0x00800700},
286 {0x00016394, 0x00000000},
287 {0x00016398, 0x00000000},
288 {0x0001639c, 0x00000000},
289 {0x000163a0, 0x00000001},
290 {0x000163a4, 0x00000001},
291 {0x000163a8, 0x00000000},
292 {0x000163ac, 0x00000000},
293 {0x000163b0, 0x00000000},
294 {0x000163b4, 0x00000000},
295 {0x000163b8, 0x00000000},
296 {0x000163bc, 0x00000000},
297 {0x000163c0, 0x000000a0},
298 {0x000163c4, 0x000c0000},
299 {0x000163c8, 0x14021402},
300 {0x000163cc, 0x00001402},
301 {0x000163d0, 0x00000000},
302 {0x000163d4, 0x00000000},
303 {0x00016c40, 0x1319c178},
304 {0x00016c44, 0x10000000},
305};
306
307static const u32 ar9485Modes_lowest_ob_db_tx_gain_1_0[][5] = {
308 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
309 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
310 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
311 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
312 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
313 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
314 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
315 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
316 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
317 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
318 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
319 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
320 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
321 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
322 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
323 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
324 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
325 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
326 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
327 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
328 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
329 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
330 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
331 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
332 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
333 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
334 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
335 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
336 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
337 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
338 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
339 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
340 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
341 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
342 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
343};
344
345static const u32 ar9485_1_0_baseband_core[][2] = {
346 /* Addr allmodes */
347 {0x00009800, 0xafe68e30},
348 {0x00009804, 0xfd14e000},
349 {0x00009808, 0x9c0a8f6b},
350 {0x0000980c, 0x04800000},
351 {0x00009814, 0x9280c00a},
352 {0x00009818, 0x00000000},
353 {0x0000981c, 0x00020028},
354 {0x00009834, 0x5f3ca3de},
355 {0x00009838, 0x0108ecff},
356 {0x0000983c, 0x14750600},
357 {0x00009880, 0x201fff00},
358 {0x00009884, 0x00001042},
359 {0x000098a4, 0x00200400},
360 {0x000098b0, 0x52440bbe},
361 {0x000098bc, 0x00000002},
362 {0x000098d0, 0x004b6a8e},
363 {0x000098d4, 0x00000820},
364 {0x000098dc, 0x00000000},
365 {0x000098f0, 0x00000000},
366 {0x000098f4, 0x00000000},
367 {0x00009c04, 0x00000000},
368 {0x00009c08, 0x03200000},
369 {0x00009c0c, 0x00000000},
370 {0x00009c10, 0x00000000},
371 {0x00009c14, 0x00046384},
372 {0x00009c18, 0x05b6b440},
373 {0x00009c1c, 0x00b6b440},
374 {0x00009d00, 0xc080a333},
375 {0x00009d04, 0x40206c10},
376 {0x00009d08, 0x009c4060},
377 {0x00009d0c, 0x1883800a},
378 {0x00009d10, 0x01834061},
379 {0x00009d14, 0x00c00400},
380 {0x00009d18, 0x00000000},
381 {0x00009d1c, 0x00000000},
382 {0x00009e08, 0x0038233c},
383 {0x00009e24, 0x990bb515},
384 {0x00009e28, 0x0a6f0000},
385 {0x00009e30, 0x06336f77},
386 {0x00009e34, 0x6af6532f},
387 {0x00009e38, 0x0cc80c00},
388 {0x00009e40, 0x0d261820},
389 {0x00009e4c, 0x00001004},
390 {0x00009e50, 0x00ff03f1},
391 {0x00009fc0, 0x80be4788},
392 {0x00009fc4, 0x0001efb5},
393 {0x00009fcc, 0x40000014},
394 {0x0000a20c, 0x00000000},
395 {0x0000a210, 0x00000000},
396 {0x0000a220, 0x00000000},
397 {0x0000a224, 0x00000000},
398 {0x0000a228, 0x10002310},
399 {0x0000a23c, 0x00000000},
400 {0x0000a244, 0x0c000000},
401 {0x0000a2a0, 0x00000001},
402 {0x0000a2c0, 0x00000001},
403 {0x0000a2c8, 0x00000000},
404 {0x0000a2cc, 0x18c43433},
405 {0x0000a2d4, 0x00000000},
406 {0x0000a2dc, 0x00000000},
407 {0x0000a2e0, 0x00000000},
408 {0x0000a2e4, 0x00000000},
409 {0x0000a2e8, 0x00000000},
410 {0x0000a2ec, 0x00000000},
411 {0x0000a2f0, 0x00000000},
412 {0x0000a2f4, 0x00000000},
413 {0x0000a2f8, 0x00000000},
414 {0x0000a344, 0x00000000},
415 {0x0000a34c, 0x00000000},
416 {0x0000a350, 0x0000a000},
417 {0x0000a364, 0x00000000},
418 {0x0000a370, 0x00000000},
419 {0x0000a390, 0x00000001},
420 {0x0000a394, 0x00000444},
421 {0x0000a398, 0x001f0e0f},
422 {0x0000a39c, 0x0075393f},
423 {0x0000a3a0, 0xb79f6427},
424 {0x0000a3a4, 0x00000000},
425 {0x0000a3a8, 0xaaaaaaaa},
426 {0x0000a3ac, 0x3c466478},
427 {0x0000a3c0, 0x20202020},
428 {0x0000a3c4, 0x22222220},
429 {0x0000a3c8, 0x20200020},
430 {0x0000a3cc, 0x20202020},
431 {0x0000a3d0, 0x20202020},
432 {0x0000a3d4, 0x20202020},
433 {0x0000a3d8, 0x20202020},
434 {0x0000a3dc, 0x20202020},
435 {0x0000a3e0, 0x20202020},
436 {0x0000a3e4, 0x20202020},
437 {0x0000a3e8, 0x20202020},
438 {0x0000a3ec, 0x20202020},
439 {0x0000a3f0, 0x00000000},
440 {0x0000a3f4, 0x00000006},
441 {0x0000a3f8, 0x0cdbd380},
442 {0x0000a3fc, 0x000f0f01},
443 {0x0000a400, 0x8fa91f01},
444 {0x0000a404, 0x00000000},
445 {0x0000a408, 0x0e79e5c6},
446 {0x0000a40c, 0x00820820},
447 {0x0000a414, 0x1ce739ce},
448 {0x0000a418, 0x2d0011ce},
449 {0x0000a41c, 0x1ce739ce},
450 {0x0000a420, 0x000001ce},
451 {0x0000a424, 0x1ce739ce},
452 {0x0000a428, 0x000001ce},
453 {0x0000a42c, 0x1ce739ce},
454 {0x0000a430, 0x1ce739ce},
455 {0x0000a434, 0x00000000},
456 {0x0000a438, 0x00001801},
457 {0x0000a43c, 0x00000000},
458 {0x0000a440, 0x00000000},
459 {0x0000a444, 0x00000000},
460 {0x0000a448, 0x04000000},
461 {0x0000a44c, 0x00000001},
462 {0x0000a450, 0x00010000},
463 {0x0000a458, 0x00000000},
464 {0x0000a5c4, 0x3fad9d74},
465 {0x0000a5c8, 0x0048060a},
466 {0x0000a5cc, 0x00000637},
467 {0x0000a760, 0x03020100},
468 {0x0000a764, 0x09080504},
469 {0x0000a768, 0x0d0c0b0a},
470 {0x0000a76c, 0x13121110},
471 {0x0000a770, 0x31301514},
472 {0x0000a774, 0x35343332},
473 {0x0000a778, 0x00000036},
474 {0x0000a780, 0x00000838},
475 {0x0000a7c0, 0x00000000},
476 {0x0000a7c4, 0xfffffffc},
477 {0x0000a7c8, 0x00000000},
478 {0x0000a7cc, 0x00000000},
479 {0x0000a7d0, 0x00000000},
480 {0x0000a7d4, 0x00000004},
481 {0x0000a7dc, 0x00000001},
482};
483
484static const u32 ar9485Modes_high_ob_db_tx_gain_1_0[][5] = {
485 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
486 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
487 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
488 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
489 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
490 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
491 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
492 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
493 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
494 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
495 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
496 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
497 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
498 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
499 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
500 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
501 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
502 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
503 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
504 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
505 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
506 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
507 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
508 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
509 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
510 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
511 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
512 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
513 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
514 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
515 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
516 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
517 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
518 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
519 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
520};
521
522static const u32 ar9485Common_rx_gain_1_0[][2] = {
523 /* Addr allmodes */
524 {0x0000a000, 0x00010000},
525 {0x0000a004, 0x00030002},
526 {0x0000a008, 0x00050004},
527 {0x0000a00c, 0x00810080},
528 {0x0000a010, 0x01800082},
529 {0x0000a014, 0x01820181},
530 {0x0000a018, 0x01840183},
531 {0x0000a01c, 0x01880185},
532 {0x0000a020, 0x018a0189},
533 {0x0000a024, 0x02850284},
534 {0x0000a028, 0x02890288},
535 {0x0000a02c, 0x03850384},
536 {0x0000a030, 0x03890388},
537 {0x0000a034, 0x038b038a},
538 {0x0000a038, 0x038d038c},
539 {0x0000a03c, 0x03910390},
540 {0x0000a040, 0x03930392},
541 {0x0000a044, 0x03950394},
542 {0x0000a048, 0x00000396},
543 {0x0000a04c, 0x00000000},
544 {0x0000a050, 0x00000000},
545 {0x0000a054, 0x00000000},
546 {0x0000a058, 0x00000000},
547 {0x0000a05c, 0x00000000},
548 {0x0000a060, 0x00000000},
549 {0x0000a064, 0x00000000},
550 {0x0000a068, 0x00000000},
551 {0x0000a06c, 0x00000000},
552 {0x0000a070, 0x00000000},
553 {0x0000a074, 0x00000000},
554 {0x0000a078, 0x00000000},
555 {0x0000a07c, 0x00000000},
556 {0x0000a080, 0x28282828},
557 {0x0000a084, 0x28282828},
558 {0x0000a088, 0x28282828},
559 {0x0000a08c, 0x28282828},
560 {0x0000a090, 0x28282828},
561 {0x0000a094, 0x21212128},
562 {0x0000a098, 0x171c1c1c},
563 {0x0000a09c, 0x02020212},
564 {0x0000a0a0, 0x00000202},
565 {0x0000a0a4, 0x00000000},
566 {0x0000a0a8, 0x00000000},
567 {0x0000a0ac, 0x00000000},
568 {0x0000a0b0, 0x00000000},
569 {0x0000a0b4, 0x00000000},
570 {0x0000a0b8, 0x00000000},
571 {0x0000a0bc, 0x00000000},
572 {0x0000a0c0, 0x001f0000},
573 {0x0000a0c4, 0x111f1100},
574 {0x0000a0c8, 0x111d111e},
575 {0x0000a0cc, 0x111b111c},
576 {0x0000a0d0, 0x22032204},
577 {0x0000a0d4, 0x22012202},
578 {0x0000a0d8, 0x221f2200},
579 {0x0000a0dc, 0x221d221e},
580 {0x0000a0e0, 0x33013302},
581 {0x0000a0e4, 0x331f3300},
582 {0x0000a0e8, 0x4402331e},
583 {0x0000a0ec, 0x44004401},
584 {0x0000a0f0, 0x441e441f},
585 {0x0000a0f4, 0x55015502},
586 {0x0000a0f8, 0x551f5500},
587 {0x0000a0fc, 0x6602551e},
588 {0x0000a100, 0x66006601},
589 {0x0000a104, 0x661e661f},
590 {0x0000a108, 0x7703661d},
591 {0x0000a10c, 0x77017702},
592 {0x0000a110, 0x00007700},
593 {0x0000a114, 0x00000000},
594 {0x0000a118, 0x00000000},
595 {0x0000a11c, 0x00000000},
596 {0x0000a120, 0x00000000},
597 {0x0000a124, 0x00000000},
598 {0x0000a128, 0x00000000},
599 {0x0000a12c, 0x00000000},
600 {0x0000a130, 0x00000000},
601 {0x0000a134, 0x00000000},
602 {0x0000a138, 0x00000000},
603 {0x0000a13c, 0x00000000},
604 {0x0000a140, 0x001f0000},
605 {0x0000a144, 0x111f1100},
606 {0x0000a148, 0x111d111e},
607 {0x0000a14c, 0x111b111c},
608 {0x0000a150, 0x22032204},
609 {0x0000a154, 0x22012202},
610 {0x0000a158, 0x221f2200},
611 {0x0000a15c, 0x221d221e},
612 {0x0000a160, 0x33013302},
613 {0x0000a164, 0x331f3300},
614 {0x0000a168, 0x4402331e},
615 {0x0000a16c, 0x44004401},
616 {0x0000a170, 0x441e441f},
617 {0x0000a174, 0x55015502},
618 {0x0000a178, 0x551f5500},
619 {0x0000a17c, 0x6602551e},
620 {0x0000a180, 0x66006601},
621 {0x0000a184, 0x661e661f},
622 {0x0000a188, 0x7703661d},
623 {0x0000a18c, 0x77017702},
624 {0x0000a190, 0x00007700},
625 {0x0000a194, 0x00000000},
626 {0x0000a198, 0x00000000},
627 {0x0000a19c, 0x00000000},
628 {0x0000a1a0, 0x00000000},
629 {0x0000a1a4, 0x00000000},
630 {0x0000a1a8, 0x00000000},
631 {0x0000a1ac, 0x00000000},
632 {0x0000a1b0, 0x00000000},
633 {0x0000a1b4, 0x00000000},
634 {0x0000a1b8, 0x00000000},
635 {0x0000a1bc, 0x00000000},
636 {0x0000a1c0, 0x00000000},
637 {0x0000a1c4, 0x00000000},
638 {0x0000a1c8, 0x00000000},
639 {0x0000a1cc, 0x00000000},
640 {0x0000a1d0, 0x00000000},
641 {0x0000a1d4, 0x00000000},
642 {0x0000a1d8, 0x00000000},
643 {0x0000a1dc, 0x00000000},
644 {0x0000a1e0, 0x00000000},
645 {0x0000a1e4, 0x00000000},
646 {0x0000a1e8, 0x00000000},
647 {0x0000a1ec, 0x00000000},
648 {0x0000a1f0, 0x00000396},
649 {0x0000a1f4, 0x00000396},
650 {0x0000a1f8, 0x00000396},
651 {0x0000a1fc, 0x00000296},
652};
653
654static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
655 /* Addr allmodes */
656 {0x00018c00, 0x10252e5e},
657 {0x00018c04, 0x000801d8},
658 {0x00018c08, 0x0000580c},
659};
660
661static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = {
662 /* Addr allmodes */
663 {0x00018c00, 0x10253e5e},
664 {0x00018c04, 0x000801d8},
665 {0x00018c08, 0x0000580c},
666};
667
668static const u32 ar9485_1_0_soc_preamble[][2] = {
669 /* Addr allmodes */
670 {0x00004090, 0x00aa10aa},
671 {0x000040a4, 0x00a0c9c9},
672 {0x00007048, 0x00000004},
673};
674
675static const u32 ar9485_fast_clock_1_0_baseband_postamble[][3] = {
676 /* Addr 5G_HT20 5G_HT40 */
677 {0x00009e00, 0x03721821, 0x03721821},
678 {0x0000a230, 0x0000400b, 0x00004016},
679 {0x0000a254, 0x00000898, 0x00001130},
680};
681
682static const u32 ar9485_1_0_baseband_postamble[][5] = {
683 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
684 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
685 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
686 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
687 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
688 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
689 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
690 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
691 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
692 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
693 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
694 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
695 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
696 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
697 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
698 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
699 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
700 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
701 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
702 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
703 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
704 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
705 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
706 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
707 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
708 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
709 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
710 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
711 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
712 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
713 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
714 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
715 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
716 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
717 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
718 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
719 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
720 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
721 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
722 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
723 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
724 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
725};
726
727static const u32 ar9485Modes_low_ob_db_tx_gain_1_0[][5] = {
728 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
729 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
730 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
731 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
732 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
733 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
734 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
735 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
736 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
737 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
738 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
739 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
740 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
741 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
742 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
743 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
744 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
745 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
746 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
747 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
748 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
749 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
750 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
751 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
752 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
753 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
754 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
755 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
756 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
757 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
758 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
759 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
760 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
761 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
762 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
763};
764
765static const u32 ar9485_1_0_pcie_phy_clkreq_disable_L1[][2] = {
766 /* Addr allmodes */
767 {0x00018c00, 0x10213e5e},
768 {0x00018c04, 0x000801d8},
769 {0x00018c08, 0x0000580c},
770};
771
772static const u32 ar9485_1_0_radio_postamble[][2] = {
773 /* Addr allmodes */
774 {0x0001609c, 0x0b283f31},
775 {0x000160ac, 0x24611800},
776 {0x000160b0, 0x03284f3e},
777 {0x0001610c, 0x00170000},
778 {0x00016140, 0x10804008},
779};
780
781static const u32 ar9485_1_0_mac_core[][2] = {
782 /* Addr allmodes */
783 {0x00000008, 0x00000000},
784 {0x00000030, 0x00020085},
785 {0x00000034, 0x00000005},
786 {0x00000040, 0x00000000},
787 {0x00000044, 0x00000000},
788 {0x00000048, 0x00000008},
789 {0x0000004c, 0x00000010},
790 {0x00000050, 0x00000000},
791 {0x00001040, 0x002ffc0f},
792 {0x00001044, 0x002ffc0f},
793 {0x00001048, 0x002ffc0f},
794 {0x0000104c, 0x002ffc0f},
795 {0x00001050, 0x002ffc0f},
796 {0x00001054, 0x002ffc0f},
797 {0x00001058, 0x002ffc0f},
798 {0x0000105c, 0x002ffc0f},
799 {0x00001060, 0x002ffc0f},
800 {0x00001064, 0x002ffc0f},
801 {0x000010f0, 0x00000100},
802 {0x00001270, 0x00000000},
803 {0x000012b0, 0x00000000},
804 {0x000012f0, 0x00000000},
805 {0x0000143c, 0x00000000},
806 {0x0000147c, 0x00000000},
807 {0x00008000, 0x00000000},
808 {0x00008004, 0x00000000},
809 {0x00008008, 0x00000000},
810 {0x0000800c, 0x00000000},
811 {0x00008018, 0x00000000},
812 {0x00008020, 0x00000000},
813 {0x00008038, 0x00000000},
814 {0x0000803c, 0x00000000},
815 {0x00008040, 0x00000000},
816 {0x00008044, 0x00000000},
817 {0x00008048, 0x00000000},
818 {0x0000804c, 0xffffffff},
819 {0x00008054, 0x00000000},
820 {0x00008058, 0x00000000},
821 {0x0000805c, 0x000fc78f},
822 {0x00008060, 0x0000000f},
823 {0x00008064, 0x00000000},
824 {0x00008070, 0x00000310},
825 {0x00008074, 0x00000020},
826 {0x00008078, 0x00000000},
827 {0x0000809c, 0x0000000f},
828 {0x000080a0, 0x00000000},
829 {0x000080a4, 0x02ff0000},
830 {0x000080a8, 0x0e070605},
831 {0x000080ac, 0x0000000d},
832 {0x000080b0, 0x00000000},
833 {0x000080b4, 0x00000000},
834 {0x000080b8, 0x00000000},
835 {0x000080bc, 0x00000000},
836 {0x000080c0, 0x2a800000},
837 {0x000080c4, 0x06900168},
838 {0x000080c8, 0x13881c20},
839 {0x000080cc, 0x01f40000},
840 {0x000080d0, 0x00252500},
841 {0x000080d4, 0x00a00000},
842 {0x000080d8, 0x00400000},
843 {0x000080dc, 0x00000000},
844 {0x000080e0, 0xffffffff},
845 {0x000080e4, 0x0000ffff},
846 {0x000080e8, 0x3f3f3f3f},
847 {0x000080ec, 0x00000000},
848 {0x000080f0, 0x00000000},
849 {0x000080f4, 0x00000000},
850 {0x000080fc, 0x00020000},
851 {0x00008100, 0x00000000},
852 {0x00008108, 0x00000052},
853 {0x0000810c, 0x00000000},
854 {0x00008110, 0x00000000},
855 {0x00008114, 0x000007ff},
856 {0x00008118, 0x000000aa},
857 {0x0000811c, 0x00003210},
858 {0x00008124, 0x00000000},
859 {0x00008128, 0x00000000},
860 {0x0000812c, 0x00000000},
861 {0x00008130, 0x00000000},
862 {0x00008134, 0x00000000},
863 {0x00008138, 0x00000000},
864 {0x0000813c, 0x0000ffff},
865 {0x00008144, 0xffffffff},
866 {0x00008168, 0x00000000},
867 {0x0000816c, 0x00000000},
868 {0x00008170, 0x18486200},
869 {0x00008174, 0x33332210},
870 {0x00008178, 0x00000000},
871 {0x0000817c, 0x00020000},
872 {0x000081c0, 0x00000000},
873 {0x000081c4, 0x33332210},
874 {0x000081c8, 0x00000000},
875 {0x000081cc, 0x00000000},
876 {0x000081d4, 0x00000000},
877 {0x000081ec, 0x00000000},
878 {0x000081f0, 0x00000000},
879 {0x000081f4, 0x00000000},
880 {0x000081f8, 0x00000000},
881 {0x000081fc, 0x00000000},
882 {0x00008240, 0x00100000},
883 {0x00008244, 0x0010f400},
884 {0x00008248, 0x00000800},
885 {0x0000824c, 0x0001e800},
886 {0x00008250, 0x00000000},
887 {0x00008254, 0x00000000},
888 {0x00008258, 0x00000000},
889 {0x0000825c, 0x40000000},
890 {0x00008260, 0x00080922},
891 {0x00008264, 0x9ca00010},
892 {0x00008268, 0xffffffff},
893 {0x0000826c, 0x0000ffff},
894 {0x00008270, 0x00000000},
895 {0x00008274, 0x40000000},
896 {0x00008278, 0x003e4180},
897 {0x0000827c, 0x00000004},
898 {0x00008284, 0x0000002c},
899 {0x00008288, 0x0000002c},
900 {0x0000828c, 0x000000ff},
901 {0x00008294, 0x00000000},
902 {0x00008298, 0x00000000},
903 {0x0000829c, 0x00000000},
904 {0x00008300, 0x00000140},
905 {0x00008314, 0x00000000},
906 {0x0000831c, 0x0000010d},
907 {0x00008328, 0x00000000},
908 {0x0000832c, 0x00000007},
909 {0x00008330, 0x00000302},
910 {0x00008334, 0x00000700},
911 {0x00008338, 0x00ff0000},
912 {0x0000833c, 0x02400000},
913 {0x00008340, 0x000107ff},
914 {0x00008344, 0xa248105b},
915 {0x00008348, 0x008f0000},
916 {0x0000835c, 0x00000000},
917 {0x00008360, 0xffffffff},
918 {0x00008364, 0xffffffff},
919 {0x00008368, 0x00000000},
920 {0x00008370, 0x00000000},
921 {0x00008374, 0x000000ff},
922 {0x00008378, 0x00000000},
923 {0x0000837c, 0x00000000},
924 {0x00008380, 0xffffffff},
925 {0x00008384, 0xffffffff},
926 {0x00008390, 0xffffffff},
927 {0x00008394, 0xffffffff},
928 {0x00008398, 0x00000000},
929 {0x0000839c, 0x00000000},
930 {0x000083a0, 0x00000000},
931 {0x000083a4, 0x0000fa14},
932 {0x000083a8, 0x000f0c00},
933 {0x000083ac, 0x33332210},
934 {0x000083b0, 0x33332210},
935 {0x000083b4, 0x33332210},
936 {0x000083b8, 0x33332210},
937 {0x000083bc, 0x00000000},
938 {0x000083c0, 0x00000000},
939 {0x000083c4, 0x00000000},
940 {0x000083c8, 0x00000000},
941 {0x000083cc, 0x00000200},
942 {0x000083d0, 0x000301ff},
943};
944
945static const u32 ar9485_1_1_mac_core[][2] = { 20static const u32 ar9485_1_1_mac_core[][2] = {
946 /* Addr allmodes */ 21 /* Addr allmodes */
947 {0x00000008, 0x00000000}, 22 {0x00000008, 0x00000000},
@@ -1321,7 +396,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
1321 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 396 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1322 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 397 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1323 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 398 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1324 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, 399 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
1325 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 400 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1326 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 401 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1327 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 402 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
@@ -1394,7 +469,7 @@ static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
1394 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 469 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1395 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 470 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1396 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 471 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1397 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, 472 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
1398 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 473 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1399 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 474 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1400 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 475 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
@@ -1560,7 +635,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
1560 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 635 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1561 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 636 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1562 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 637 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1563 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, 638 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
1564 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 639 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1565 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 640 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1566 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 641 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
@@ -1653,7 +728,7 @@ static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
1653 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 728 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1654 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 729 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1655 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 730 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1656 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, 731 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
1657 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 732 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1658 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 733 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1659 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 734 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
@@ -1752,7 +827,7 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
1752 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, 827 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1753 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, 828 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1754 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, 829 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1755 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, 830 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
1756 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, 831 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1757 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, 832 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1758 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, 833 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
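
The gain and initvals arrays above are flat register tables: column 0 is the register address and columns 1-4 hold the per-mode value, in the order given by the /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ header rows. A minimal user-space sketch of walking one mode column of such a [][5] table; write_reg() and load_mode_column() are illustrative stand-ins, not ath9k symbols, and the two sample rows are taken from the table above.

#include <stdint.h>
#include <stdio.h>

/* Column indices follow the table header: Addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 */
enum { COL_ADDR, COL_5G_HT20, COL_5G_HT40, COL_2G_HT40, COL_2G_HT20 };

static const uint32_t example_modes[][5] = {
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
	{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
};

/* Stand-in for a register write; prints instead of touching hardware. */
static void write_reg(uint32_t addr, uint32_t val)
{
	printf("REG 0x%08x <- 0x%08x\n", addr, val);
}

static void load_mode_column(const uint32_t (*tbl)[5], size_t rows, int column)
{
	for (size_t i = 0; i < rows; i++)
		write_reg(tbl[i][COL_ADDR], tbl[i][column]);
}

int main(void)
{
	/* Program the 2G_HT20 column of the example table. */
	load_mode_column(example_modes, 2, COL_2G_HT20);
	return 0;
}
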
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 099bd4183ad0..03b37d7be1c3 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -62,7 +62,6 @@ struct ath_node;
62#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i)) 62#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
63 63
64struct ath_config { 64struct ath_config {
65 u32 ath_aggr_prot;
66 u16 txpowlimit; 65 u16 txpowlimit;
67 u8 cabqReadytime; 66 u8 cabqReadytime;
68}; 67};
@@ -120,13 +119,11 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
120/* RX / TX */ 119/* RX / TX */
121/***********/ 120/***********/
122 121
123#define ATH_MAX_ANTENNA 3
124#define ATH_RXBUF 512 122#define ATH_RXBUF 512
125#define ATH_TXBUF 512 123#define ATH_TXBUF 512
126#define ATH_TXBUF_RESERVE 5 124#define ATH_TXBUF_RESERVE 5
127#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE) 125#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
128#define ATH_TXMAXTRY 13 126#define ATH_TXMAXTRY 13
129#define ATH_MGT_TXMAXTRY 4
130 127
131#define TID_TO_WME_AC(_tid) \ 128#define TID_TO_WME_AC(_tid) \
132 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ 129 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
@@ -202,6 +199,7 @@ struct ath_atx_ac {
202 int sched; 199 int sched;
203 struct list_head list; 200 struct list_head list;
204 struct list_head tid_q; 201 struct list_head tid_q;
202 bool clear_ps_filter;
205}; 203};
206 204
207struct ath_frame_info { 205struct ath_frame_info {
@@ -257,8 +255,12 @@ struct ath_node {
257#endif 255#endif
258 struct ath_atx_tid tid[WME_NUM_TID]; 256 struct ath_atx_tid tid[WME_NUM_TID];
259 struct ath_atx_ac ac[WME_NUM_AC]; 257 struct ath_atx_ac ac[WME_NUM_AC];
258 int ps_key;
259
260 u16 maxampdu; 260 u16 maxampdu;
261 u8 mpdudensity; 261 u8 mpdudensity;
262
263 bool sleeping;
262}; 264};
263 265
264#define AGGR_CLEANUP BIT(1) 266#define AGGR_CLEANUP BIT(1)
@@ -340,17 +342,18 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
340void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 342void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
341void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 343void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
342 344
345void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
346bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an);
347
343/********/ 348/********/
344/* VIFs */ 349/* VIFs */
345/********/ 350/********/
346 351
347struct ath_vif { 352struct ath_vif {
348 int av_bslot; 353 int av_bslot;
349 bool is_bslot_active; 354 bool is_bslot_active, primary_sta_vif;
350 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ 355 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
351 enum nl80211_iftype av_opmode;
352 struct ath_buf *av_bcbuf; 356 struct ath_buf *av_bcbuf;
353 u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
354}; 357};
355 358
356/*******************/ 359/*******************/
@@ -362,7 +365,7 @@ struct ath_vif {
362 * number of BSSIDs) if a given beacon does not go out even after waiting this 365 * number of BSSIDs) if a given beacon does not go out even after waiting this
363 * number of beacon intervals, the game's up. 366 * number of beacon intervals, the game's up.
364 */ 367 */
365#define BSTUCK_THRESH (9 * ATH_BCBUF) 368#define BSTUCK_THRESH 9
366#define ATH_BCBUF 4 369#define ATH_BCBUF 4
367#define ATH_DEFAULT_BINTVAL 100 /* TU */ 370#define ATH_DEFAULT_BINTVAL 100 /* TU */
368#define ATH_DEFAULT_BMISS_LIMIT 10 371#define ATH_DEFAULT_BMISS_LIMIT 10
@@ -386,7 +389,7 @@ struct ath_beacon {
386 u32 beaconq; 389 u32 beaconq;
387 u32 bmisscnt; 390 u32 bmisscnt;
388 u32 ast_be_xmit; 391 u32 ast_be_xmit;
389 u64 bc_tstamp; 392 u32 bc_tstamp;
390 struct ieee80211_vif *bslot[ATH_BCBUF]; 393 struct ieee80211_vif *bslot[ATH_BCBUF];
391 int slottime; 394 int slottime;
392 int slotupdate; 395 int slotupdate;
@@ -401,6 +404,7 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
401int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif); 404int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
402void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp); 405void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
403int ath_beaconq_config(struct ath_softc *sc); 406int ath_beaconq_config(struct ath_softc *sc);
407void ath_set_beacon(struct ath_softc *sc);
404void ath9k_set_beaconing_status(struct ath_softc *sc, bool status); 408void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
405 409
406/*******/ 410/*******/
@@ -418,6 +422,7 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
418#define ATH_PAPRD_TIMEOUT 100 /* msecs */ 422#define ATH_PAPRD_TIMEOUT 100 /* msecs */
419 423
420void ath_hw_check(struct work_struct *work); 424void ath_hw_check(struct work_struct *work);
425void ath_hw_pll_work(struct work_struct *work);
421void ath_paprd_calibrate(struct work_struct *work); 426void ath_paprd_calibrate(struct work_struct *work);
422void ath_ani_calibrate(unsigned long data); 427void ath_ani_calibrate(unsigned long data);
423 428
@@ -448,6 +453,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
448 453
449#define ATH_LED_PIN_DEF 1 454#define ATH_LED_PIN_DEF 1
450#define ATH_LED_PIN_9287 8 455#define ATH_LED_PIN_9287 8
456#define ATH_LED_PIN_9300 10
451#define ATH_LED_PIN_9485 6 457#define ATH_LED_PIN_9485 6
452 458
453#ifdef CONFIG_MAC80211_LEDS 459#ifdef CONFIG_MAC80211_LEDS
@@ -477,7 +483,6 @@ static inline void ath_deinit_leds(struct ath_softc *sc)
477#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30 483#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
478#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20 484#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
479 485
480#define ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA -3
481#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1 486#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
482#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4 487#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
483#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2 488#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
@@ -550,6 +555,7 @@ struct ath_ant_comb {
550#define SC_OP_BT_SCAN BIT(13) 555#define SC_OP_BT_SCAN BIT(13)
551#define SC_OP_ANI_RUN BIT(14) 556#define SC_OP_ANI_RUN BIT(14)
552#define SC_OP_ENABLE_APM BIT(15) 557#define SC_OP_ENABLE_APM BIT(15)
558#define SC_OP_PRIM_STA_VIF BIT(16)
553 559
554/* Powersave flags */ 560/* Powersave flags */
555#define PS_WAIT_FOR_BEACON BIT(0) 561#define PS_WAIT_FOR_BEACON BIT(0)
@@ -557,6 +563,7 @@ struct ath_ant_comb {
557#define PS_WAIT_FOR_PSPOLL_DATA BIT(2) 563#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
558#define PS_WAIT_FOR_TX_ACK BIT(3) 564#define PS_WAIT_FOR_TX_ACK BIT(3)
559#define PS_BEACON_SYNC BIT(4) 565#define PS_BEACON_SYNC BIT(4)
566#define PS_TSFOOR_SYNC BIT(5)
560 567
561struct ath_rate_table; 568struct ath_rate_table;
562 569
@@ -667,7 +674,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
667bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode); 674bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
668bool ath9k_uses_beacons(int type); 675bool ath9k_uses_beacons(int type);
669 676
670#ifdef CONFIG_PCI 677#ifdef CONFIG_ATH9K_PCI
671int ath_pci_init(void); 678int ath_pci_init(void);
672void ath_pci_exit(void); 679void ath_pci_exit(void);
673#else 680#else
@@ -675,7 +682,7 @@ static inline int ath_pci_init(void) { return 0; };
675static inline void ath_pci_exit(void) {}; 682static inline void ath_pci_exit(void) {};
676#endif 683#endif
677 684
678#ifdef CONFIG_ATHEROS_AR71XX 685#ifdef CONFIG_ATH9K_AHB
679int ath_ahb_init(void); 686int ath_ahb_init(void);
680void ath_ahb_exit(void); 687void ath_ahb_exit(void);
681#else 688#else
@@ -688,8 +695,6 @@ void ath9k_ps_restore(struct ath_softc *sc);
688 695
689u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate); 696u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
690 697
691void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
692
693void ath_start_rfkill_poll(struct ath_softc *sc); 698void ath_start_rfkill_poll(struct ath_softc *sc);
694extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); 699extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
695void ath9k_calculate_iter_data(struct ieee80211_hw *hw, 700void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
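
With bc_tstamp narrowed from u64 to u32 in this header, the IBSS beacon path later in this patch derives the next TBTT from 32-bit TSF values and handles counter wraparound explicitly. A standalone sketch of that wraparound-safe delta and roundup; next_tbtt() and ROUNDUP() are illustrative helpers, the branch structure mirrors the ath_beacon_config_adhoc hunk shown below.

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of y (y != 0), like the kernel's roundup(). */
#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

/* Next TBTT in microseconds, given a 32-bit TSF snapshot and the timestamp of
 * the last received beacon, either of which may have wrapped past 2^32. */
static uint32_t next_tbtt(uint32_t tsf, uint32_t bc_tstamp, uint32_t intval_us)
{
	uint32_t delta;

	if (!bc_tstamp)
		return tsf + intval_us;

	if (tsf > bc_tstamp)
		delta = tsf - bc_tstamp;
	else
		delta = tsf + 1 + (~0U - bc_tstamp);	/* TSF wrapped */

	return tsf + ROUNDUP(delta, intval_us);
}

int main(void)
{
	uint32_t intval = 100 * 1024;	/* 100 TU expressed in microseconds */

	/* bc_tstamp near the top of the 32-bit range, TSF just past the wrap. */
	printf("next TBTT at %u us\n", next_tbtt(0x00001000, 0xfffff000, intval));
	return 0;
}
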
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 6d2a545fc35e..637dbc5f7b67 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -57,8 +57,8 @@ int ath_beaconq_config(struct ath_softc *sc)
57 57
58/* 58/*
59 * Associates the beacon frame buffer with a transmit descriptor. Will set 59 * Associates the beacon frame buffer with a transmit descriptor. Will set
60 * up all required antenna switch parameters, rate codes, and channel flags. 60 * up rate codes, and channel flags. Beacons are always sent out at the
61 * Beacons are always sent out at the lowest rate, and are not retried. 61 * lowest rate, and are not retried.
62*/ 62*/
63static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, 63static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
64 struct ath_buf *bf, int rateidx) 64 struct ath_buf *bf, int rateidx)
@@ -68,7 +68,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
68 struct ath_common *common = ath9k_hw_common(ah); 68 struct ath_common *common = ath9k_hw_common(ah);
69 struct ath_desc *ds; 69 struct ath_desc *ds;
70 struct ath9k_11n_rate_series series[4]; 70 struct ath9k_11n_rate_series series[4];
71 int flags, antenna, ctsrate = 0, ctsduration = 0; 71 int flags, ctsrate = 0, ctsduration = 0;
72 struct ieee80211_supported_band *sband; 72 struct ieee80211_supported_band *sband;
73 u8 rate = 0; 73 u8 rate = 0;
74 74
@@ -76,12 +76,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
76 flags = ATH9K_TXDESC_NOACK; 76 flags = ATH9K_TXDESC_NOACK;
77 77
78 ds->ds_link = 0; 78 ds->ds_link = 0;
79 /*
80 * Switch antenna every beacon.
81 * Should only switch every beacon period, not for every SWBA
82 * XXX assumes two antennae
83 */
84 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
85 79
86 sband = &sc->sbands[common->hw->conf.channel->band]; 80 sband = &sc->sbands[common->hw->conf.channel->band];
87 rate = sband->bitrates[rateidx].hw_value; 81 rate = sband->bitrates[rateidx].hw_value;
@@ -278,7 +272,7 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
278 return -ENOMEM; 272 return -ENOMEM;
279 273
280 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 274 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
281 sc->beacon.bc_tstamp = le64_to_cpu(tstamp); 275 sc->beacon.bc_tstamp = (u32) le64_to_cpu(tstamp);
282 /* Calculate a TSF adjustment factor required for staggered beacons. */ 276 /* Calculate a TSF adjustment factor required for staggered beacons. */
283 if (avp->av_bslot > 0) { 277 if (avp->av_bslot > 0) {
284 u64 tsfadjust; 278 u64 tsfadjust;
@@ -294,8 +288,8 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
294 * adjustment. Other slots are adjusted to get the timestamp 288 * adjustment. Other slots are adjusted to get the timestamp
295 * close to the TBTT for the BSS. 289 * close to the TBTT for the BSS.
296 */ 290 */
297 tsfadjust = intval * avp->av_bslot / ATH_BCBUF; 291 tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
298 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); 292 avp->tsf_adjust = cpu_to_le64(tsfadjust);
299 293
300 ath_dbg(common, ATH_DBG_BEACON, 294 ath_dbg(common, ATH_DBG_BEACON,
301 "stagger beacons, bslot %d intval %u tsfadjust %llu\n", 295 "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
@@ -326,9 +320,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
326 if (avp->av_bcbuf != NULL) { 320 if (avp->av_bcbuf != NULL) {
327 struct ath_buf *bf; 321 struct ath_buf *bf;
328 322
323 avp->is_bslot_active = false;
329 if (avp->av_bslot != -1) { 324 if (avp->av_bslot != -1) {
330 sc->beacon.bslot[avp->av_bslot] = NULL; 325 sc->beacon.bslot[avp->av_bslot] = NULL;
331 sc->nbcnvifs--; 326 sc->nbcnvifs--;
327 avp->av_bslot = -1;
332 } 328 }
333 329
334 bf = avp->av_bcbuf; 330 bf = avp->av_bcbuf;
@@ -369,12 +365,13 @@ void ath_beacon_tasklet(unsigned long data)
369 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) { 365 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
370 sc->beacon.bmisscnt++; 366 sc->beacon.bmisscnt++;
371 367
372 if (sc->beacon.bmisscnt < BSTUCK_THRESH) { 368 if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
373 ath_dbg(common, ATH_DBG_BSTUCK, 369 ath_dbg(common, ATH_DBG_BSTUCK,
374 "missed %u consecutive beacons\n", 370 "missed %u consecutive beacons\n",
375 sc->beacon.bmisscnt); 371 sc->beacon.bmisscnt);
376 ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq); 372 ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
377 ath9k_hw_bstuck_nfcal(ah); 373 if (sc->beacon.bmisscnt > 3)
374 ath9k_hw_bstuck_nfcal(ah);
378 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 375 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
379 ath_dbg(common, ATH_DBG_BSTUCK, 376 ath_dbg(common, ATH_DBG_BSTUCK,
380 "beacon is officially stuck\n"); 377 "beacon is officially stuck\n");
@@ -385,13 +382,6 @@ void ath_beacon_tasklet(unsigned long data)
385 return; 382 return;
386 } 383 }
387 384
388 if (sc->beacon.bmisscnt != 0) {
389 ath_dbg(common, ATH_DBG_BSTUCK,
390 "resume beacon xmit after %u misses\n",
391 sc->beacon.bmisscnt);
392 sc->beacon.bmisscnt = 0;
393 }
394
395 /* 385 /*
396 * Generate beacon frames. we are sending frames 386 * Generate beacon frames. we are sending frames
397 * staggered so calculate the slot for this frame based 387 * staggered so calculate the slot for this frame based
@@ -401,21 +391,14 @@ void ath_beacon_tasklet(unsigned long data)
401 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL; 391 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
402 392
403 tsf = ath9k_hw_gettsf64(ah); 393 tsf = ath9k_hw_gettsf64(ah);
404 tsftu = TSF_TO_TU(tsf>>32, tsf); 394 tsf += TU_TO_USEC(ah->config.sw_beacon_response_time);
405 slot = ((tsftu % intval) * ATH_BCBUF) / intval; 395 tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF);
406 /* 396 slot = (tsftu % (intval * ATH_BCBUF)) / intval;
407 * Reverse the slot order to get slot 0 on the TBTT offset that does
408 * not require TSF adjustment and other slots adding
409 * slot/ATH_BCBUF * beacon_int to timestamp. For example, with
410 * ATH_BCBUF = 4, we process beacon slots as follows: 3 2 1 0 3 2 1 ..
411 * and slot 0 is at correct offset to TBTT.
412 */
413 slot = ATH_BCBUF - slot - 1;
414 vif = sc->beacon.bslot[slot]; 397 vif = sc->beacon.bslot[slot];
415 398
416 ath_dbg(common, ATH_DBG_BEACON, 399 ath_dbg(common, ATH_DBG_BEACON,
417 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 400 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
418 slot, tsf, tsftu, intval, vif); 401 slot, tsf, tsftu / ATH_BCBUF, intval, vif);
419 402
420 bfaddr = 0; 403 bfaddr = 0;
421 if (vif) { 404 if (vif) {
@@ -424,6 +407,13 @@ void ath_beacon_tasklet(unsigned long data)
424 bfaddr = bf->bf_daddr; 407 bfaddr = bf->bf_daddr;
425 bc = 1; 408 bc = 1;
426 } 409 }
410
411 if (sc->beacon.bmisscnt != 0) {
412 ath_dbg(common, ATH_DBG_BSTUCK,
413 "resume beacon xmit after %u misses\n",
414 sc->beacon.bmisscnt);
415 sc->beacon.bmisscnt = 0;
416 }
427 } 417 }
428 418
429 /* 419 /*
@@ -463,13 +453,17 @@ static void ath9k_beacon_init(struct ath_softc *sc,
463 u32 next_beacon, 453 u32 next_beacon,
464 u32 beacon_period) 454 u32 beacon_period)
465{ 455{
466 if (beacon_period & ATH9K_BEACON_RESET_TSF) 456 if (sc->sc_flags & SC_OP_TSF_RESET) {
467 ath9k_ps_wakeup(sc); 457 ath9k_ps_wakeup(sc);
458 ath9k_hw_reset_tsf(sc->sc_ah);
459 }
468 460
469 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period); 461 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
470 462
471 if (beacon_period & ATH9K_BEACON_RESET_TSF) 463 if (sc->sc_flags & SC_OP_TSF_RESET) {
472 ath9k_ps_restore(sc); 464 ath9k_ps_restore(sc);
465 sc->sc_flags &= ~SC_OP_TSF_RESET;
466 }
473} 467}
474 468
475/* 469/*
@@ -484,18 +478,14 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
484 u32 nexttbtt, intval; 478 u32 nexttbtt, intval;
485 479
486 /* NB: the beacon interval is kept internally in TU's */ 480 /* NB: the beacon interval is kept internally in TU's */
487 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; 481 intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
488 intval /= ATH_BCBUF; /* for staggered beacons */ 482 intval /= ATH_BCBUF; /* for staggered beacons */
489 nexttbtt = intval; 483 nexttbtt = intval;
490 484
491 if (sc->sc_flags & SC_OP_TSF_RESET)
492 intval |= ATH9K_BEACON_RESET_TSF;
493
494 /* 485 /*
495 * In AP mode we enable the beacon timers and SWBA interrupts to 486 * In AP mode we enable the beacon timers and SWBA interrupts to
496 * prepare beacon frames. 487 * prepare beacon frames.
497 */ 488 */
498 intval |= ATH9K_BEACON_ENA;
499 ah->imask |= ATH9K_INT_SWBA; 489 ah->imask |= ATH9K_INT_SWBA;
500 ath_beaconq_config(sc); 490 ath_beaconq_config(sc);
501 491
@@ -505,11 +495,6 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
505 ath9k_beacon_init(sc, nexttbtt, intval); 495 ath9k_beacon_init(sc, nexttbtt, intval);
506 sc->beacon.bmisscnt = 0; 496 sc->beacon.bmisscnt = 0;
507 ath9k_hw_set_interrupts(ah, ah->imask); 497 ath9k_hw_set_interrupts(ah, ah->imask);
508
509 /* Clear the reset TSF flag, so that subsequent beacon updation
510 will not reset the HW TSF. */
511
512 sc->sc_flags &= ~SC_OP_TSF_RESET;
513} 498}
514 499
515/* 500/*
@@ -635,7 +620,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
635 ath9k_hw_disable_interrupts(ah); 620 ath9k_hw_disable_interrupts(ah);
636 ath9k_hw_set_sta_beacon_timers(ah, &bs); 621 ath9k_hw_set_sta_beacon_timers(ah, &bs);
637 ah->imask |= ATH9K_INT_BMISS; 622 ah->imask |= ATH9K_INT_BMISS;
638 ath9k_hw_set_interrupts(ah, ah->imask); 623
624 /*
625 * If the beacon config is called beacause of TSFOOR,
626 * Interrupts will be enabled back at the end of ath9k_tasklet
627 */
628 if (!(sc->ps_flags & PS_TSFOOR_SYNC))
629 ath9k_hw_set_interrupts(ah, ah->imask);
639} 630}
640 631
641static void ath_beacon_config_adhoc(struct ath_softc *sc, 632static void ath_beacon_config_adhoc(struct ath_softc *sc,
@@ -643,25 +634,20 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
643{ 634{
644 struct ath_hw *ah = sc->sc_ah; 635 struct ath_hw *ah = sc->sc_ah;
645 struct ath_common *common = ath9k_hw_common(ah); 636 struct ath_common *common = ath9k_hw_common(ah);
646 u64 tsf; 637 u32 tsf, delta, intval, nexttbtt;
647 u32 tsftu, intval, nexttbtt; 638
648 639 tsf = ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE);
649 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD; 640 intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
650 641
651 642 if (!sc->beacon.bc_tstamp)
652 /* Pull nexttbtt forward to reflect the current TSF */ 643 nexttbtt = tsf + intval;
653 644 else {
654 nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp); 645 if (tsf > sc->beacon.bc_tstamp)
655 if (nexttbtt == 0) 646 delta = (tsf - sc->beacon.bc_tstamp);
656 nexttbtt = intval; 647 else
657 else if (intval) 648 delta = (tsf + 1 + (~0U - sc->beacon.bc_tstamp));
658 nexttbtt = roundup(nexttbtt, intval); 649 nexttbtt = tsf + roundup(delta, intval);
659 650 }
660 tsf = ath9k_hw_gettsf64(ah);
661 tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
662 do {
663 nexttbtt += intval;
664 } while (nexttbtt < tsftu);
665 651
666 ath_dbg(common, ATH_DBG_BEACON, 652 ath_dbg(common, ATH_DBG_BEACON,
667 "IBSS nexttbtt %u intval %u (%u)\n", 653 "IBSS nexttbtt %u intval %u (%u)\n",
@@ -672,7 +658,6 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
672 * if we need to manually prepare beacon frames. Otherwise we use a 658 * if we need to manually prepare beacon frames. Otherwise we use a
673 * self-linked tx descriptor and let the hardware deal with things. 659 * self-linked tx descriptor and let the hardware deal with things.
674 */ 660 */
675 intval |= ATH9K_BEACON_ENA;
676 ah->imask |= ATH9K_INT_SWBA; 661 ah->imask |= ATH9K_INT_SWBA;
677 662
678 ath_beaconq_config(sc); 663 ath_beaconq_config(sc);
@@ -682,25 +667,71 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
682 ath9k_hw_disable_interrupts(ah); 667 ath9k_hw_disable_interrupts(ah);
683 ath9k_beacon_init(sc, nexttbtt, intval); 668 ath9k_beacon_init(sc, nexttbtt, intval);
684 sc->beacon.bmisscnt = 0; 669 sc->beacon.bmisscnt = 0;
685 ath9k_hw_set_interrupts(ah, ah->imask); 670 /*
 671	 * If the beacon config is called because of TSFOOR,
 672	 * interrupts will be re-enabled at the end of ath9k_tasklet
673 */
674 if (!(sc->ps_flags & PS_TSFOOR_SYNC))
675 ath9k_hw_set_interrupts(ah, ah->imask);
686} 676}
687 677
688void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) 678static bool ath9k_allow_beacon_config(struct ath_softc *sc,
679 struct ieee80211_vif *vif)
689{ 680{
690 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 681 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
691 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 682 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
692 enum nl80211_iftype iftype; 683 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
684 struct ath_vif *avp = (void *)vif->drv_priv;
693 685
694 /* Setup the beacon configuration parameters */ 686 /*
695 if (vif) { 687 * Can not have different beacon interval on multiple
696 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 688 * AP interface case
697 iftype = vif->type; 689 */
698 cur_conf->beacon_interval = bss_conf->beacon_int; 690 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
699 cur_conf->dtim_period = bss_conf->dtim_period; 691 (sc->nbcnvifs > 1) &&
700 } else { 692 (vif->type == NL80211_IFTYPE_AP) &&
701 iftype = sc->sc_ah->opmode; 693 (cur_conf->beacon_interval != bss_conf->beacon_int)) {
694 ath_dbg(common, ATH_DBG_CONFIG,
695 "Changing beacon interval of multiple \
696 AP interfaces !\n");
697 return false;
702 } 698 }
699 /*
700 * Can not configure station vif's beacon config
701 * while on AP opmode
702 */
703 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
704 (vif->type != NL80211_IFTYPE_AP)) {
705 ath_dbg(common, ATH_DBG_CONFIG,
706 "STA vif's beacon not allowed on AP mode\n");
707 return false;
708 }
709 /*
710 * Do not allow beacon config if HW was already configured
711 * with another STA vif
712 */
713 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
714 (vif->type == NL80211_IFTYPE_STATION) &&
715 (sc->sc_flags & SC_OP_BEACONS) &&
716 !avp->primary_sta_vif) {
717 ath_dbg(common, ATH_DBG_CONFIG,
718 "Beacon already configured for a station interface\n");
719 return false;
720 }
721 return true;
722}
703 723
724void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
725{
726 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
727 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
728
729 if (!ath9k_allow_beacon_config(sc, vif))
730 return;
731
732 /* Setup the beacon configuration parameters */
733 cur_conf->beacon_interval = bss_conf->beacon_int;
734 cur_conf->dtim_period = bss_conf->dtim_period;
704 cur_conf->listen_interval = 1; 735 cur_conf->listen_interval = 1;
705 cur_conf->dtim_count = 1; 736 cur_conf->dtim_count = 1;
706 cur_conf->bmiss_timeout = 737 cur_conf->bmiss_timeout =
@@ -723,9 +754,37 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
723 if (cur_conf->dtim_period == 0) 754 if (cur_conf->dtim_period == 0)
724 cur_conf->dtim_period = 1; 755 cur_conf->dtim_period = 1;
725 756
726 switch (iftype) { 757 ath_set_beacon(sc);
758}
759
760static bool ath_has_valid_bslot(struct ath_softc *sc)
761{
762 struct ath_vif *avp;
763 int slot;
764 bool found = false;
765
766 for (slot = 0; slot < ATH_BCBUF; slot++) {
767 if (sc->beacon.bslot[slot]) {
768 avp = (void *)sc->beacon.bslot[slot]->drv_priv;
769 if (avp->is_bslot_active) {
770 found = true;
771 break;
772 }
773 }
774 }
775 return found;
776}
777
778
779void ath_set_beacon(struct ath_softc *sc)
780{
781 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
782 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
783
784 switch (sc->sc_ah->opmode) {
727 case NL80211_IFTYPE_AP: 785 case NL80211_IFTYPE_AP:
728 ath_beacon_config_ap(sc, cur_conf); 786 if (ath_has_valid_bslot(sc))
787 ath_beacon_config_ap(sc, cur_conf);
729 break; 788 break;
730 case NL80211_IFTYPE_ADHOC: 789 case NL80211_IFTYPE_ADHOC:
731 case NL80211_IFTYPE_MESH_POINT: 790 case NL80211_IFTYPE_MESH_POINT:
@@ -746,26 +805,15 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
746void ath9k_set_beaconing_status(struct ath_softc *sc, bool status) 805void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
747{ 806{
748 struct ath_hw *ah = sc->sc_ah; 807 struct ath_hw *ah = sc->sc_ah;
749 struct ath_vif *avp; 808
750 int slot; 809 if (!ath_has_valid_bslot(sc))
751 bool found = false; 810 return;
752 811
753 ath9k_ps_wakeup(sc); 812 ath9k_ps_wakeup(sc);
754 if (status) { 813 if (status) {
755 for (slot = 0; slot < ATH_BCBUF; slot++) { 814 /* Re-enable beaconing */
756 if (sc->beacon.bslot[slot]) { 815 ah->imask |= ATH9K_INT_SWBA;
757 avp = (void *)sc->beacon.bslot[slot]->drv_priv; 816 ath9k_hw_set_interrupts(ah, ah->imask);
758 if (avp->is_bslot_active) {
759 found = true;
760 break;
761 }
762 }
763 }
764 if (found) {
765 /* Re-enable beaconing */
766 ah->imask |= ATH9K_INT_SWBA;
767 ath9k_hw_set_interrupts(ah, ah->imask);
768 }
769 } else { 817 } else {
770 /* Disable SWBA interrupt */ 818 /* Disable SWBA interrupt */
771 ah->imask &= ~ATH9K_INT_SWBA; 819 ah->imask &= ~ATH9K_INT_SWBA;
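
The reworked tasklet adds the software beacon response time to the TSF, scales the result by ATH_BCBUF before converting to TU, and takes the slot directly, dropping the old reverse-order slot mapping. A standalone sketch of the new arithmetic; beacon_slot() is an illustrative helper, and TU_TO_USEC/TSF_TO_TU are written out here to match their driver definitions (1 TU = 1024 us).

#include <stdint.h>
#include <stdio.h>

#define ATH_BCBUF	4
#define TU_TO_USEC(x)	((x) << 10)
#define TSF_TO_TU(h, l)	((((uint32_t)(h)) << 22) | (((uint32_t)(l)) >> 10))

/* Which beacon slot to send for this SWBA, given the 64-bit TSF (already
 * advanced by the software beacon response time) and the beacon interval
 * in TU. Mirrors the arithmetic in the patched ath_beacon_tasklet(). */
static int beacon_slot(uint64_t tsf, uint32_t intval_tu)
{
	uint64_t scaled = tsf * ATH_BCBUF;
	uint32_t tsftu = TSF_TO_TU(scaled >> 32, (uint32_t)scaled);

	return (tsftu % (intval_tu * ATH_BCBUF)) / intval_tu;
}

int main(void)
{
	uint32_t intval = 100;			/* TU */
	uint64_t tsf = TU_TO_USEC(250ULL);	/* TSF at 250 TU */

	/* 250 TU with a 100 TU interval and 4 staggered slots -> slot 2 */
	printf("slot %d\n", beacon_slot(tsf, intval));
	return 0;
}
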
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index d33bf204c995..23f15a7ca7f1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -51,6 +51,10 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
51 .bt_hold_rx_clear = true, 51 .bt_hold_rx_clear = true,
52 }; 52 };
53 u32 i; 53 u32 i;
54 bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
55
56 if (AR_SREV_9300_20_OR_LATER(ah))
57 rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
54 58
55 btcoex_hw->bt_coex_mode = 59 btcoex_hw->bt_coex_mode =
56 (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) | 60 (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
@@ -59,7 +63,7 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
59 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) | 63 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
60 SM(ath_bt_config.bt_mode, AR_BT_MODE) | 64 SM(ath_bt_config.bt_mode, AR_BT_MODE) |
61 SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) | 65 SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) |
62 SM(ath_bt_config.bt_rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) | 66 SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) |
63 SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) | 67 SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) |
64 SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) | 68 SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
65 SM(qnum, AR_BT_QCU_THRESH); 69 SM(qnum, AR_BT_QCU_THRESH);
@@ -142,6 +146,7 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
142} 146}
143EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); 147EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
144 148
149
145static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) 150static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
146{ 151{
147 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 152 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@@ -152,9 +157,22 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
152 * enable coex 3-wire 157 * enable coex 3-wire
153 */ 158 */
154 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode); 159 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
155 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
156 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); 160 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
157 161
162
163 if (AR_SREV_9300_20_OR_LATER(ah)) {
164 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]);
165 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]);
166 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]);
167 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]);
168 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]);
169 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
170
171 } else
172 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
173
174
175
158 if (AR_SREV_9271(ah)) { 176 if (AR_SREV_9271(ah)) {
159 val = REG_READ(ah, 0x50040); 177 val = REG_READ(ah, 0x50040);
160 val &= 0xFFFFFEFF; 178 val &= 0xFFFFFEFF;
@@ -202,10 +220,86 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
202 220
203 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) { 221 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
204 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE); 222 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
205 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
206 REG_WRITE(ah, AR_BT_COEX_MODE2, 0); 223 REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
224
225 if (AR_SREV_9300_20_OR_LATER(ah)) {
226 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
227 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
228 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0);
229 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0);
230 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
231 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
232 } else
233 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
234
207 } 235 }
208 236
209 ah->btcoex_hw.enabled = false; 237 ah->btcoex_hw.enabled = false;
210} 238}
211EXPORT_SYMBOL(ath9k_hw_btcoex_disable); 239EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
240
241static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
242 enum ath_stomp_type stomp_type)
243{
244 ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT;
245 ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT;
246 ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT;
247 ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT;
248
249
250 switch (stomp_type) {
251 case ATH_BTCOEX_STOMP_ALL:
252 ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
253 ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
254 break;
255 case ATH_BTCOEX_STOMP_LOW:
256 ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
257 ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
258 break;
259 case ATH_BTCOEX_STOMP_NONE:
260 ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
261 ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
262 break;
263
264 default:
265 ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
266 "Invalid Stomptype\n");
267 break;
268 }
269
270 ath9k_hw_btcoex_enable(ah);
271}
272
273/*
274 * Configures appropriate weight based on stomp type.
275 */
276void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
277 enum ath_stomp_type stomp_type)
278{
279 if (AR_SREV_9300_20_OR_LATER(ah)) {
280 ar9003_btcoex_bt_stomp(ah, stomp_type);
281 return;
282 }
283
284 switch (stomp_type) {
285 case ATH_BTCOEX_STOMP_ALL:
286 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
287 AR_STOMP_ALL_WLAN_WGHT);
288 break;
289 case ATH_BTCOEX_STOMP_LOW:
290 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
291 AR_STOMP_LOW_WLAN_WGHT);
292 break;
293 case ATH_BTCOEX_STOMP_NONE:
294 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
295 AR_STOMP_NONE_WLAN_WGHT);
296 break;
297 default:
298 ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
299 "Invalid Stomptype\n");
300 break;
301 }
302
303 ath9k_hw_btcoex_enable(ah);
304}
305EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 588dfd464dd1..a9efca83d676 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -19,9 +19,13 @@
19 19
20#include "hw.h" 20#include "hw.h"
21 21
22#define ATH_WLANACTIVE_GPIO 5 22#define ATH_WLANACTIVE_GPIO_9280 5
23#define ATH_BTACTIVE_GPIO 6 23#define ATH_BTACTIVE_GPIO_9280 6
24#define ATH_BTPRIORITY_GPIO 7 24#define ATH_BTPRIORITY_GPIO_9285 7
25
26#define ATH_WLANACTIVE_GPIO_9300 5
27#define ATH_BTACTIVE_GPIO_9300 4
28#define ATH_BTPRIORITY_GPIO_9300 8
25 29
26#define ATH_BTCOEX_DEF_BT_PERIOD 45 30#define ATH_BTCOEX_DEF_BT_PERIOD 45
27#define ATH_BTCOEX_DEF_DUTY_CYCLE 55 31#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
@@ -32,6 +36,14 @@
32#define ATH_BT_CNT_THRESHOLD 3 36#define ATH_BT_CNT_THRESHOLD 3
33#define ATH_BT_CNT_SCAN_THRESHOLD 15 37#define ATH_BT_CNT_SCAN_THRESHOLD 15
34 38
39/* Defines the BT AR_BT_COEX_WGHT used */
40enum ath_stomp_type {
41 ATH_BTCOEX_NO_STOMP,
42 ATH_BTCOEX_STOMP_ALL,
43 ATH_BTCOEX_STOMP_LOW,
44 ATH_BTCOEX_STOMP_NONE
45};
46
35enum ath_btcoex_scheme { 47enum ath_btcoex_scheme {
36 ATH_BTCOEX_CFG_NONE, 48 ATH_BTCOEX_CFG_NONE,
37 ATH_BTCOEX_CFG_2WIRE, 49 ATH_BTCOEX_CFG_2WIRE,
@@ -57,5 +69,7 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
57 u32 wlan_weight); 69 u32 wlan_weight);
58void ath9k_hw_btcoex_enable(struct ath_hw *ah); 70void ath9k_hw_btcoex_enable(struct ath_hw *ah);
59void ath9k_hw_btcoex_disable(struct ath_hw *ah); 71void ath9k_hw_btcoex_disable(struct ath_hw *ah);
72void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
73 enum ath_stomp_type stomp_type);
60 74
61#endif 75#endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 8649581fa4dd..558b228a717f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -69,15 +69,21 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
69 int16_t *nfarray) 69 int16_t *nfarray)
70{ 70{
71 struct ath_common *common = ath9k_hw_common(ah); 71 struct ath_common *common = ath9k_hw_common(ah);
72 struct ieee80211_conf *conf = &common->hw->conf;
72 struct ath_nf_limits *limit; 73 struct ath_nf_limits *limit;
73 struct ath9k_nfcal_hist *h; 74 struct ath9k_nfcal_hist *h;
74 bool high_nf_mid = false; 75 bool high_nf_mid = false;
76 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
75 int i; 77 int i;
76 78
77 h = cal->nfCalHist; 79 h = cal->nfCalHist;
78 limit = ath9k_hw_get_nf_limits(ah, ah->curchan); 80 limit = ath9k_hw_get_nf_limits(ah, ah->curchan);
79 81
80 for (i = 0; i < NUM_NF_READINGS; i++) { 82 for (i = 0; i < NUM_NF_READINGS; i++) {
83 if (!(chainmask & (1 << i)) ||
84 ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
85 continue;
86
81 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i]; 87 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
82 88
83 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX) 89 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
@@ -225,6 +231,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
225 int32_t val; 231 int32_t val;
226 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 232 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
227 struct ath_common *common = ath9k_hw_common(ah); 233 struct ath_common *common = ath9k_hw_common(ah);
234 struct ieee80211_conf *conf = &common->hw->conf;
228 s16 default_nf = ath9k_hw_get_default_nf(ah, chan); 235 s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
229 236
230 if (ah->caldata) 237 if (ah->caldata)
@@ -234,6 +241,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
234 if (chainmask & (1 << i)) { 241 if (chainmask & (1 << i)) {
235 s16 nfval; 242 s16 nfval;
236 243
244 if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
245 continue;
246
237 if (h) 247 if (h)
238 nfval = h[i].privNF; 248 nfval = h[i].privNF;
239 else 249 else
@@ -293,6 +303,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
293 ENABLE_REGWRITE_BUFFER(ah); 303 ENABLE_REGWRITE_BUFFER(ah);
294 for (i = 0; i < NUM_NF_READINGS; i++) { 304 for (i = 0; i < NUM_NF_READINGS; i++) {
295 if (chainmask & (1 << i)) { 305 if (chainmask & (1 << i)) {
306 if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
307 continue;
308
296 val = REG_READ(ah, ah->nf_regs[i]); 309 val = REG_READ(ah, ah->nf_regs[i]);
297 val &= 0xFFFFFE00; 310 val &= 0xFFFFFE00;
298 val |= (((u32) (-50) << 1) & 0x1ff); 311 val |= (((u32) (-50) << 1) & 0x1ff);
@@ -396,14 +409,6 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
396 } 409 }
397} 410}
398 411
399s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
400{
401 if (!ah->curchan || !ah->curchan->noisefloor)
402 return ath9k_hw_get_default_nf(ah, chan);
403
404 return ah->curchan->noisefloor;
405}
406EXPORT_SYMBOL(ath9k_hw_getchan_noise);
407 412
408void ath9k_hw_bstuck_nfcal(struct ath_hw *ah) 413void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
409{ 414{
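
The calibration hunks all add the same guard: of the NUM_NF_READINGS (six) noise-floor slots, indices 0-2 belong to the control-channel chains and 3-5 to the extension-channel chains, which only carry data in HT40; the chainmask built as (rxchainmask << 3) | rxchainmask marks both halves for the active chains. A standalone sketch of which slots survive the new filter; show_updated_slots() is illustrative, the constants match the driver (AR5416_MAX_CHAINS is 3).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AR5416_MAX_CHAINS	3
#define NUM_NF_READINGS		6	/* 3 control + 3 extension chains */

static void show_updated_slots(uint8_t rxchainmask, bool is_ht40)
{
	/* Duplicate the active-chain bits into the extension-chain half. */
	uint8_t chainmask = (rxchainmask << 3) | rxchainmask;

	for (int i = 0; i < NUM_NF_READINGS; i++) {
		if (!(chainmask & (1 << i)) ||
		    (i >= AR5416_MAX_CHAINS && !is_ht40))
			continue;	/* skipped, as in the patched loops */
		printf("NF slot %d updated\n", i);
	}
}

int main(void)
{
	puts("1x1 chip, HT20:");
	show_updated_slots(0x1, false);	/* only slot 0 */
	puts("1x1 chip, HT40:");
	show_updated_slots(0x1, true);	/* slots 0 and 3 */
	return 0;
}
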
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b8973eb8d858..4420780fa3b8 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -106,7 +106,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
106void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, 106void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
107 struct ath9k_channel *chan); 107 struct ath9k_channel *chan);
108void ath9k_hw_bstuck_nfcal(struct ath_hw *ah); 108void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
109s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
110void ath9k_hw_reset_calibration(struct ath_hw *ah, 109void ath9k_hw_reset_calibration(struct ath_hw *ah,
111 struct ath9k_cal_list *currCal); 110 struct ath9k_cal_list *currCal);
112 111
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 615e68276e72..74535e6dfb82 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -116,7 +116,7 @@ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
116 116
117 if (chan->band == IEEE80211_BAND_2GHZ) { 117 if (chan->band == IEEE80211_BAND_2GHZ) {
118 ichan->chanmode = CHANNEL_G; 118 ichan->chanmode = CHANNEL_G;
119 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G; 119 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
120 } else { 120 } else {
121 ichan->chanmode = CHANNEL_A; 121 ichan->chanmode = CHANNEL_A;
122 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; 122 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
@@ -158,37 +158,6 @@ int ath9k_cmn_count_streams(unsigned int chainmask, int max)
158} 158}
159EXPORT_SYMBOL(ath9k_cmn_count_streams); 159EXPORT_SYMBOL(ath9k_cmn_count_streams);
160 160
161/*
162 * Configures appropriate weight based on stomp type.
163 */
164void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
165 enum ath_stomp_type stomp_type)
166{
167 struct ath_hw *ah = common->ah;
168
169 switch (stomp_type) {
170 case ATH_BTCOEX_STOMP_ALL:
171 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
172 AR_STOMP_ALL_WLAN_WGHT);
173 break;
174 case ATH_BTCOEX_STOMP_LOW:
175 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
176 AR_STOMP_LOW_WLAN_WGHT);
177 break;
178 case ATH_BTCOEX_STOMP_NONE:
179 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
180 AR_STOMP_NONE_WLAN_WGHT);
181 break;
182 default:
183 ath_dbg(common, ATH_DBG_BTCOEX,
184 "Invalid Stomptype\n");
185 break;
186 }
187
188 ath9k_hw_btcoex_enable(ah);
189}
190EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
191
192void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow, 161void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
193 u16 new_txpow, u16 *txpower) 162 u16 new_txpow, u16 *txpower)
194{ 163{
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index b2f7b5f89097..5124f1420b3a 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -50,14 +50,6 @@
50#define ATH_EP_RND(x, mul) \ 50#define ATH_EP_RND(x, mul) \
51 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 51 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
52 52
53/* Defines the BT AR_BT_COEX_WGHT used */
54enum ath_stomp_type {
55 ATH_BTCOEX_NO_STOMP,
56 ATH_BTCOEX_STOMP_ALL,
57 ATH_BTCOEX_STOMP_LOW,
58 ATH_BTCOEX_STOMP_NONE
59};
60
61int ath9k_cmn_padpos(__le16 frame_control); 53int ath9k_cmn_padpos(__le16 frame_control);
62int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 54int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
63void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan, 55void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 8df5a92a20f1..bad1a87249b6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -326,6 +326,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
326 sc->debug.stats.istats.dtimsync++; 326 sc->debug.stats.istats.dtimsync++;
327 if (status & ATH9K_INT_DTIM) 327 if (status & ATH9K_INT_DTIM)
328 sc->debug.stats.istats.dtim++; 328 sc->debug.stats.istats.dtim++;
329 if (status & ATH9K_INT_TSFOOR)
330 sc->debug.stats.istats.tsfoor++;
329} 331}
330 332
331static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, 333static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -380,8 +382,11 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
380 len += snprintf(buf + len, sizeof(buf) - len, 382 len += snprintf(buf + len, sizeof(buf) - len,
381 "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim); 383 "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim);
382 len += snprintf(buf + len, sizeof(buf) - len, 384 len += snprintf(buf + len, sizeof(buf) - len,
385 "%8s: %10u\n", "TSFOOR", sc->debug.stats.istats.tsfoor);
386 len += snprintf(buf + len, sizeof(buf) - len,
383 "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); 387 "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total);
384 388
389
385 if (len > sizeof(buf)) 390 if (len > sizeof(buf))
386 len = sizeof(buf); 391 len = sizeof(buf);
387 392
@@ -845,7 +850,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
845 850
846 struct ath_softc *sc = file->private_data; 851 struct ath_softc *sc = file->private_data;
847 char *buf; 852 char *buf;
848 unsigned int len = 0, size = 1152; 853 unsigned int len = 0, size = 1400;
849 ssize_t retval = 0; 854 ssize_t retval = 0;
850 855
851 buf = kzalloc(size, GFP_KERNEL); 856 buf = kzalloc(size, GFP_KERNEL);
@@ -874,6 +879,34 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
874 "%18s : %10u\n", "DECRYPT BUSY ERR", 879 "%18s : %10u\n", "DECRYPT BUSY ERR",
875 sc->debug.stats.rxstats.decrypt_busy_err); 880 sc->debug.stats.rxstats.decrypt_busy_err);
876 881
882 len += snprintf(buf + len, size - len,
883 "%18s : %10d\n", "RSSI-CTL0",
884 sc->debug.stats.rxstats.rs_rssi_ctl0);
885
886 len += snprintf(buf + len, size - len,
887 "%18s : %10d\n", "RSSI-CTL1",
888 sc->debug.stats.rxstats.rs_rssi_ctl1);
889
890 len += snprintf(buf + len, size - len,
891 "%18s : %10d\n", "RSSI-CTL2",
892 sc->debug.stats.rxstats.rs_rssi_ctl2);
893
894 len += snprintf(buf + len, size - len,
895 "%18s : %10d\n", "RSSI-EXT0",
896 sc->debug.stats.rxstats.rs_rssi_ext0);
897
898 len += snprintf(buf + len, size - len,
899 "%18s : %10d\n", "RSSI-EXT1",
900 sc->debug.stats.rxstats.rs_rssi_ext1);
901
902 len += snprintf(buf + len, size - len,
903 "%18s : %10d\n", "RSSI-EXT2",
904 sc->debug.stats.rxstats.rs_rssi_ext2);
905
906 len += snprintf(buf + len, size - len,
907 "%18s : %10d\n", "Rx Antenna",
908 sc->debug.stats.rxstats.rs_antenna);
909
877 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN); 910 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
878 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING); 911 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
879 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY); 912 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
@@ -948,6 +981,16 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
948 RX_PHY_ERR_INC(phyerr); 981 RX_PHY_ERR_INC(phyerr);
949 } 982 }
950 983
984 sc->debug.stats.rxstats.rs_rssi_ctl0 = rs->rs_rssi_ctl0;
985 sc->debug.stats.rxstats.rs_rssi_ctl1 = rs->rs_rssi_ctl1;
986 sc->debug.stats.rxstats.rs_rssi_ctl2 = rs->rs_rssi_ctl2;
987
988 sc->debug.stats.rxstats.rs_rssi_ext0 = rs->rs_rssi_ext0;
989 sc->debug.stats.rxstats.rs_rssi_ext1 = rs->rs_rssi_ext1;
990 sc->debug.stats.rxstats.rs_rssi_ext2 = rs->rs_rssi_ext2;
991
992 sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna;
993
951#undef RX_STAT_INC 994#undef RX_STAT_INC
952#undef RX_PHY_ERR_INC 995#undef RX_PHY_ERR_INC
953} 996}
@@ -1088,67 +1131,43 @@ int ath9k_init_debug(struct ath_hw *ah)
1088 return -ENOMEM; 1131 return -ENOMEM;
1089 1132
1090#ifdef CONFIG_ATH_DEBUG 1133#ifdef CONFIG_ATH_DEBUG
1091 if (!debugfs_create_file("debug", S_IRUSR | S_IWUSR, 1134 debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1092 sc->debug.debugfs_phy, sc, &fops_debug)) 1135 sc, &fops_debug);
1093 goto err;
1094#endif 1136#endif
1095 1137 debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
1096 if (!debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, 1138 &fops_dma);
1097 sc, &fops_dma)) 1139 debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
1098 goto err; 1140 &fops_interrupt);
1099 1141 debugfs_create_file("wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1100 if (!debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, 1142 sc, &fops_wiphy);
1101 sc, &fops_interrupt)) 1143 debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
1102 goto err; 1144 &fops_xmit);
1103 1145 debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
1104 if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR, 1146 &fops_stations);
1105 sc->debug.debugfs_phy, sc, &fops_wiphy)) 1147 debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
1106 goto err; 1148 &fops_misc);
1107 1149 debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
1108 if (!debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, 1150 &fops_recv);
1109 sc, &fops_xmit)) 1151 debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
1110 goto err; 1152 sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
1111 1153 debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
1112 if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, 1154 sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
1113 sc, &fops_stations)) 1155 debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1114 goto err; 1156 sc, &fops_regidx);
1115 1157 debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1116 if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, 1158 sc, &fops_regval);
1117 sc, &fops_misc)) 1159 debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
1118 goto err; 1160 sc->debug.debugfs_phy,
1119 1161 &ah->config.cwm_ignore_extcca);
1120 if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, 1162 debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc,
1121 sc, &fops_recv)) 1163 &fops_regdump);
1122 goto err; 1164
1123 1165 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
1124 if (!debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR, 1166 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
1125 sc->debug.debugfs_phy, sc, &fops_rx_chainmask)) 1167
1126 goto err; 1168 debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
1127 1169 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
1128 if (!debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
1129 sc->debug.debugfs_phy, sc, &fops_tx_chainmask))
1130 goto err;
1131
1132 if (!debugfs_create_file("regidx", S_IRUSR | S_IWUSR,
1133 sc->debug.debugfs_phy, sc, &fops_regidx))
1134 goto err;
1135
1136 if (!debugfs_create_file("regval", S_IRUSR | S_IWUSR,
1137 sc->debug.debugfs_phy, sc, &fops_regval))
1138 goto err;
1139
1140 if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR,
1141 sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
1142 goto err;
1143
1144 if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy,
1145 sc, &fops_regdump))
1146 goto err;
1147 1170
1148 sc->debug.regidx = 0; 1171 sc->debug.regidx = 0;
1149 return 0; 1172 return 0;
1150err:
1151 debugfs_remove_recursive(sc->debug.debugfs_phy);
1152 sc->debug.debugfs_phy = NULL;
1153 return -ENOMEM;
1154} 1173}
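
The ath9k_init_debug() hunk above follows the general debugfs rule that registration failures are not fatal: the return values of debugfs_create_file() and friends can simply be ignored, which lets the whole goto err unwind path go away; a single debugfs_remove_recursive() on the per-phy directory at teardown still cleans up whatever was created. A minimal, generic sketch of that pattern against the current debugfs API; the demo_* names and the single counter are illustrative, not part of the driver:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/uaccess.h>

    /* Hypothetical counter and directory; names are illustrative only. */
    static u32 demo_rx_ok;
    static struct dentry *demo_dir;

    static ssize_t demo_read_stats(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
    {
            char buf[64];
            unsigned int len;

            len = scnprintf(buf, sizeof(buf), "%18s : %10u\n",
                            "RX OK", demo_rx_ok);
            return simple_read_from_buffer(user_buf, count, ppos, buf, len);
    }

    static const struct file_operations fops_demo_stats = {
            .read = demo_read_stats,
            .open = simple_open,
            .owner = THIS_MODULE,
            .llseek = default_llseek,
    };

    static void demo_debugfs_init(void)
    {
            demo_dir = debugfs_create_dir("demo", NULL);

            /* Best effort: no return-value checks, no error unwinding. */
            debugfs_create_file("stats", S_IRUSR, demo_dir, NULL,
                                &fops_demo_stats);
            debugfs_create_u32("rx_ok", S_IRUSR | S_IWUSR, demo_dir,
                               &demo_rx_ok);
    }

    static void demo_debugfs_exit(void)
    {
            debugfs_remove_recursive(demo_dir);
    }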
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 59338de0ce19..5488a324cc10 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -54,6 +54,9 @@ struct ath_buf;
54 * @dtimsync: DTIM sync lossage 54 * @dtimsync: DTIM sync lossage
55 * @dtim: RX Beacon with DTIM 55 * @dtim: RX Beacon with DTIM
56 * @bb_watchdog: Baseband watchdog 56 * @bb_watchdog: Baseband watchdog
57 * @tsfoor: TSF out of range, indicates that the corrected TSF received
58 * from a beacon differs from the PCU's internal TSF by more than a
59 * (programmable) threshold
57 */ 60 */
58struct ath_interrupt_stats { 61struct ath_interrupt_stats {
59 u32 total; 62 u32 total;
@@ -78,6 +81,7 @@ struct ath_interrupt_stats {
78 u32 dtimsync; 81 u32 dtimsync;
79 u32 dtim; 82 u32 dtim;
80 u32 bb_watchdog; 83 u32 bb_watchdog;
84 u32 tsfoor;
81}; 85};
82 86
83/** 87/**
@@ -157,6 +161,13 @@ struct ath_rx_stats {
157 u32 post_delim_crc_err; 161 u32 post_delim_crc_err;
158 u32 decrypt_busy_err; 162 u32 decrypt_busy_err;
159 u32 phy_err_stats[ATH9K_PHYERR_MAX]; 163 u32 phy_err_stats[ATH9K_PHYERR_MAX];
164 int8_t rs_rssi_ctl0;
165 int8_t rs_rssi_ctl1;
166 int8_t rs_rssi_ctl2;
167 int8_t rs_rssi_ext0;
168 int8_t rs_rssi_ext1;
169 int8_t rs_rssi_ext2;
170 u8 rs_antenna;
160}; 171};
161 172
162struct ath_stats { 173struct ath_stats {
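
The new rx-stat fields above are deliberately int8_t: per-chain RSSI is a signed quantity (the driver elsewhere uses -128 as a "bad RSSI" marker), and the read_file_recv() hunk can print them with a plain %10d because int8_t arguments are promoted to int in a variadic call. A quick stand-alone check of that behaviour in plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int8_t rssi_ctl0 = -128;        /* "bad RSSI" marker */
            int8_t rssi_ctl1 = 23;

            /* int8_t promotes to int, so %d prints the signed value as-is. */
            printf("%18s : %10d\n", "RSSI-CTL0", rssi_ctl0);
            printf("%18s : %10d\n", "RSSI-CTL1", rssi_ctl1);
            return 0;
    }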
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index bd82447f5b78..3e316133f114 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -436,7 +436,11 @@ struct modal_eep_4k_header {
436 u8 db2_2:4, db2_3:4; 436 u8 db2_2:4, db2_3:4;
437 u8 db2_4:4, reserved:4; 437 u8 db2_4:4, reserved:4;
438#endif 438#endif
439 u8 futureModal[4]; 439 u8 tx_diversity;
440 u8 flc_pwr_thresh;
441 u8 bb_scale_smrt_antenna;
442#define EEP_4K_BB_DESIRED_SCALE_MASK 0x1f
443 u8 futureModal[1];
440 struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS]; 444 struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
441} __packed; 445} __packed;
442 446
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index bc77a308c901..6f714dd72365 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -781,6 +781,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
781{ 781{
782 struct modal_eep_4k_header *pModal; 782 struct modal_eep_4k_header *pModal;
783 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; 783 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
784 struct base_eep_header_4k *pBase = &eep->baseEepHeader;
784 u8 txRxAttenLocal; 785 u8 txRxAttenLocal;
785 u8 ob[5], db1[5], db2[5]; 786 u8 ob[5], db1[5], db2[5];
786 u8 ant_div_control1, ant_div_control2; 787 u8 ant_div_control1, ant_div_control2;
@@ -1003,6 +1004,31 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1003 AR_PHY_SETTLING_SWITCH, 1004 AR_PHY_SETTLING_SWITCH,
1004 pModal->swSettleHt40); 1005 pModal->swSettleHt40);
1005 } 1006 }
1007 if (AR_SREV_9271(ah) || AR_SREV_9285(ah)) {
1008 u8 bb_desired_scale = (pModal->bb_scale_smrt_antenna &
1009 EEP_4K_BB_DESIRED_SCALE_MASK);
1010 if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
1011 u32 pwrctrl, mask, clr;
1012
1013 mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
1014 pwrctrl = mask * bb_desired_scale;
1015 clr = mask * 0x1f;
1016 REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
1017 REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
1018 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
1019
1020 mask = BIT(0)|BIT(5)|BIT(15);
1021 pwrctrl = mask * bb_desired_scale;
1022 clr = mask * 0x1f;
1023 REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr);
1024
1025 mask = BIT(0)|BIT(5);
1026 pwrctrl = mask * bb_desired_scale;
1027 clr = mask * 0x1f;
1028 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
1029 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
1030 }
1031 }
1006} 1032}
1007 1033
1008static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) 1034static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
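
The smart-antenna block added to ath9k_hw_4k_set_board_values() relies on a compact trick: the target registers pack several 5-bit "desired scale" fields whose least-significant bits sit at positions 0, 5, 10, ..., so multiplying a mask that has only those LSBs set by the 5-bit value replicates the value into every field in one multiplication, and multiplying the same mask by 0x1f yields the matching clear mask for REG_RMW(). This only works because the value never exceeds the field width (0x1f), so no product carries across a field boundary. A stand-alone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            uint32_t mask  = BIT(0) | BIT(5) | BIT(10); /* LSB of each 5-bit field */
            uint32_t scale = 0x0a;                      /* example 5-bit value */

            uint32_t set = mask * scale;    /* 0x0000294a */
            uint32_t clr = mask * 0x1f;     /* 0x00007fff */

            printf("set = 0x%08x, clr = 0x%08x\n", set, clr);
            /* Long-hand equivalent of the replication: */
            printf("chk = 0x%08x\n", scale | (scale << 5) | (scale << 10));
            return 0;
    }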
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 8cd8333cc086..b87db4763098 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -319,10 +319,9 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
319 u16 numXpdGain, xpdMask; 319 u16 numXpdGain, xpdMask;
320 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = {0, 0, 0, 0}; 320 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = {0, 0, 0, 0};
321 u32 reg32, regOffset, regChainOffset, regval; 321 u32 reg32, regOffset, regChainOffset, regval;
322 int16_t modalIdx, diff = 0; 322 int16_t diff = 0;
323 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 323 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
324 324
325 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
326 xpdMask = pEepData->modalHeader.xpdGain; 325 xpdMask = pEepData->modalHeader.xpdGain;
327 326
328 if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >= 327 if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
@@ -392,6 +391,8 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
392 numXpdGain); 391 numXpdGain);
393 } 392 }
394 393
394 ENABLE_REGWRITE_BUFFER(ah);
395
395 if (i == 0) { 396 if (i == 0) {
396 if (!ath9k_hw_ar9287_get_eeprom(ah, 397 if (!ath9k_hw_ar9287_get_eeprom(ah,
397 EEP_OL_PWRCTRL)) { 398 EEP_OL_PWRCTRL)) {
@@ -442,6 +443,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
442 regOffset += 4; 443 regOffset += 4;
443 } 444 }
444 } 445 }
446 REGWRITE_BUFFER_FLUSH(ah);
445 } 447 }
446 } 448 }
447 449
@@ -757,6 +759,8 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
757 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; 759 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
758 } 760 }
759 761
762 ENABLE_REGWRITE_BUFFER(ah);
763
760 /* OFDM power per rate */ 764 /* OFDM power per rate */
761 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 765 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
762 ATH9K_POW_SM(ratesArray[rate18mb], 24) 766 ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -840,6 +844,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
840 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 844 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
841 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 845 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
842 } 846 }
847 REGWRITE_BUFFER_FLUSH(ah);
843} 848}
844 849
845static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah, 850static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
@@ -852,35 +857,12 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
852{ 857{
853 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 858 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
854 struct modal_eep_ar9287_header *pModal = &eep->modalHeader; 859 struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
855 u16 antWrites[AR9287_ANT_16S];
856 u32 regChainOffset, regval; 860 u32 regChainOffset, regval;
857 u8 txRxAttenLocal; 861 u8 txRxAttenLocal;
858 int i, j, offset_num; 862 int i;
859 863
860 pModal = &eep->modalHeader; 864 pModal = &eep->modalHeader;
861 865
862 antWrites[0] = (u16)((pModal->antCtrlCommon >> 28) & 0xF);
863 antWrites[1] = (u16)((pModal->antCtrlCommon >> 24) & 0xF);
864 antWrites[2] = (u16)((pModal->antCtrlCommon >> 20) & 0xF);
865 antWrites[3] = (u16)((pModal->antCtrlCommon >> 16) & 0xF);
866 antWrites[4] = (u16)((pModal->antCtrlCommon >> 12) & 0xF);
867 antWrites[5] = (u16)((pModal->antCtrlCommon >> 8) & 0xF);
868 antWrites[6] = (u16)((pModal->antCtrlCommon >> 4) & 0xF);
869 antWrites[7] = (u16)(pModal->antCtrlCommon & 0xF);
870
871 offset_num = 8;
872
873 for (i = 0, j = offset_num; i < AR9287_MAX_CHAINS; i++) {
874 antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 28) & 0xf);
875 antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 10) & 0x3);
876 antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 8) & 0x3);
877 antWrites[j++] = 0;
878 antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 6) & 0x3);
879 antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 4) & 0x3);
880 antWrites[j++] = (u16)((pModal->antCtrlChain[i] >> 2) & 0x3);
881 antWrites[j++] = (u16)(pModal->antCtrlChain[i] & 0x3);
882 }
883
884 REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon); 866 REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
885 867
886 for (i = 0; i < AR9287_MAX_CHAINS; i++) { 868 for (i = 0; i < AR9287_MAX_CHAINS; i++) {
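
The power-table loops above gain ENABLE_REGWRITE_BUFFER()/REGWRITE_BUFFER_FLUSH() brackets around their runs of consecutive REG_WRITE() calls. The macros dispatch to optional bus ops: on memory-mapped PCI parts they are effectively no-ops, while on the USB/HTC target the buffered writes are sent to the firmware as one batched command instead of one transaction per register. The toy model below only illustrates the batching idea; it is not the driver's implementation:

    #include <stdint.h>
    #include <stdio.h>

    struct reg_write { uint32_t reg, val; };

    static struct reg_write wbuf[32];
    static unsigned int wbuf_cnt;
    static int buffering;

    static void reg_write(uint32_t reg, uint32_t val)
    {
            if (buffering && wbuf_cnt < 32) {
                    wbuf[wbuf_cnt].reg = reg;       /* queue instead of writing */
                    wbuf[wbuf_cnt].val = val;
                    wbuf_cnt++;
                    return;
            }
            printf("single write: reg 0x%04x <- 0x%08x\n", reg, val);
    }

    static void regwrite_buffer_flush(void)
    {
            printf("flushing %u writes in one transaction\n", wbuf_cnt);
            wbuf_cnt = 0;
            buffering = 0;
    }

    int main(void)
    {
            buffering = 1;                  /* ENABLE_REGWRITE_BUFFER() */
            reg_write(0x9810, 0x1);         /* consecutive REG_WRITE()s */
            reg_write(0x9814, 0x2);
            reg_write(0x9818, 0x3);
            regwrite_buffer_flush();        /* REGWRITE_BUFFER_FLUSH() */
            return 0;
    }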
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index fccd87df7300..c031854b569f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -231,6 +231,10 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
231 integer = swab32(pModal->antCtrlChain[i]); 231 integer = swab32(pModal->antCtrlChain[i]);
232 pModal->antCtrlChain[i] = integer; 232 pModal->antCtrlChain[i] = integer;
233 } 233 }
234 for (i = 0; i < 3; i++) {
235 word = swab16(pModal->xpaBiasLvlFreq[i]);
236 pModal->xpaBiasLvlFreq[i] = word;
237 }
234 238
235 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 239 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
236 word = swab16(pModal->spurChans[i].spurChan); 240 word = swab16(pModal->spurChans[i].spurChan);
@@ -799,6 +803,8 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
799 pwr_table_offset, 803 pwr_table_offset,
800 &diff); 804 &diff);
801 805
806 ENABLE_REGWRITE_BUFFER(ah);
807
802 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { 808 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
803 if (OLC_FOR_AR9280_20_LATER) { 809 if (OLC_FOR_AR9280_20_LATER) {
804 REG_WRITE(ah, 810 REG_WRITE(ah,
@@ -847,6 +853,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
847 853
848 regOffset += 4; 854 regOffset += 4;
849 } 855 }
856 REGWRITE_BUFFER_FLUSH(ah);
850 } 857 }
851 } 858 }
852 859
@@ -1205,6 +1212,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1205 } 1212 }
1206 } 1213 }
1207 1214
1215 ENABLE_REGWRITE_BUFFER(ah);
1216
1208 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 1217 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
1209 ATH9K_POW_SM(ratesArray[rate18mb], 24) 1218 ATH9K_POW_SM(ratesArray[rate18mb], 24)
1210 | ATH9K_POW_SM(ratesArray[rate12mb], 16) 1219 | ATH9K_POW_SM(ratesArray[rate12mb], 16)
@@ -1291,6 +1300,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1291 REG_WRITE(ah, AR_PHY_POWER_TX_SUB, 1300 REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
1292 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6) 1301 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
1293 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)); 1302 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
1303
1304 REGWRITE_BUFFER_FLUSH(ah);
1294} 1305}
1295 1306
1296static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) 1307static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
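
The new loop in ath9k_hw_def_check_eeprom() adds xpaBiasLvlFreq[] to the set of multi-byte EEPROM fields that get byte-swapped when the image is stored in the opposite endianness; without it those three 16-bit words would be read back byte-reversed on such systems. A plain-C stand-in for the kernel's swab16() shows the operation:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's swab16(). */
    static uint16_t demo_swab16(uint16_t x)
    {
            return (uint16_t)((x >> 8) | (x << 8));
    }

    int main(void)
    {
            uint16_t xpa_bias_lvl_freq[3] = { 0x3412, 0x8000, 0x0100 };
            int i;

            for (i = 0; i < 3; i++)
                    xpa_bias_lvl_freq[i] = demo_swab16(xpa_bias_lvl_freq[i]);

            printf("0x%04x 0x%04x 0x%04x\n", xpa_bias_lvl_freq[0],
                   xpa_bias_lvl_freq[1], xpa_bias_lvl_freq[2]);
            return 0;
    }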
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 0fb8f8ac275a..0349b3a1cc58 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -41,12 +41,16 @@ void ath_init_leds(struct ath_softc *sc)
41{ 41{
42 int ret; 42 int ret;
43 43
44 if (AR_SREV_9287(sc->sc_ah)) 44 if (sc->sc_ah->led_pin < 0) {
45 sc->sc_ah->led_pin = ATH_LED_PIN_9287; 45 if (AR_SREV_9287(sc->sc_ah))
46 else if (AR_SREV_9485(sc->sc_ah)) 46 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
47 sc->sc_ah->led_pin = ATH_LED_PIN_9485; 47 else if (AR_SREV_9485(sc->sc_ah))
48 else 48 sc->sc_ah->led_pin = ATH_LED_PIN_9485;
49 sc->sc_ah->led_pin = ATH_LED_PIN_DEF; 49 else if (AR_SREV_9300(sc->sc_ah))
50 sc->sc_ah->led_pin = ATH_LED_PIN_9300;
51 else
52 sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
53 }
50 54
51 /* Configure gpio 1 for output */ 55 /* Configure gpio 1 for output */
52 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, 56 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
@@ -136,10 +140,10 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
136 140
137static void ath9k_gen_timer_start(struct ath_hw *ah, 141static void ath9k_gen_timer_start(struct ath_hw *ah,
138 struct ath_gen_timer *timer, 142 struct ath_gen_timer *timer,
139 u32 timer_next, 143 u32 trig_timeout,
140 u32 timer_period) 144 u32 timer_period)
141{ 145{
142 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period); 146 ath9k_hw_gen_timer_start(ah, timer, trig_timeout, timer_period);
143 147
144 if ((ah->imask & ATH9K_INT_GENTIMER) == 0) { 148 if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
145 ath9k_hw_disable_interrupts(ah); 149 ath9k_hw_disable_interrupts(ah);
@@ -172,17 +176,17 @@ static void ath_btcoex_period_timer(unsigned long data)
172 struct ath_softc *sc = (struct ath_softc *) data; 176 struct ath_softc *sc = (struct ath_softc *) data;
173 struct ath_hw *ah = sc->sc_ah; 177 struct ath_hw *ah = sc->sc_ah;
174 struct ath_btcoex *btcoex = &sc->btcoex; 178 struct ath_btcoex *btcoex = &sc->btcoex;
175 struct ath_common *common = ath9k_hw_common(ah);
176 u32 timer_period; 179 u32 timer_period;
177 bool is_btscan; 180 bool is_btscan;
178 181
182 ath9k_ps_wakeup(sc);
179 ath_detect_bt_priority(sc); 183 ath_detect_bt_priority(sc);
180 184
181 is_btscan = sc->sc_flags & SC_OP_BT_SCAN; 185 is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
182 186
183 spin_lock_bh(&btcoex->btcoex_lock); 187 spin_lock_bh(&btcoex->btcoex_lock);
184 188
185 ath9k_cmn_btcoex_bt_stomp(common, is_btscan ? ATH_BTCOEX_STOMP_ALL : 189 ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
186 btcoex->bt_stomp_type); 190 btcoex->bt_stomp_type);
187 191
188 spin_unlock_bh(&btcoex->btcoex_lock); 192 spin_unlock_bh(&btcoex->btcoex_lock);
@@ -193,11 +197,12 @@ static void ath_btcoex_period_timer(unsigned long data)
193 197
194 timer_period = is_btscan ? btcoex->btscan_no_stomp : 198 timer_period = is_btscan ? btcoex->btscan_no_stomp :
195 btcoex->btcoex_no_stomp; 199 btcoex->btcoex_no_stomp;
196 ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, 0, 200 ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
197 timer_period * 10); 201 timer_period * 10);
198 btcoex->hw_timer_enabled = true; 202 btcoex->hw_timer_enabled = true;
199 } 203 }
200 204
205 ath9k_ps_restore(sc);
201 mod_timer(&btcoex->period_timer, jiffies + 206 mod_timer(&btcoex->period_timer, jiffies +
202 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD)); 207 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
203} 208}
@@ -217,14 +222,16 @@ static void ath_btcoex_no_stomp_timer(void *arg)
217 ath_dbg(common, ATH_DBG_BTCOEX, 222 ath_dbg(common, ATH_DBG_BTCOEX,
218 "no stomp timer running\n"); 223 "no stomp timer running\n");
219 224
225 ath9k_ps_wakeup(sc);
220 spin_lock_bh(&btcoex->btcoex_lock); 226 spin_lock_bh(&btcoex->btcoex_lock);
221 227
222 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 228 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
223 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE); 229 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
224 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 230 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
225 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_LOW); 231 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
226 232
227 spin_unlock_bh(&btcoex->btcoex_lock); 233 spin_unlock_bh(&btcoex->btcoex_lock);
234 ath9k_ps_restore(sc);
228} 235}
229 236
230int ath_init_btcoex_timer(struct ath_softc *sc) 237int ath_init_btcoex_timer(struct ath_softc *sc)
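
Two things change in the btcoex handlers above: the stomp calls go through ath9k_hw_btcoex_bt_stomp() directly, and both callbacks now wrap their work in ath9k_ps_wakeup()/ath9k_ps_restore(), since they can fire while the chip is in network sleep and the stomp configuration touches hardware registers. (The generic-timer change also passes timer_period as the trigger timeout instead of 0, so the no-stomp timer first fires one period after being armed.) A schematic of the bracketing pattern, not the driver's actual handler:

    /* Schematic only: field and helper names follow the driver, but the body
     * is reduced to the power-save bracketing shown in the hunk above. */
    static void demo_btcoex_period_timer(unsigned long data)
    {
            struct ath_softc *sc = (struct ath_softc *)data;
            struct ath_btcoex *btcoex = &sc->btcoex;

            ath9k_ps_wakeup(sc);            /* chip may be in network sleep */

            spin_lock_bh(&btcoex->btcoex_lock);
            /* ... program the BT stomp weights via ath9k_hw_* helpers ... */
            spin_unlock_bh(&btcoex->btcoex_lock);

            ath9k_ps_restore(sc);           /* drop the wakeup reference */

            mod_timer(&btcoex->period_timer, jiffies +
                      msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
    }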
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 2d10239ce829..2e3a33a53406 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -17,11 +17,9 @@
17#include "htc.h" 17#include "htc.h"
18 18
19/* identify firmware images */ 19/* identify firmware images */
20#define FIRMWARE_AR7010 "ar7010.fw" 20#define FIRMWARE_AR7010_1_1 "htc_7010.fw"
21#define FIRMWARE_AR7010_1_1 "ar7010_1_1.fw" 21#define FIRMWARE_AR9271 "htc_9271.fw"
22#define FIRMWARE_AR9271 "ar9271.fw"
23 22
24MODULE_FIRMWARE(FIRMWARE_AR7010);
25MODULE_FIRMWARE(FIRMWARE_AR7010_1_1); 23MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
26MODULE_FIRMWARE(FIRMWARE_AR9271); 24MODULE_FIRMWARE(FIRMWARE_AR9271);
27 25
@@ -80,7 +78,7 @@ static void hif_usb_regout_cb(struct urb *urb)
80 78
81 if (cmd) { 79 if (cmd) {
82 ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle, 80 ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
83 cmd->skb, 1); 81 cmd->skb, true);
84 kfree(cmd); 82 kfree(cmd);
85 } 83 }
86 84
@@ -126,6 +124,90 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
126 return ret; 124 return ret;
127} 125}
128 126
127static void hif_usb_mgmt_cb(struct urb *urb)
128{
129 struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
 130	struct hif_device_usb *hif_dev;
 131	bool txok = true;
 132
 133	if (!cmd || !cmd->skb || !cmd->hif_dev)
 134		return;
 135	hif_dev = cmd->hif_dev;
136 switch (urb->status) {
137 case 0:
138 break;
139 case -ENOENT:
140 case -ECONNRESET:
141 case -ENODEV:
142 case -ESHUTDOWN:
143 txok = false;
144
145 /*
146 * If the URBs are being flushed, no need to complete
147 * this packet.
148 */
149 spin_lock(&hif_dev->tx.tx_lock);
150 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
151 spin_unlock(&hif_dev->tx.tx_lock);
152 dev_kfree_skb_any(cmd->skb);
153 kfree(cmd);
154 return;
155 }
156 spin_unlock(&hif_dev->tx.tx_lock);
157
158 break;
159 default:
160 txok = false;
161 break;
162 }
163
164 skb_pull(cmd->skb, 4);
165 ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
166 cmd->skb, txok);
167 kfree(cmd);
168}
169
170static int hif_usb_send_mgmt(struct hif_device_usb *hif_dev,
171 struct sk_buff *skb)
172{
173 struct urb *urb;
174 struct cmd_buf *cmd;
175 int ret = 0;
176 __le16 *hdr;
177
178 urb = usb_alloc_urb(0, GFP_ATOMIC);
179 if (urb == NULL)
180 return -ENOMEM;
181
182 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
183 if (cmd == NULL) {
184 usb_free_urb(urb);
185 return -ENOMEM;
186 }
187
188 cmd->skb = skb;
189 cmd->hif_dev = hif_dev;
190
191 hdr = (__le16 *) skb_push(skb, 4);
192 *hdr++ = cpu_to_le16(skb->len - 4);
193 *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
194
195 usb_fill_bulk_urb(urb, hif_dev->udev,
196 usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
197 skb->data, skb->len,
198 hif_usb_mgmt_cb, cmd);
199
200 usb_anchor_urb(urb, &hif_dev->mgmt_submitted);
201 ret = usb_submit_urb(urb, GFP_ATOMIC);
202 if (ret) {
203 usb_unanchor_urb(urb);
204 kfree(cmd);
205 }
206 usb_free_urb(urb);
207
208 return ret;
209}
210
129static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev, 211static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
130 struct sk_buff_head *list) 212 struct sk_buff_head *list)
131{ 213{
@@ -133,7 +215,22 @@ static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
133 215
134 while ((skb = __skb_dequeue(list)) != NULL) { 216 while ((skb = __skb_dequeue(list)) != NULL) {
135 dev_kfree_skb_any(skb); 217 dev_kfree_skb_any(skb);
136 TX_STAT_INC(skb_dropped); 218 }
219}
220
221static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
222 struct sk_buff_head *queue,
223 bool txok)
224{
225 struct sk_buff *skb;
226
227 while ((skb = __skb_dequeue(queue)) != NULL) {
228 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
229 skb, txok);
230 if (txok)
231 TX_STAT_INC(skb_success);
232 else
233 TX_STAT_INC(skb_failed);
137 } 234 }
138} 235}
139 236
@@ -141,7 +238,7 @@ static void hif_usb_tx_cb(struct urb *urb)
141{ 238{
142 struct tx_buf *tx_buf = (struct tx_buf *) urb->context; 239 struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
143 struct hif_device_usb *hif_dev; 240 struct hif_device_usb *hif_dev;
144 struct sk_buff *skb; 241 bool txok = true;
145 242
146 if (!tx_buf || !tx_buf->hif_dev) 243 if (!tx_buf || !tx_buf->hif_dev)
147 return; 244 return;
@@ -155,10 +252,7 @@ static void hif_usb_tx_cb(struct urb *urb)
155 case -ECONNRESET: 252 case -ECONNRESET:
156 case -ENODEV: 253 case -ENODEV:
157 case -ESHUTDOWN: 254 case -ESHUTDOWN:
158 /* 255 txok = false;
159 * The URB has been killed, free the SKBs.
160 */
161 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
162 256
163 /* 257 /*
164 * If the URBs are being flushed, no need to add this 258 * If the URBs are being flushed, no need to add this
@@ -167,41 +261,19 @@ static void hif_usb_tx_cb(struct urb *urb)
167 spin_lock(&hif_dev->tx.tx_lock); 261 spin_lock(&hif_dev->tx.tx_lock);
168 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) { 262 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
169 spin_unlock(&hif_dev->tx.tx_lock); 263 spin_unlock(&hif_dev->tx.tx_lock);
264 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
170 return; 265 return;
171 } 266 }
172 spin_unlock(&hif_dev->tx.tx_lock); 267 spin_unlock(&hif_dev->tx.tx_lock);
173 268
174 /* 269 break;
175 * In the stop() case, this URB has to be added to
176 * the free list.
177 */
178 goto add_free;
179 default: 270 default:
271 txok = false;
180 break; 272 break;
181 } 273 }
182 274
183 /* 275 ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, txok);
184 * Check if TX has been stopped, this is needed because
185 * this CB could have been invoked just after the TX lock
186 * was released in hif_stop() and kill_urb() hasn't been
187 * called yet.
188 */
189 spin_lock(&hif_dev->tx.tx_lock);
190 if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
191 spin_unlock(&hif_dev->tx.tx_lock);
192 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
193 goto add_free;
194 }
195 spin_unlock(&hif_dev->tx.tx_lock);
196
197 /* Complete the queued SKBs. */
198 while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
199 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
200 skb, 1);
201 TX_STAT_INC(skb_completed);
202 }
203 276
204add_free:
205 /* Re-initialize the SKB queue */ 277 /* Re-initialize the SKB queue */
206 tx_buf->len = tx_buf->offset = 0; 278 tx_buf->len = tx_buf->offset = 0;
207 __skb_queue_head_init(&tx_buf->skb_queue); 279 __skb_queue_head_init(&tx_buf->skb_queue);
@@ -274,7 +346,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
274 ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC); 346 ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
275 if (ret) { 347 if (ret) {
276 tx_buf->len = tx_buf->offset = 0; 348 tx_buf->len = tx_buf->offset = 0;
277 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue); 349 ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, false);
278 __skb_queue_head_init(&tx_buf->skb_queue); 350 __skb_queue_head_init(&tx_buf->skb_queue);
279 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); 351 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
280 hif_dev->tx.tx_buf_cnt++; 352 hif_dev->tx.tx_buf_cnt++;
@@ -286,10 +358,11 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
286 return ret; 358 return ret;
287} 359}
288 360
289static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb, 361static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
290 struct ath9k_htc_tx_ctl *tx_ctl)
291{ 362{
363 struct ath9k_htc_tx_ctl *tx_ctl;
292 unsigned long flags; 364 unsigned long flags;
365 int ret = 0;
293 366
294 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 367 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
295 368
@@ -304,26 +377,36 @@ static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
304 return -ENOMEM; 377 return -ENOMEM;
305 } 378 }
306 379
307 __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb); 380 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
308 hif_dev->tx.tx_skb_cnt++;
309 381
310 /* Send normal frames immediately */ 382 tx_ctl = HTC_SKB_CB(skb);
311 if (!tx_ctl || (tx_ctl && (tx_ctl->type == ATH9K_HTC_NORMAL))) 383
312 __hif_usb_tx(hif_dev); 384 /* Mgmt/Beacon frames don't use the TX buffer pool */
385 if ((tx_ctl->type == ATH9K_HTC_MGMT) ||
386 (tx_ctl->type == ATH9K_HTC_BEACON)) {
387 ret = hif_usb_send_mgmt(hif_dev, skb);
388 }
389
390 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
391
392 if ((tx_ctl->type == ATH9K_HTC_NORMAL) ||
393 (tx_ctl->type == ATH9K_HTC_AMPDU)) {
394 __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
395 hif_dev->tx.tx_skb_cnt++;
396 }
313 397
314 /* Check if AMPDUs have to be sent immediately */ 398 /* Check if AMPDUs have to be sent immediately */
315 if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) && 399 if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
316 (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
317 (hif_dev->tx.tx_skb_cnt < 2)) { 400 (hif_dev->tx.tx_skb_cnt < 2)) {
318 __hif_usb_tx(hif_dev); 401 __hif_usb_tx(hif_dev);
319 } 402 }
320 403
321 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 404 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
322 405
323 return 0; 406 return ret;
324} 407}
325 408
326static void hif_usb_start(void *hif_handle, u8 pipe_id) 409static void hif_usb_start(void *hif_handle)
327{ 410{
328 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle; 411 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
329 unsigned long flags; 412 unsigned long flags;
@@ -335,14 +418,14 @@ static void hif_usb_start(void *hif_handle, u8 pipe_id)
335 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 418 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
336} 419}
337 420
338static void hif_usb_stop(void *hif_handle, u8 pipe_id) 421static void hif_usb_stop(void *hif_handle)
339{ 422{
340 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle; 423 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
341 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; 424 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
342 unsigned long flags; 425 unsigned long flags;
343 426
344 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 427 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
345 ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue); 428 ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false);
346 hif_dev->tx.tx_skb_cnt = 0; 429 hif_dev->tx.tx_skb_cnt = 0;
347 hif_dev->tx.flags |= HIF_USB_TX_STOP; 430 hif_dev->tx.flags |= HIF_USB_TX_STOP;
348 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 431 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
@@ -352,17 +435,18 @@ static void hif_usb_stop(void *hif_handle, u8 pipe_id)
352 &hif_dev->tx.tx_pending, list) { 435 &hif_dev->tx.tx_pending, list) {
353 usb_kill_urb(tx_buf->urb); 436 usb_kill_urb(tx_buf->urb);
354 } 437 }
438
439 usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
355} 440}
356 441
357static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb, 442static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
358 struct ath9k_htc_tx_ctl *tx_ctl)
359{ 443{
360 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle; 444 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
361 int ret = 0; 445 int ret = 0;
362 446
363 switch (pipe_id) { 447 switch (pipe_id) {
364 case USB_WLAN_TX_PIPE: 448 case USB_WLAN_TX_PIPE:
365 ret = hif_usb_send_tx(hif_dev, skb, tx_ctl); 449 ret = hif_usb_send_tx(hif_dev, skb);
366 break; 450 break;
367 case USB_REG_OUT_PIPE: 451 case USB_REG_OUT_PIPE:
368 ret = hif_usb_send_regout(hif_dev, skb); 452 ret = hif_usb_send_regout(hif_dev, skb);
@@ -377,6 +461,40 @@ static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
377 return ret; 461 return ret;
378} 462}
379 463
464static inline bool check_index(struct sk_buff *skb, u8 idx)
465{
466 struct ath9k_htc_tx_ctl *tx_ctl;
467
468 tx_ctl = HTC_SKB_CB(skb);
469
470 if ((tx_ctl->type == ATH9K_HTC_AMPDU) &&
471 (tx_ctl->sta_idx == idx))
472 return true;
473
474 return false;
475}
476
477static void hif_usb_sta_drain(void *hif_handle, u8 idx)
478{
479 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
480 struct sk_buff *skb, *tmp;
481 unsigned long flags;
482
483 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
484
485 skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) {
486 if (check_index(skb, idx)) {
487 __skb_unlink(skb, &hif_dev->tx.tx_skb_queue);
488 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
489 skb, false);
490 hif_dev->tx.tx_skb_cnt--;
491 TX_STAT_INC(skb_failed);
492 }
493 }
494
495 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
496}
497
380static struct ath9k_htc_hif hif_usb = { 498static struct ath9k_htc_hif hif_usb = {
381 .transport = ATH9K_HIF_USB, 499 .transport = ATH9K_HIF_USB,
382 .name = "ath9k_hif_usb", 500 .name = "ath9k_hif_usb",
@@ -386,6 +504,7 @@ static struct ath9k_htc_hif hif_usb = {
386 504
387 .start = hif_usb_start, 505 .start = hif_usb_start,
388 .stop = hif_usb_stop, 506 .stop = hif_usb_stop,
507 .sta_drain = hif_usb_sta_drain,
389 .send = hif_usb_send, 508 .send = hif_usb_send,
390}; 509};
391 510
@@ -567,6 +686,9 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
567 case -ESHUTDOWN: 686 case -ESHUTDOWN:
568 goto free; 687 goto free;
569 default: 688 default:
689 skb_reset_tail_pointer(skb);
690 skb_trim(skb, 0);
691
570 goto resubmit; 692 goto resubmit;
571 } 693 }
572 694
@@ -591,23 +713,15 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
591 USB_REG_IN_PIPE), 713 USB_REG_IN_PIPE),
592 nskb->data, MAX_REG_IN_BUF_SIZE, 714 nskb->data, MAX_REG_IN_BUF_SIZE,
593 ath9k_hif_usb_reg_in_cb, nskb); 715 ath9k_hif_usb_reg_in_cb, nskb);
594
595 ret = usb_submit_urb(urb, GFP_ATOMIC);
596 if (ret) {
597 kfree_skb(nskb);
598 urb->context = NULL;
599 }
600
601 return;
602 } 716 }
603 717
604resubmit: 718resubmit:
605 skb_reset_tail_pointer(skb); 719 usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
606 skb_trim(skb, 0);
607
608 ret = usb_submit_urb(urb, GFP_ATOMIC); 720 ret = usb_submit_urb(urb, GFP_ATOMIC);
609 if (ret) 721 if (ret) {
722 usb_unanchor_urb(urb);
610 goto free; 723 goto free;
724 }
611 725
612 return; 726 return;
613free: 727free:
@@ -641,6 +755,8 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
641 kfree(tx_buf->buf); 755 kfree(tx_buf->buf);
642 kfree(tx_buf); 756 kfree(tx_buf);
643 } 757 }
758
759 usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
644} 760}
645 761
646static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) 762static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
@@ -652,6 +768,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
652 INIT_LIST_HEAD(&hif_dev->tx.tx_pending); 768 INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
653 spin_lock_init(&hif_dev->tx.tx_lock); 769 spin_lock_init(&hif_dev->tx.tx_lock);
654 __skb_queue_head_init(&hif_dev->tx.tx_skb_queue); 770 __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
771 init_usb_anchor(&hif_dev->mgmt_submitted);
655 772
656 for (i = 0; i < MAX_TX_URB_NUM; i++) { 773 for (i = 0; i < MAX_TX_URB_NUM; i++) {
657 tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); 774 tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
@@ -748,43 +865,67 @@ err_urb:
748 return ret; 865 return ret;
749} 866}
750 867
751static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev) 868static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
752{ 869{
753 if (hif_dev->reg_in_urb) { 870 usb_kill_anchored_urbs(&hif_dev->reg_in_submitted);
754 usb_kill_urb(hif_dev->reg_in_urb);
755 if (hif_dev->reg_in_urb->context)
756 kfree_skb((void *)hif_dev->reg_in_urb->context);
757 usb_free_urb(hif_dev->reg_in_urb);
758 hif_dev->reg_in_urb = NULL;
759 }
760} 871}
761 872
762static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev) 873static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
763{ 874{
764 struct sk_buff *skb; 875 struct urb *urb = NULL;
876 struct sk_buff *skb = NULL;
877 int i, ret;
765 878
766 hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL); 879 init_usb_anchor(&hif_dev->reg_in_submitted);
767 if (hif_dev->reg_in_urb == NULL)
768 return -ENOMEM;
769 880
770 skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL); 881 for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
771 if (!skb)
772 goto err;
773 882
774 usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev, 883 /* Allocate URB */
775 usb_rcvbulkpipe(hif_dev->udev, 884 urb = usb_alloc_urb(0, GFP_KERNEL);
776 USB_REG_IN_PIPE), 885 if (urb == NULL) {
777 skb->data, MAX_REG_IN_BUF_SIZE, 886 ret = -ENOMEM;
778 ath9k_hif_usb_reg_in_cb, skb); 887 goto err_urb;
888 }
779 889
780 if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0) 890 /* Allocate buffer */
781 goto err; 891 skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
892 if (!skb) {
893 ret = -ENOMEM;
894 goto err_skb;
895 }
896
897 usb_fill_bulk_urb(urb, hif_dev->udev,
898 usb_rcvbulkpipe(hif_dev->udev,
899 USB_REG_IN_PIPE),
900 skb->data, MAX_REG_IN_BUF_SIZE,
901 ath9k_hif_usb_reg_in_cb, skb);
902
903 /* Anchor URB */
904 usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
905
906 /* Submit URB */
907 ret = usb_submit_urb(urb, GFP_KERNEL);
908 if (ret) {
909 usb_unanchor_urb(urb);
910 goto err_submit;
911 }
912
913 /*
 914		 * Drop our reference count. The anchor holds its own, so
 915		 * the URB is still freed once the anchored URBs are killed.
916 */
917 usb_free_urb(urb);
918 }
782 919
783 return 0; 920 return 0;
784 921
785err: 922err_submit:
786 ath9k_hif_usb_dealloc_reg_in_urb(hif_dev); 923 kfree_skb(skb);
787 return -ENOMEM; 924err_skb:
925 usb_free_urb(urb);
926err_urb:
927 ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
928 return ret;
788} 929}
789 930
790static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev) 931static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
@@ -801,7 +942,7 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
801 goto err_rx; 942 goto err_rx;
802 943
803 /* Register Read */ 944 /* Register Read */
804 if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0) 945 if (ath9k_hif_usb_alloc_reg_in_urbs(hif_dev) < 0)
805 goto err_reg; 946 goto err_reg;
806 947
807 return 0; 948 return 0;
@@ -816,7 +957,7 @@ err:
816static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) 957static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
817{ 958{
818 usb_kill_anchored_urbs(&hif_dev->regout_submitted); 959 usb_kill_anchored_urbs(&hif_dev->regout_submitted);
819 ath9k_hif_usb_dealloc_reg_in_urb(hif_dev); 960 ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
820 ath9k_hif_usb_dealloc_tx_urbs(hif_dev); 961 ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
821 ath9k_hif_usb_dealloc_rx_urbs(hif_dev); 962 ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
822} 963}
@@ -1026,10 +1167,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
1026 /* Find out which firmware to load */ 1167 /* Find out which firmware to load */
1027 1168
1028 if (IS_AR7010_DEVICE(id->driver_info)) 1169 if (IS_AR7010_DEVICE(id->driver_info))
1029 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) 1170 hif_dev->fw_name = FIRMWARE_AR7010_1_1;
1030 hif_dev->fw_name = FIRMWARE_AR7010_1_1;
1031 else
1032 hif_dev->fw_name = FIRMWARE_AR7010;
1033 else 1171 else
1034 hif_dev->fw_name = FIRMWARE_AR9271; 1172 hif_dev->fw_name = FIRMWARE_AR9271;
1035 1173
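
The REG_IN and management paths above move from hand-managed URBs to USB anchors: every submitted URB is attached to an anchor, the local reference is dropped straight after submission (the anchor holds its own), and teardown becomes a single usb_kill_anchored_urbs() per anchor. A condensed sketch of that submit/teardown pattern, with demo_* placeholders and error handling trimmed:

    #include <linux/slab.h>
    #include <linux/usb.h>

    struct demo_dev {
            struct usb_device *udev;
            struct usb_anchor submitted;
    };

    static void demo_init(struct demo_dev *dev)
    {
            init_usb_anchor(&dev->submitted);
    }

    static void demo_complete(struct urb *urb)
    {
            /* Inspect urb->status / urb->actual_length here; the USB core
             * takes care of unanchoring completed URBs. */
    }

    static int demo_submit_one(struct demo_dev *dev, unsigned int pipe,
                               void *buf, int len)
    {
            struct urb *urb;
            int ret;

            urb = usb_alloc_urb(0, GFP_KERNEL);
            if (!urb)
                    return -ENOMEM;

            usb_fill_bulk_urb(urb, dev->udev, pipe, buf, len,
                              demo_complete, dev);

            usb_anchor_urb(urb, &dev->submitted);
            ret = usb_submit_urb(urb, GFP_KERNEL);
            if (ret)
                    usb_unanchor_urb(urb);

            /* The anchor keeps its own reference while the URB is in
             * flight, so ours can be dropped immediately. */
            usb_free_urb(urb);
            return ret;
    }

    static void demo_stop(struct demo_dev *dev)
    {
            usb_kill_anchored_urbs(&dev->submitted);   /* cancel everything */
    }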
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 7b9d863d4035..2bdcdbc14b1e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -17,6 +17,9 @@
17#ifndef HTC_USB_H 17#ifndef HTC_USB_H
18#define HTC_USB_H 18#define HTC_USB_H
19 19
20#define MAJOR_VERSION_REQ 1
21#define MINOR_VERSION_REQ 2
22
20#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB)) 23#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB))
21 24
22#define AR9271_FIRMWARE 0x501000 25#define AR9271_FIRMWARE 0x501000
@@ -31,7 +34,7 @@
31 34
32/* FIXME: Verify these numbers (with Windows) */ 35/* FIXME: Verify these numbers (with Windows) */
33#define MAX_TX_URB_NUM 8 36#define MAX_TX_URB_NUM 8
34#define MAX_TX_BUF_NUM 1024 37#define MAX_TX_BUF_NUM 256
35#define MAX_TX_BUF_SIZE 32768 38#define MAX_TX_BUF_SIZE 32768
36#define MAX_TX_AGGR_NUM 20 39#define MAX_TX_AGGR_NUM 20
37 40
@@ -40,7 +43,7 @@
40#define MAX_PKT_NUM_IN_TRANSFER 10 43#define MAX_PKT_NUM_IN_TRANSFER 10
41 44
42#define MAX_REG_OUT_URB_NUM 1 45#define MAX_REG_OUT_URB_NUM 1
43#define MAX_REG_OUT_BUF_NUM 8 46#define MAX_REG_IN_URB_NUM 64
44 47
45#define MAX_REG_IN_BUF_SIZE 64 48#define MAX_REG_IN_BUF_SIZE 64
46 49
@@ -90,9 +93,10 @@ struct hif_device_usb {
90 const struct firmware *firmware; 93 const struct firmware *firmware;
91 struct htc_target *htc_handle; 94 struct htc_target *htc_handle;
92 struct hif_usb_tx tx; 95 struct hif_usb_tx tx;
93 struct urb *reg_in_urb;
94 struct usb_anchor regout_submitted; 96 struct usb_anchor regout_submitted;
95 struct usb_anchor rx_submitted; 97 struct usb_anchor rx_submitted;
98 struct usb_anchor reg_in_submitted;
99 struct usb_anchor mgmt_submitted;
96 struct sk_buff *remain_skb; 100 struct sk_buff *remain_skb;
97 const char *fw_name; 101 const char *fw_name;
98 int rx_remain_len; 102 int rx_remain_len;
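
hif_usb.h now advertises a required firmware interface version (1.2), shrinks the TX buffer pool to 256 entries (matched by the tx_slot bitmap added in htc.h below), and replaces the single reg_in_urb with anchors for the 64-URB REG_IN pool and the management URBs. A sketch of how the version gate could look, using the MAJOR/MINOR_VERSION_REQ macros above and the fw_version_major/minor fields added to ath9k_htc_priv further down in htc.h; the real check lives in the HTC setup code, which is not part of this hunk, so treat the exact comparison as an assumption:

    /* Hypothetical gate using the fields/macros defined in these headers. */
    static int demo_check_fw_version(struct ath9k_htc_priv *priv)
    {
            if (priv->fw_version_major != MAJOR_VERSION_REQ ||
                priv->fw_version_minor < MINOR_VERSION_REQ)
                    return -EINVAL;

            return 0;
    }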
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 753a245c5ad1..dfc7a982fc7e 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -66,13 +66,13 @@ enum htc_opmode {
66 HTC_M_WDS = 2 66 HTC_M_WDS = 2
67}; 67};
68 68
69#define ATH9K_HTC_HDRSPACE sizeof(struct htc_frame_hdr) 69#define ATH9K_HTC_AMPDU 1
70#define ATH9K_HTC_AMPDU 1
71#define ATH9K_HTC_NORMAL 2 70#define ATH9K_HTC_NORMAL 2
71#define ATH9K_HTC_BEACON 3
72#define ATH9K_HTC_MGMT 4
72 73
73#define ATH9K_HTC_TX_CTSONLY 0x1 74#define ATH9K_HTC_TX_CTSONLY 0x1
74#define ATH9K_HTC_TX_RTSCTS 0x2 75#define ATH9K_HTC_TX_RTSCTS 0x2
75#define ATH9K_HTC_TX_USE_MIN_RATE 0x100
76 76
77struct tx_frame_hdr { 77struct tx_frame_hdr {
78 u8 data_type; 78 u8 data_type;
@@ -82,7 +82,8 @@ struct tx_frame_hdr {
82 __be32 flags; /* ATH9K_HTC_TX_* */ 82 __be32 flags; /* ATH9K_HTC_TX_* */
83 u8 key_type; 83 u8 key_type;
84 u8 keyix; 84 u8 keyix;
85 u8 reserved[26]; 85 u8 cookie;
86 u8 pad;
86} __packed; 87} __packed;
87 88
88struct tx_mgmt_hdr { 89struct tx_mgmt_hdr {
@@ -92,50 +93,34 @@ struct tx_mgmt_hdr {
92 u8 flags; 93 u8 flags;
93 u8 key_type; 94 u8 key_type;
94 u8 keyix; 95 u8 keyix;
95 u16 reserved; 96 u8 cookie;
97 u8 pad;
96} __packed; 98} __packed;
97 99
98struct tx_beacon_header { 100struct tx_beacon_header {
99 u8 len_changed;
100 u8 vif_index; 101 u8 vif_index;
102 u8 len_changed;
101 u16 rev; 103 u16 rev;
102} __packed; 104} __packed;
103 105
104struct ath9k_htc_target_hw { 106#define MAX_TX_AMPDU_SUBFRAMES_9271 17
105 u32 flags; 107#define MAX_TX_AMPDU_SUBFRAMES_7010 22
106 u32 flags_ext;
107 u32 ampdu_limit;
108 u8 ampdu_subframes;
109 u8 tx_chainmask;
110 u8 tx_chainmask_legacy;
111 u8 rtscts_ratecode;
112 u8 protmode;
113} __packed;
114 108
115struct ath9k_htc_cap_target { 109struct ath9k_htc_cap_target {
116 u32 flags; 110 __be32 ampdu_limit;
117 u32 flags_ext;
118 u32 ampdu_limit;
119 u8 ampdu_subframes; 111 u8 ampdu_subframes;
112 u8 enable_coex;
120 u8 tx_chainmask; 113 u8 tx_chainmask;
121 u8 tx_chainmask_legacy; 114 u8 pad;
122 u8 rtscts_ratecode;
123 u8 protmode;
124} __packed; 115} __packed;
125 116
126struct ath9k_htc_target_vif { 117struct ath9k_htc_target_vif {
127 u8 index; 118 u8 index;
128 u8 des_bssid[ETH_ALEN]; 119 u8 opmode;
129 __be32 opmode;
130 u8 myaddr[ETH_ALEN]; 120 u8 myaddr[ETH_ALEN];
131 u8 bssid[ETH_ALEN];
132 u32 flags;
133 u32 flags_ext;
134 u16 ps_sta;
135 __be16 rtsthreshold;
136 u8 ath_cap; 121 u8 ath_cap;
137 u8 node; 122 __be16 rtsthreshold;
138 s8 mcast_rate; 123 u8 pad;
139} __packed; 124} __packed;
140 125
141#define ATH_HTC_STA_AUTH 0x0001 126#define ATH_HTC_STA_AUTH 0x0001
@@ -143,27 +128,16 @@ struct ath9k_htc_target_vif {
143#define ATH_HTC_STA_ERP 0x0004 128#define ATH_HTC_STA_ERP 0x0004
144#define ATH_HTC_STA_HT 0x0008 129#define ATH_HTC_STA_HT 0x0008
145 130
146/* FIXME: UAPSD variables */
147struct ath9k_htc_target_sta { 131struct ath9k_htc_target_sta {
148 u16 associd;
149 u16 txpower;
150 u32 ucastkey;
151 u8 macaddr[ETH_ALEN]; 132 u8 macaddr[ETH_ALEN];
152 u8 bssid[ETH_ALEN]; 133 u8 bssid[ETH_ALEN];
153 u8 sta_index; 134 u8 sta_index;
154 u8 vif_index; 135 u8 vif_index;
155 u8 vif_sta;
156 __be16 flags; /* ATH_HTC_STA_* */
157 u16 htcap;
158 u8 valid;
159 u16 capinfo;
160 struct ath9k_htc_target_hw *hw;
161 struct ath9k_htc_target_vif *vif;
162 u16 txseqmgmt;
163 u8 is_vif_sta; 136 u8 is_vif_sta;
164 u16 maxampdu; 137 __be16 flags; /* ATH_HTC_STA_* */
165 u16 iv16; 138 __be16 htcap;
166 u32 iv32; 139 __be16 maxampdu;
140 u8 pad;
167} __packed; 141} __packed;
168 142
169struct ath9k_htc_target_aggr { 143struct ath9k_htc_target_aggr {
@@ -197,12 +171,38 @@ struct ath9k_htc_target_rate {
197 struct ath9k_htc_rate rates; 171 struct ath9k_htc_rate rates;
198}; 172};
199 173
200struct ath9k_htc_target_stats { 174struct ath9k_htc_target_rate_mask {
201 __be32 tx_shortretry; 175 u8 vif_index;
202 __be32 tx_longretry; 176 u8 band;
203 __be32 tx_xretries; 177 __be32 mask;
204 __be32 ht_txunaggr_xretry; 178 u16 pad;
205 __be32 ht_tx_xretries; 179} __packed;
180
181struct ath9k_htc_target_int_stats {
182 __be32 rx;
183 __be32 rxorn;
184 __be32 rxeol;
185 __be32 txurn;
186 __be32 txto;
187 __be32 cst;
188} __packed;
189
190struct ath9k_htc_target_tx_stats {
191 __be32 xretries;
192 __be32 fifoerr;
193 __be32 filtered;
194 __be32 timer_exp;
195 __be32 shortretries;
196 __be32 longretries;
197 __be32 qnull;
198 __be32 encap_fail;
199 __be32 nobuf;
200} __packed;
201
202struct ath9k_htc_target_rx_stats {
203 __be32 nobuf;
204 __be32 host_send;
205 __be32 host_done;
206} __packed; 206} __packed;
207 207
208#define ATH9K_HTC_MAX_VIF 2 208#define ATH9K_HTC_MAX_VIF 2
@@ -244,6 +244,8 @@ struct ath9k_htc_vif {
244 u8 index; 244 u8 index;
245 u16 seq_no; 245 u16 seq_no;
246 bool beacon_configured; 246 bool beacon_configured;
247 int bslot;
248 __le64 tsfadjust;
247}; 249};
248 250
249struct ath9k_vif_iter_data { 251struct ath9k_vif_iter_data {
@@ -282,23 +284,65 @@ struct ath9k_htc_rx {
282 spinlock_t rxbuflock; 284 spinlock_t rxbuflock;
283}; 285};
284 286
287#define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */
288#define ATH9K_HTC_TX_TIMEOUT_INTERVAL 2500 /* ms */
289#define ATH9K_HTC_TX_RESERVE 10
290#define ATH9K_HTC_TX_TIMEOUT_COUNT 20
291#define ATH9K_HTC_TX_THRESHOLD (MAX_TX_BUF_NUM - ATH9K_HTC_TX_RESERVE)
292
293#define ATH9K_HTC_OP_TX_QUEUES_STOP BIT(0)
294#define ATH9K_HTC_OP_TX_DRAIN BIT(1)
295
296struct ath9k_htc_tx {
297 u8 flags;
298 int queued_cnt;
299 struct sk_buff_head mgmt_ep_queue;
300 struct sk_buff_head cab_ep_queue;
301 struct sk_buff_head data_be_queue;
302 struct sk_buff_head data_bk_queue;
303 struct sk_buff_head data_vi_queue;
304 struct sk_buff_head data_vo_queue;
305 struct sk_buff_head tx_failed;
306 DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
307 struct timer_list cleanup_timer;
308 spinlock_t tx_lock;
309};
310
285struct ath9k_htc_tx_ctl { 311struct ath9k_htc_tx_ctl {
286 u8 type; /* ATH9K_HTC_* */ 312 u8 type; /* ATH9K_HTC_* */
313 u8 epid;
314 u8 txok;
315 u8 sta_idx;
316 unsigned long timestamp;
287}; 317};
288 318
319static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
320{
321 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
322
323 BUILD_BUG_ON(sizeof(struct ath9k_htc_tx_ctl) >
324 IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
325 return (struct ath9k_htc_tx_ctl *) &tx_info->driver_data;
326}
327
289#ifdef CONFIG_ATH9K_HTC_DEBUGFS 328#ifdef CONFIG_ATH9K_HTC_DEBUGFS
290 329
291#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) 330#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
292#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++) 331#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
332#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
293 333
294#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) 334#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
295 335
336void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
337 struct ath_htc_rx_status *rxs);
338
296struct ath_tx_stats { 339struct ath_tx_stats {
297 u32 buf_queued; 340 u32 buf_queued;
298 u32 buf_completed; 341 u32 buf_completed;
299 u32 skb_queued; 342 u32 skb_queued;
300 u32 skb_completed; 343 u32 skb_success;
301 u32 skb_dropped; 344 u32 skb_failed;
345 u32 cab_queued;
302 u32 queue_stats[WME_NUM_AC]; 346 u32 queue_stats[WME_NUM_AC];
303}; 347};
304 348
@@ -306,55 +350,57 @@ struct ath_rx_stats {
306 u32 skb_allocated; 350 u32 skb_allocated;
307 u32 skb_completed; 351 u32 skb_completed;
308 u32 skb_dropped; 352 u32 skb_dropped;
353 u32 err_crc;
354 u32 err_decrypt_crc;
355 u32 err_mic;
356 u32 err_pre_delim;
357 u32 err_post_delim;
358 u32 err_decrypt_busy;
359 u32 err_phy;
360 u32 err_phy_stats[ATH9K_PHYERR_MAX];
309}; 361};
310 362
311struct ath9k_debug { 363struct ath9k_debug {
312 struct dentry *debugfs_phy; 364 struct dentry *debugfs_phy;
313 struct dentry *debugfs_tgt_stats;
314 struct dentry *debugfs_xmit;
315 struct dentry *debugfs_recv;
316 struct ath_tx_stats tx_stats; 365 struct ath_tx_stats tx_stats;
317 struct ath_rx_stats rx_stats; 366 struct ath_rx_stats rx_stats;
318 u32 txrate;
319}; 367};
320 368
321#else 369#else
322 370
323#define TX_STAT_INC(c) do { } while (0) 371#define TX_STAT_INC(c) do { } while (0)
324#define RX_STAT_INC(c) do { } while (0) 372#define RX_STAT_INC(c) do { } while (0)
373#define CAB_STAT_INC do { } while (0)
325 374
326#define TX_QSTAT_INC(c) do { } while (0) 375#define TX_QSTAT_INC(c) do { } while (0)
327 376
377static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
378 struct ath_htc_rx_status *rxs)
379{
380}
381
328#endif /* CONFIG_ATH9K_HTC_DEBUGFS */ 382#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
329 383
330#define ATH_LED_PIN_DEF 1 384#define ATH_LED_PIN_DEF 1
331#define ATH_LED_PIN_9287 8 385#define ATH_LED_PIN_9287 10
332#define ATH_LED_PIN_9271 15 386#define ATH_LED_PIN_9271 15
333#define ATH_LED_PIN_7010 12 387#define ATH_LED_PIN_7010 12
334#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
335#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
336
337enum ath_led_type {
338 ATH_LED_RADIO,
339 ATH_LED_ASSOC,
340 ATH_LED_TX,
341 ATH_LED_RX
342};
343 388
344struct ath_led { 389#define BSTUCK_THRESHOLD 10
345 struct ath9k_htc_priv *priv; 390
346 struct led_classdev led_cdev; 391/*
347 enum ath_led_type led_type; 392 * Adjust these when the max. no of beaconing interfaces is
348 struct delayed_work brightness_work; 393 * increased.
349 char name[32]; 394 */
350 bool registered; 395#define DEFAULT_SWBA_RESPONSE 40 /* in TUs */
351 int brightness; 396#define MIN_SWBA_RESPONSE 10 /* in TUs */
352};
353 397
354struct htc_beacon_config { 398struct htc_beacon_config {
399 struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF];
355 u16 beacon_interval; 400 u16 beacon_interval;
356 u16 dtim_period; 401 u16 dtim_period;
357 u16 bmiss_timeout; 402 u16 bmiss_timeout;
403 u32 bmiss_cnt;
358}; 404};
359 405
360struct ath_btcoex { 406struct ath_btcoex {
@@ -372,14 +418,11 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
372 418
373#define OP_INVALID BIT(0) 419#define OP_INVALID BIT(0)
374#define OP_SCANNING BIT(1) 420#define OP_SCANNING BIT(1)
375#define OP_LED_ASSOCIATED BIT(2) 421#define OP_ENABLE_BEACON BIT(2)
376#define OP_LED_ON BIT(3) 422#define OP_BT_PRIORITY_DETECTED BIT(3)
377#define OP_ENABLE_BEACON BIT(4) 423#define OP_BT_SCAN BIT(4)
378#define OP_LED_DEINIT BIT(5) 424#define OP_ANI_RUNNING BIT(5)
379#define OP_BT_PRIORITY_DETECTED BIT(6) 425#define OP_TSF_RESET BIT(6)
380#define OP_BT_SCAN BIT(7)
381#define OP_ANI_RUNNING BIT(8)
382#define OP_TSF_RESET BIT(9)
383 426
384struct ath9k_htc_priv { 427struct ath9k_htc_priv {
385 struct device *dev; 428 struct device *dev;
@@ -388,6 +431,9 @@ struct ath9k_htc_priv {
388 struct htc_target *htc; 431 struct htc_target *htc;
389 struct wmi *wmi; 432 struct wmi *wmi;
390 433
434 u16 fw_version_major;
435 u16 fw_version_minor;
436
391 enum htc_endpoint_id wmi_cmd_ep; 437 enum htc_endpoint_id wmi_cmd_ep;
392 enum htc_endpoint_id beacon_ep; 438 enum htc_endpoint_id beacon_ep;
393 enum htc_endpoint_id cab_ep; 439 enum htc_endpoint_id cab_ep;
@@ -411,27 +457,23 @@ struct ath9k_htc_priv {
411 u16 txpowlimit; 457 u16 txpowlimit;
412 u16 nvifs; 458 u16 nvifs;
413 u16 nstations; 459 u16 nstations;
414 u32 bmiss_cnt;
415 bool rearm_ani; 460 bool rearm_ani;
416 bool reconfig_beacon; 461 bool reconfig_beacon;
462 unsigned int rxfilter;
417 463
418 struct ath9k_hw_cal_data caldata; 464 struct ath9k_hw_cal_data caldata;
465 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
419 466
420 spinlock_t beacon_lock; 467 spinlock_t beacon_lock;
468 struct htc_beacon_config cur_beacon_conf;
421 469
422 bool tx_queues_stop; 470 struct ath9k_htc_rx rx;
423 spinlock_t tx_lock; 471 struct ath9k_htc_tx tx;
424 472
425 struct ieee80211_vif *vif;
426 struct htc_beacon_config cur_beacon_conf;
427 unsigned int rxfilter;
428 struct tasklet_struct swba_tasklet; 473 struct tasklet_struct swba_tasklet;
429 struct tasklet_struct rx_tasklet; 474 struct tasklet_struct rx_tasklet;
430 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
431 struct ath9k_htc_rx rx;
432 struct tasklet_struct tx_tasklet;
433 struct sk_buff_head tx_queue;
434 struct delayed_work ani_work; 475 struct delayed_work ani_work;
476 struct tasklet_struct tx_failed_tasklet;
435 struct work_struct ps_work; 477 struct work_struct ps_work;
436 struct work_struct fatal_work; 478 struct work_struct fatal_work;
437 479
@@ -440,15 +482,13 @@ struct ath9k_htc_priv {
440 bool ps_enabled; 482 bool ps_enabled;
441 bool ps_idle; 483 bool ps_idle;
442 484
443 struct ath_led radio_led; 485#ifdef CONFIG_MAC80211_LEDS
444 struct ath_led assoc_led; 486 enum led_brightness brightness;
445 struct ath_led tx_led; 487 bool led_registered;
446 struct ath_led rx_led; 488 char led_name[32];
447 struct delayed_work ath9k_led_blink_work; 489 struct led_classdev led_cdev;
448 int led_on_duration; 490 struct work_struct led_work;
449 int led_off_duration; 491#endif
450 int led_on_cnt;
451 int led_off_cnt;
452 492
453 int beaconq; 493 int beaconq;
454 int cabq; 494 int cabq;
@@ -470,11 +510,18 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
470 510
471void ath9k_htc_reset(struct ath9k_htc_priv *priv); 511void ath9k_htc_reset(struct ath9k_htc_priv *priv);
472 512
513void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
514 struct ieee80211_vif *vif);
515void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
516 struct ieee80211_vif *vif);
517void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
518 struct ieee80211_vif *vif);
473void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv); 519void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
474void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 520void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
475 struct ieee80211_vif *vif); 521 struct ieee80211_vif *vif);
476void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv); 522void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
477void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending); 523void ath9k_htc_swba(struct ath9k_htc_priv *priv,
524 struct wmi_event_swba *swba);
478 525
479void ath9k_htc_rxep(void *priv, struct sk_buff *skb, 526void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
480 enum htc_endpoint_id ep_id); 527 enum htc_endpoint_id ep_id);
@@ -483,7 +530,8 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
483void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb, 530void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
484 enum htc_endpoint_id ep_id, bool txok); 531 enum htc_endpoint_id ep_id, bool txok);
485 532
486int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv); 533int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv,
534 u8 enable_coex);
487void ath9k_htc_station_work(struct work_struct *work); 535void ath9k_htc_station_work(struct work_struct *work);
488void ath9k_htc_aggr_work(struct work_struct *work); 536void ath9k_htc_aggr_work(struct work_struct *work);
489void ath9k_htc_ani_work(struct work_struct *work); 537void ath9k_htc_ani_work(struct work_struct *work);
@@ -491,14 +539,23 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
491void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv); 539void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
492 540
493int ath9k_tx_init(struct ath9k_htc_priv *priv); 541int ath9k_tx_init(struct ath9k_htc_priv *priv);
494void ath9k_tx_tasklet(unsigned long data); 542int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
495int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb); 543 struct sk_buff *skb, u8 slot, bool is_cab);
496void ath9k_tx_cleanup(struct ath9k_htc_priv *priv); 544void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
497bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype); 545bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
498int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv); 546int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv);
499int get_hw_qnum(u16 queue, int *hwq_map); 547int get_hw_qnum(u16 queue, int *hwq_map);
500int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum, 548int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
501 struct ath9k_tx_queue_info *qinfo); 549 struct ath9k_tx_queue_info *qinfo);
550void ath9k_htc_check_stop_queues(struct ath9k_htc_priv *priv);
551void ath9k_htc_check_wake_queues(struct ath9k_htc_priv *priv);
552int ath9k_htc_tx_get_slot(struct ath9k_htc_priv *priv);
553void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot);
554void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv);
555void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
556void ath9k_htc_tx_failed(struct ath9k_htc_priv *priv);
557void ath9k_tx_failed_tasklet(unsigned long data);
558void ath9k_htc_tx_cleanup_timer(unsigned long data);
502 559
503int ath9k_rx_init(struct ath9k_htc_priv *priv); 560int ath9k_rx_init(struct ath9k_htc_priv *priv);
504void ath9k_rx_cleanup(struct ath9k_htc_priv *priv); 561void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
@@ -516,9 +573,24 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
516void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw); 573void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
517void ath9k_htc_radio_enable(struct ieee80211_hw *hw); 574void ath9k_htc_radio_enable(struct ieee80211_hw *hw);
518void ath9k_htc_radio_disable(struct ieee80211_hw *hw); 575void ath9k_htc_radio_disable(struct ieee80211_hw *hw);
519void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv); 576
577#ifdef CONFIG_MAC80211_LEDS
520void ath9k_init_leds(struct ath9k_htc_priv *priv); 578void ath9k_init_leds(struct ath9k_htc_priv *priv);
521void ath9k_deinit_leds(struct ath9k_htc_priv *priv); 579void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
580void ath9k_led_work(struct work_struct *work);
581#else
582static inline void ath9k_init_leds(struct ath9k_htc_priv *priv)
583{
584}
585
586static inline void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
587{
588}
589
590static inline void ath9k_led_work(struct work_struct *work)
591{
592}
593#endif
522 594
523int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 595int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
524 u16 devid, char *product, u32 drv_info); 596 u16 devid, char *product, u32 drv_info);
@@ -528,15 +600,9 @@ void ath9k_htc_suspend(struct htc_target *htc_handle);
528int ath9k_htc_resume(struct htc_target *htc_handle); 600int ath9k_htc_resume(struct htc_target *htc_handle);
529#endif 601#endif
530#ifdef CONFIG_ATH9K_HTC_DEBUGFS 602#ifdef CONFIG_ATH9K_HTC_DEBUGFS
531int ath9k_htc_debug_create_root(void);
532void ath9k_htc_debug_remove_root(void);
533int ath9k_htc_init_debug(struct ath_hw *ah); 603int ath9k_htc_init_debug(struct ath_hw *ah);
534void ath9k_htc_exit_debug(struct ath_hw *ah);
535#else 604#else
536static inline int ath9k_htc_debug_create_root(void) { return 0; };
537static inline void ath9k_htc_debug_remove_root(void) {};
538static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; }; 605static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; };
539static inline void ath9k_htc_exit_debug(struct ath_hw *ah) {};
540#endif /* CONFIG_ATH9K_HTC_DEBUGFS */ 606#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
541 607
542#endif /* HTC_H */ 608#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 8d1d8792436d..0ded2c66d5ff 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -18,6 +18,50 @@
18 18
19#define FUDGE 2 19#define FUDGE 2
20 20
21void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
22{
23 struct ath_hw *ah = priv->ah;
24 struct ath9k_tx_queue_info qi, qi_be;
25
26 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
27 memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
28
29 ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
30
31 if (priv->ah->opmode == NL80211_IFTYPE_AP) {
32 qi.tqi_aifs = 1;
33 qi.tqi_cwmin = 0;
34 qi.tqi_cwmax = 0;
35 } else if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
36 int qnum = priv->hwq_map[WME_AC_BE];
37
38 ath9k_hw_get_txq_props(ah, qnum, &qi_be);
39
40 qi.tqi_aifs = qi_be.tqi_aifs;
41
42 /*
43 * For WIFI Beacon Distribution
44 * Long slot time : 2x cwmin
45 * Short slot time : 4x cwmin
46 */
47 if (ah->slottime == ATH9K_SLOT_TIME_20)
48 qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
49 else
50 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
51
52 qi.tqi_cwmax = qi_be.tqi_cwmax;
53
54 }
55
56 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
57 ath_err(ath9k_hw_common(ah),
58 "Unable to update beacon queue %u!\n", priv->beaconq);
59 } else {
60 ath9k_hw_resettxqueue(ah, priv->beaconq);
61 }
62}
63
64
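For illustration, a minimal user-space sketch (not driver code) of the cwmin scaling used for the IBSS beacon queue above; the slot-time constants and the BE cwmin of 15 are assumed example values, not read from hardware.

/*
 * Sketch only: derive the IBSS beacon-queue cwmin from the BE queue cwmin
 * and the current slot time, mirroring ath9k_htc_beaconq_config() above.
 */
#include <stdio.h>

#define SLOT_TIME_20	20	/* long slot time, usec (assumed) */
#define SLOT_TIME_9	9	/* short slot time, usec (assumed) */

static unsigned int beaconq_cwmin(unsigned int slottime, unsigned int be_cwmin)
{
	/* Long slot time: 2x cwmin, short slot time: 4x cwmin */
	return (slottime == SLOT_TIME_20) ? 2 * be_cwmin : 4 * be_cwmin;
}

int main(void)
{
	/* With a typical BE cwmin of 15: 30 for long slots, 60 for short. */
	printf("long slot : cwmin = %u\n", beaconq_cwmin(SLOT_TIME_20, 15));
	printf("short slot: cwmin = %u\n", beaconq_cwmin(SLOT_TIME_9, 15));
	return 0;
}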
21static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv, 65static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
22 struct htc_beacon_config *bss_conf) 66 struct htc_beacon_config *bss_conf)
23{ 67{
@@ -30,7 +74,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
30 __be32 htc_imask = 0; 74 __be32 htc_imask = 0;
31 u64 tsf; 75 u64 tsf;
32 int num_beacons, offset, dtim_dec_count, cfp_dec_count; 76 int num_beacons, offset, dtim_dec_count, cfp_dec_count;
33 int ret; 77 int ret __attribute__ ((unused));
34 u8 cmd_rsp; 78 u8 cmd_rsp;
35 79
36 memset(&bs, 0, sizeof(bs)); 80 memset(&bs, 0, sizeof(bs));
@@ -146,7 +190,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
146 enum ath9k_int imask = 0; 190 enum ath9k_int imask = 0;
147 u32 nexttbtt, intval, tsftu; 191 u32 nexttbtt, intval, tsftu;
148 __be32 htc_imask = 0; 192 __be32 htc_imask = 0;
149 int ret; 193 int ret __attribute__ ((unused));
150 u8 cmd_rsp; 194 u8 cmd_rsp;
151 u64 tsf; 195 u64 tsf;
152 196
@@ -154,8 +198,17 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
154 intval /= ATH9K_HTC_MAX_BCN_VIF; 198 intval /= ATH9K_HTC_MAX_BCN_VIF;
155 nexttbtt = intval; 199 nexttbtt = intval;
156 200
201 /*
202 * To reduce beacon misses under heavy TX load,
203 * set the beacon response time to a larger value.
204 */
205 if (intval > DEFAULT_SWBA_RESPONSE)
206 priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
207 else
208 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
209
157 if (priv->op_flags & OP_TSF_RESET) { 210 if (priv->op_flags & OP_TSF_RESET) {
158 intval |= ATH9K_BEACON_RESET_TSF; 211 ath9k_hw_reset_tsf(priv->ah);
159 priv->op_flags &= ~OP_TSF_RESET; 212 priv->op_flags &= ~OP_TSF_RESET;
160 } else { 213 } else {
161 /* 214 /*
@@ -168,18 +221,20 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
168 } while (nexttbtt < tsftu); 221 } while (nexttbtt < tsftu);
169 } 222 }
170 223
171 intval |= ATH9K_BEACON_ENA;
172
173 if (priv->op_flags & OP_ENABLE_BEACON) 224 if (priv->op_flags & OP_ENABLE_BEACON)
174 imask |= ATH9K_INT_SWBA; 225 imask |= ATH9K_INT_SWBA;
175 226
176 ath_dbg(common, ATH_DBG_CONFIG, 227 ath_dbg(common, ATH_DBG_CONFIG,
177 "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n", 228 "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d "
178 bss_conf->beacon_interval, nexttbtt, imask); 229 "imask: 0x%x\n",
230 bss_conf->beacon_interval, nexttbtt,
231 priv->ah->config.sw_beacon_response_time, imask);
232
233 ath9k_htc_beaconq_config(priv);
179 234
180 WMI_CMD(WMI_DISABLE_INTR_CMDID); 235 WMI_CMD(WMI_DISABLE_INTR_CMDID);
181 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval); 236 ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
182 priv->bmiss_cnt = 0; 237 priv->cur_beacon_conf.bmiss_cnt = 0;
183 htc_imask = cpu_to_be32(imask); 238 htc_imask = cpu_to_be32(imask);
184 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); 239 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
185} 240}
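A small sketch of the interval/response-time handling in the AP path above: the beacon interval is split across the beacon slots and the SWBA response time is clamped before the hardware is programmed in microseconds. The constant values below are assumptions for the example only.

/*
 * Sketch only: per-slot beacon interval and SWBA response time clamp,
 * mirroring ath9k_htc_beacon_config_ap() above.
 */
#include <stdio.h>

#define MAX_BCN_VIF		2	/* assumed ATH9K_HTC_MAX_BCN_VIF */
#define DEFAULT_SWBA_RESP	40	/* assumed DEFAULT_SWBA_RESPONSE */
#define MIN_SWBA_RESP		10	/* assumed MIN_SWBA_RESPONSE */
#define TU_TO_USEC(tu)		((tu) << 10)	/* 1 TU = 1024 usec */

int main(void)
{
	unsigned int intval = 100 / MAX_BCN_VIF;	/* 100 TU beacon interval */
	unsigned int resp = (intval > DEFAULT_SWBA_RESP) ?
				DEFAULT_SWBA_RESP : MIN_SWBA_RESP;

	printf("per-slot intval: %u TU (%u usec), resp_time: %u\n",
	       intval, TU_TO_USEC(intval), resp);
	return 0;
}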
@@ -191,7 +246,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
191 enum ath9k_int imask = 0; 246 enum ath9k_int imask = 0;
192 u32 nexttbtt, intval, tsftu; 247 u32 nexttbtt, intval, tsftu;
193 __be32 htc_imask = 0; 248 __be32 htc_imask = 0;
194 int ret; 249 int ret __attribute__ ((unused));
195 u8 cmd_rsp; 250 u8 cmd_rsp;
196 u64 tsf; 251 u64 tsf;
197 252
@@ -207,17 +262,26 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
207 nexttbtt += intval; 262 nexttbtt += intval;
208 } while (nexttbtt < tsftu); 263 } while (nexttbtt < tsftu);
209 264
210 intval |= ATH9K_BEACON_ENA; 265 /*
 266 * Only one IBSS interface is allowed.
267 */
268 if (intval > DEFAULT_SWBA_RESPONSE)
269 priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
270 else
271 priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
272
211 if (priv->op_flags & OP_ENABLE_BEACON) 273 if (priv->op_flags & OP_ENABLE_BEACON)
212 imask |= ATH9K_INT_SWBA; 274 imask |= ATH9K_INT_SWBA;
213 275
214 ath_dbg(common, ATH_DBG_CONFIG, 276 ath_dbg(common, ATH_DBG_CONFIG,
215 "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n", 277 "IBSS Beacon config, intval: %d, nexttbtt: %u, "
216 bss_conf->beacon_interval, nexttbtt, imask); 278 "resp_time: %d, imask: 0x%x\n",
279 bss_conf->beacon_interval, nexttbtt,
280 priv->ah->config.sw_beacon_response_time, imask);
217 281
218 WMI_CMD(WMI_DISABLE_INTR_CMDID); 282 WMI_CMD(WMI_DISABLE_INTR_CMDID);
219 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval); 283 ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
220 priv->bmiss_cnt = 0; 284 priv->cur_beacon_conf.bmiss_cnt = 0;
221 htc_imask = cpu_to_be32(imask); 285 htc_imask = cpu_to_be32(imask);
222 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); 286 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
223} 287}
@@ -228,38 +292,101 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
228 dev_kfree_skb_any(skb); 292 dev_kfree_skb_any(skb);
229} 293}
230 294
231void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending) 295static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
296 int slot)
232{ 297{
233 struct ath9k_htc_vif *avp = (void *)priv->vif->drv_priv; 298 struct ath_common *common = ath9k_hw_common(priv->ah);
299 struct ieee80211_vif *vif;
300 struct sk_buff *skb;
301 struct ieee80211_hdr *hdr;
302 int padpos, padsize, ret, tx_slot;
303
304 spin_lock_bh(&priv->beacon_lock);
305
306 vif = priv->cur_beacon_conf.bslot[slot];
307
308 skb = ieee80211_get_buffered_bc(priv->hw, vif);
309
 310 while (skb) {
311 hdr = (struct ieee80211_hdr *) skb->data;
312
313 padpos = ath9k_cmn_padpos(hdr->frame_control);
314 padsize = padpos & 3;
315 if (padsize && skb->len > padpos) {
316 if (skb_headroom(skb) < padsize) {
317 dev_kfree_skb_any(skb);
318 goto next;
319 }
320 skb_push(skb, padsize);
321 memmove(skb->data, skb->data + padsize, padpos);
322 }
323
324 tx_slot = ath9k_htc_tx_get_slot(priv);
325 if (tx_slot < 0) {
326 ath_dbg(common, ATH_DBG_XMIT, "No free CAB slot\n");
327 dev_kfree_skb_any(skb);
328 goto next;
329 }
330
331 ret = ath9k_htc_tx_start(priv, skb, tx_slot, true);
332 if (ret != 0) {
333 ath9k_htc_tx_clear_slot(priv, tx_slot);
334 dev_kfree_skb_any(skb);
335
336 ath_dbg(common, ATH_DBG_XMIT,
337 "Failed to send CAB frame\n");
338 } else {
339 spin_lock_bh(&priv->tx.tx_lock);
340 priv->tx.queued_cnt++;
341 spin_unlock_bh(&priv->tx.tx_lock);
342 }
343 next:
344 skb = ieee80211_get_buffered_bc(priv->hw, vif);
345 }
346
347 spin_unlock_bh(&priv->beacon_lock);
348}
349
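For reference, a sketch of the 4-byte header alignment performed in ath9k_htc_send_buffered() above: when the 802.11 header length is not a multiple of four, padsize bytes are inserted between header and payload, which is why the header is pushed down and memmove()d. The header lengths used here are illustrative.

/*
 * Sketch only: compute the pad needed after an 802.11 header so the
 * payload starts on a 4-byte boundary (padsize = padpos & 3 above).
 */
#include <stdio.h>

static unsigned int pad_for_header(unsigned int padpos)
{
	return padpos & 3;	/* bytes needed to reach 4-byte alignment */
}

int main(void)
{
	/* A 24-byte data header needs no pad, a 26-byte QoS header needs 2. */
	printf("hdr 24 -> pad %u\n", pad_for_header(24));
	printf("hdr 26 -> pad %u\n", pad_for_header(26));
	return 0;
}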
350static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
351 int slot)
352{
353 struct ath_common *common = ath9k_hw_common(priv->ah);
354 struct ieee80211_vif *vif;
355 struct ath9k_htc_vif *avp;
234 struct tx_beacon_header beacon_hdr; 356 struct tx_beacon_header beacon_hdr;
235 struct ath9k_htc_tx_ctl tx_ctl; 357 struct ath9k_htc_tx_ctl *tx_ctl;
236 struct ieee80211_tx_info *info; 358 struct ieee80211_tx_info *info;
359 struct ieee80211_mgmt *mgmt;
237 struct sk_buff *beacon; 360 struct sk_buff *beacon;
238 u8 *tx_fhdr; 361 u8 *tx_fhdr;
362 int ret;
239 363
240 memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header)); 364 memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
241 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
242
243 /* FIXME: Handle BMISS */
244 if (beacon_pending != 0) {
245 priv->bmiss_cnt++;
246 return;
247 }
248 365
249 spin_lock_bh(&priv->beacon_lock); 366 spin_lock_bh(&priv->beacon_lock);
250 367
368 vif = priv->cur_beacon_conf.bslot[slot];
369 avp = (struct ath9k_htc_vif *)vif->drv_priv;
370
251 if (unlikely(priv->op_flags & OP_SCANNING)) { 371 if (unlikely(priv->op_flags & OP_SCANNING)) {
252 spin_unlock_bh(&priv->beacon_lock); 372 spin_unlock_bh(&priv->beacon_lock);
253 return; 373 return;
254 } 374 }
255 375
256 /* Get a new beacon */ 376 /* Get a new beacon */
257 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 377 beacon = ieee80211_beacon_get(priv->hw, vif);
258 if (!beacon) { 378 if (!beacon) {
259 spin_unlock_bh(&priv->beacon_lock); 379 spin_unlock_bh(&priv->beacon_lock);
260 return; 380 return;
261 } 381 }
262 382
383 /*
 384 * Update the TSF adjust value here; the HW will
 385 * add this value to every beacon.
386 */
387 mgmt = (struct ieee80211_mgmt *)beacon->data;
388 mgmt->u.beacon.timestamp = avp->tsfadjust;
389
263 info = IEEE80211_SKB_CB(beacon); 390 info = IEEE80211_SKB_CB(beacon);
264 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 391 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
265 struct ieee80211_hdr *hdr = 392 struct ieee80211_hdr *hdr =
@@ -269,45 +396,149 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
269 hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); 396 hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
270 } 397 }
271 398
272 tx_ctl.type = ATH9K_HTC_NORMAL; 399 tx_ctl = HTC_SKB_CB(beacon);
400 memset(tx_ctl, 0, sizeof(*tx_ctl));
401
402 tx_ctl->type = ATH9K_HTC_BEACON;
403 tx_ctl->epid = priv->beacon_ep;
404
273 beacon_hdr.vif_index = avp->index; 405 beacon_hdr.vif_index = avp->index;
274 tx_fhdr = skb_push(beacon, sizeof(beacon_hdr)); 406 tx_fhdr = skb_push(beacon, sizeof(beacon_hdr));
275 memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr)); 407 memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));
276 408
277 htc_send(priv->htc, beacon, priv->beacon_ep, &tx_ctl); 409 ret = htc_send(priv->htc, beacon);
410 if (ret != 0) {
411 if (ret == -ENOMEM) {
412 ath_dbg(common, ATH_DBG_BSTUCK,
413 "Failed to send beacon, no free TX buffer\n");
414 }
415 dev_kfree_skb_any(beacon);
416 }
278 417
279 spin_unlock_bh(&priv->beacon_lock); 418 spin_unlock_bh(&priv->beacon_lock);
280} 419}
281 420
282/* Currently, only for IBSS */ 421static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv,
283void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv) 422 struct wmi_event_swba *swba)
284{ 423{
285 struct ath_hw *ah = priv->ah; 424 struct ath_common *common = ath9k_hw_common(priv->ah);
286 struct ath9k_tx_queue_info qi, qi_be; 425 u64 tsf;
287 int qnum = priv->hwq_map[WME_AC_BE]; 426 u32 tsftu;
427 u16 intval;
428 int slot;
288 429
289 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 430 intval = priv->cur_beacon_conf.beacon_interval & ATH9K_BEACON_PERIOD;
290 memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
291 431
292 ath9k_hw_get_txq_props(ah, qnum, &qi_be); 432 tsf = be64_to_cpu(swba->tsf);
433 tsftu = TSF_TO_TU(tsf >> 32, tsf);
434 slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval;
435 slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1;
293 436
294 qi.tqi_aifs = qi_be.tqi_aifs; 437 ath_dbg(common, ATH_DBG_BEACON,
295 /* For WIFI Beacon Distribution 438 "Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n",
296 * Long slot time : 2x cwmin 439 slot, tsf, tsftu, intval);
297 * Short slot time : 4x cwmin
298 */
299 if (ah->slottime == ATH9K_SLOT_TIME_20)
300 qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
301 else
302 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
303 qi.tqi_cwmax = qi_be.tqi_cwmax;
304 440
305 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) { 441 return slot;
306 ath_err(ath9k_hw_common(ah), 442}
307 "Unable to update beacon queue %u!\n", qnum); 443
308 } else { 444void ath9k_htc_swba(struct ath9k_htc_priv *priv,
309 ath9k_hw_resettxqueue(ah, priv->beaconq); 445 struct wmi_event_swba *swba)
446{
447 struct ath_common *common = ath9k_hw_common(priv->ah);
448 int slot;
449
450 if (swba->beacon_pending != 0) {
451 priv->cur_beacon_conf.bmiss_cnt++;
452 if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
453 ath_dbg(common, ATH_DBG_BSTUCK,
454 "Beacon stuck, HW reset\n");
455 ieee80211_queue_work(priv->hw,
456 &priv->fatal_work);
457 }
458 return;
310 } 459 }
460
461 if (priv->cur_beacon_conf.bmiss_cnt) {
462 ath_dbg(common, ATH_DBG_BSTUCK,
463 "Resuming beacon xmit after %u misses\n",
464 priv->cur_beacon_conf.bmiss_cnt);
465 priv->cur_beacon_conf.bmiss_cnt = 0;
466 }
467
468 slot = ath9k_htc_choose_bslot(priv, swba);
469 spin_lock_bh(&priv->beacon_lock);
470 if (priv->cur_beacon_conf.bslot[slot] == NULL) {
471 spin_unlock_bh(&priv->beacon_lock);
472 return;
473 }
474 spin_unlock_bh(&priv->beacon_lock);
475
476 ath9k_htc_send_buffered(priv, slot);
477 ath9k_htc_send_beacon(priv, slot);
478}
479
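A worked example of the slot selection done in ath9k_htc_choose_bslot() above: the TSF offset within the beacon interval picks a slot, counted backwards from the last slot. The interval and slot count below are assumed example values.

/*
 * Sketch only: beacon slot selection from the SWBA TSF, mirroring
 * ath9k_htc_choose_bslot() above.
 */
#include <stdio.h>

#define MAX_BCN_VIF	2	/* assumed ATH9K_HTC_MAX_BCN_VIF */

static int choose_bslot(unsigned int tsftu, unsigned int intval)
{
	int slot = ((tsftu % intval) * MAX_BCN_VIF) / intval;

	return MAX_BCN_VIF - slot - 1;
}

int main(void)
{
	/*
	 * With a 100 TU interval: the first half of the interval maps to
	 * slot 1, the second half to slot 0.
	 */
	printf("tsftu 10 -> slot %d\n", choose_bslot(10, 100));
	printf("tsftu 60 -> slot %d\n", choose_bslot(60, 100));
	return 0;
}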
480void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
481 struct ieee80211_vif *vif)
482{
483 struct ath_common *common = ath9k_hw_common(priv->ah);
484 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
485 int i = 0;
486
487 spin_lock_bh(&priv->beacon_lock);
488 for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) {
489 if (priv->cur_beacon_conf.bslot[i] == NULL) {
490 avp->bslot = i;
491 break;
492 }
493 }
494
495 priv->cur_beacon_conf.bslot[avp->bslot] = vif;
496 spin_unlock_bh(&priv->beacon_lock);
497
498 ath_dbg(common, ATH_DBG_CONFIG,
499 "Added interface at beacon slot: %d\n", avp->bslot);
500}
501
502void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
503 struct ieee80211_vif *vif)
504{
505 struct ath_common *common = ath9k_hw_common(priv->ah);
506 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
507
508 spin_lock_bh(&priv->beacon_lock);
509 priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
510 spin_unlock_bh(&priv->beacon_lock);
511
512 ath_dbg(common, ATH_DBG_CONFIG,
513 "Removed interface at beacon slot: %d\n", avp->bslot);
514}
515
516/*
517 * Calculate the TSF adjustment value for all slots
518 * other than zero.
519 */
520void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
521 struct ieee80211_vif *vif)
522{
523 struct ath_common *common = ath9k_hw_common(priv->ah);
524 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
525 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
526 u64 tsfadjust;
527
528 if (avp->bslot == 0)
529 return;
530
531 /*
 532 * The beacon interval must be the same for all VIFs in multi-AP mode,
533 * and we reach here only for VIF slots greater than zero,
534 * so beacon_interval is guaranteed to be set in cur_conf.
535 */
536 tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF;
537 avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
538
539 ath_dbg(common, ATH_DBG_CONFIG,
540 "tsfadjust is: %llu for bslot: %d\n",
541 (unsigned long long)tsfadjust, avp->bslot);
311} 542}
312 543
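A worked example of the adjustment computed in ath9k_htc_set_tsfadjust() above: slot N beacons go out N/MAX_BCN_VIF of a beacon interval later, so the timestamp field is advanced by that amount. The values below are assumptions for illustration.

/*
 * Sketch only: TSF adjustment for a non-zero beacon slot, mirroring
 * ath9k_htc_set_tsfadjust() above.
 */
#include <stdio.h>

#define MAX_BCN_VIF	2		/* assumed ATH9K_HTC_MAX_BCN_VIF */
#define TU_TO_USEC(tu)	((tu) << 10)	/* 1 TU = 1024 usec */

int main(void)
{
	unsigned long long beacon_interval = 100;	/* TU */
	int bslot = 1;
	unsigned long long tsfadjust =
		beacon_interval * bslot / MAX_BCN_VIF;	/* 50 TU */

	printf("bslot %d: tsfadjust = %llu TU (%llu usec)\n",
	       bslot, tsfadjust, TU_TO_USEC(tsfadjust));
	return 0;
}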
313static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 544static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
new file mode 100644
index 000000000000..aa48b3abbc48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -0,0 +1,960 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19static int ath9k_debugfs_open(struct inode *inode, struct file *file)
20{
21 file->private_data = inode->i_private;
22 return 0;
23}
24
25static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
26 size_t count, loff_t *ppos)
27{
28 struct ath9k_htc_priv *priv = file->private_data;
29 struct ath9k_htc_target_int_stats cmd_rsp;
30 char buf[512];
31 unsigned int len = 0;
32 int ret = 0;
33
34 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
35
36 ath9k_htc_ps_wakeup(priv);
37
38 WMI_CMD(WMI_INT_STATS_CMDID);
39 if (ret) {
40 ath9k_htc_ps_restore(priv);
41 return -EINVAL;
42 }
43
44 ath9k_htc_ps_restore(priv);
45
46 len += snprintf(buf + len, sizeof(buf) - len,
47 "%20s : %10u\n", "RX",
48 be32_to_cpu(cmd_rsp.rx));
49
50 len += snprintf(buf + len, sizeof(buf) - len,
51 "%20s : %10u\n", "RXORN",
52 be32_to_cpu(cmd_rsp.rxorn));
53
54 len += snprintf(buf + len, sizeof(buf) - len,
55 "%20s : %10u\n", "RXEOL",
56 be32_to_cpu(cmd_rsp.rxeol));
57
58 len += snprintf(buf + len, sizeof(buf) - len,
59 "%20s : %10u\n", "TXURN",
60 be32_to_cpu(cmd_rsp.txurn));
61
62 len += snprintf(buf + len, sizeof(buf) - len,
63 "%20s : %10u\n", "TXTO",
64 be32_to_cpu(cmd_rsp.txto));
65
66 len += snprintf(buf + len, sizeof(buf) - len,
67 "%20s : %10u\n", "CST",
68 be32_to_cpu(cmd_rsp.cst));
69
70 if (len > sizeof(buf))
71 len = sizeof(buf);
72
73 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
74}
75
76static const struct file_operations fops_tgt_int_stats = {
77 .read = read_file_tgt_int_stats,
78 .open = ath9k_debugfs_open,
79 .owner = THIS_MODULE,
80 .llseek = default_llseek,
81};
82
83static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
84 size_t count, loff_t *ppos)
85{
86 struct ath9k_htc_priv *priv = file->private_data;
87 struct ath9k_htc_target_tx_stats cmd_rsp;
88 char buf[512];
89 unsigned int len = 0;
90 int ret = 0;
91
92 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
93
94 ath9k_htc_ps_wakeup(priv);
95
96 WMI_CMD(WMI_TX_STATS_CMDID);
97 if (ret) {
98 ath9k_htc_ps_restore(priv);
99 return -EINVAL;
100 }
101
102 ath9k_htc_ps_restore(priv);
103
104 len += snprintf(buf + len, sizeof(buf) - len,
105 "%20s : %10u\n", "Xretries",
106 be32_to_cpu(cmd_rsp.xretries));
107
108 len += snprintf(buf + len, sizeof(buf) - len,
109 "%20s : %10u\n", "FifoErr",
110 be32_to_cpu(cmd_rsp.fifoerr));
111
112 len += snprintf(buf + len, sizeof(buf) - len,
113 "%20s : %10u\n", "Filtered",
114 be32_to_cpu(cmd_rsp.filtered));
115
116 len += snprintf(buf + len, sizeof(buf) - len,
117 "%20s : %10u\n", "TimerExp",
118 be32_to_cpu(cmd_rsp.timer_exp));
119
120 len += snprintf(buf + len, sizeof(buf) - len,
121 "%20s : %10u\n", "ShortRetries",
122 be32_to_cpu(cmd_rsp.shortretries));
123
124 len += snprintf(buf + len, sizeof(buf) - len,
125 "%20s : %10u\n", "LongRetries",
126 be32_to_cpu(cmd_rsp.longretries));
127
128 len += snprintf(buf + len, sizeof(buf) - len,
129 "%20s : %10u\n", "QueueNull",
130 be32_to_cpu(cmd_rsp.qnull));
131
132 len += snprintf(buf + len, sizeof(buf) - len,
133 "%20s : %10u\n", "EncapFail",
134 be32_to_cpu(cmd_rsp.encap_fail));
135
136 len += snprintf(buf + len, sizeof(buf) - len,
137 "%20s : %10u\n", "NoBuf",
138 be32_to_cpu(cmd_rsp.nobuf));
139
140 if (len > sizeof(buf))
141 len = sizeof(buf);
142
143 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
144}
145
146static const struct file_operations fops_tgt_tx_stats = {
147 .read = read_file_tgt_tx_stats,
148 .open = ath9k_debugfs_open,
149 .owner = THIS_MODULE,
150 .llseek = default_llseek,
151};
152
153static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
154 size_t count, loff_t *ppos)
155{
156 struct ath9k_htc_priv *priv = file->private_data;
157 struct ath9k_htc_target_rx_stats cmd_rsp;
158 char buf[512];
159 unsigned int len = 0;
160 int ret = 0;
161
162 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
163
164 ath9k_htc_ps_wakeup(priv);
165
166 WMI_CMD(WMI_RX_STATS_CMDID);
167 if (ret) {
168 ath9k_htc_ps_restore(priv);
169 return -EINVAL;
170 }
171
172 ath9k_htc_ps_restore(priv);
173
174 len += snprintf(buf + len, sizeof(buf) - len,
175 "%20s : %10u\n", "NoBuf",
176 be32_to_cpu(cmd_rsp.nobuf));
177
178 len += snprintf(buf + len, sizeof(buf) - len,
179 "%20s : %10u\n", "HostSend",
180 be32_to_cpu(cmd_rsp.host_send));
181
182 len += snprintf(buf + len, sizeof(buf) - len,
183 "%20s : %10u\n", "HostDone",
184 be32_to_cpu(cmd_rsp.host_done));
185
186 if (len > sizeof(buf))
187 len = sizeof(buf);
188
189 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
190}
191
192static const struct file_operations fops_tgt_rx_stats = {
193 .read = read_file_tgt_rx_stats,
194 .open = ath9k_debugfs_open,
195 .owner = THIS_MODULE,
196 .llseek = default_llseek,
197};
198
199static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
200 size_t count, loff_t *ppos)
201{
202 struct ath9k_htc_priv *priv = file->private_data;
203 char buf[512];
204 unsigned int len = 0;
205
206 len += snprintf(buf + len, sizeof(buf) - len,
207 "%20s : %10u\n", "Buffers queued",
208 priv->debug.tx_stats.buf_queued);
209 len += snprintf(buf + len, sizeof(buf) - len,
210 "%20s : %10u\n", "Buffers completed",
211 priv->debug.tx_stats.buf_completed);
212 len += snprintf(buf + len, sizeof(buf) - len,
213 "%20s : %10u\n", "SKBs queued",
214 priv->debug.tx_stats.skb_queued);
215 len += snprintf(buf + len, sizeof(buf) - len,
216 "%20s : %10u\n", "SKBs success",
217 priv->debug.tx_stats.skb_success);
218 len += snprintf(buf + len, sizeof(buf) - len,
219 "%20s : %10u\n", "SKBs failed",
220 priv->debug.tx_stats.skb_failed);
221 len += snprintf(buf + len, sizeof(buf) - len,
222 "%20s : %10u\n", "CAB queued",
223 priv->debug.tx_stats.cab_queued);
224
225 len += snprintf(buf + len, sizeof(buf) - len,
226 "%20s : %10u\n", "BE queued",
227 priv->debug.tx_stats.queue_stats[WME_AC_BE]);
228 len += snprintf(buf + len, sizeof(buf) - len,
229 "%20s : %10u\n", "BK queued",
230 priv->debug.tx_stats.queue_stats[WME_AC_BK]);
231 len += snprintf(buf + len, sizeof(buf) - len,
232 "%20s : %10u\n", "VI queued",
233 priv->debug.tx_stats.queue_stats[WME_AC_VI]);
234 len += snprintf(buf + len, sizeof(buf) - len,
235 "%20s : %10u\n", "VO queued",
236 priv->debug.tx_stats.queue_stats[WME_AC_VO]);
237
238 if (len > sizeof(buf))
239 len = sizeof(buf);
240
241 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
242}
243
244static const struct file_operations fops_xmit = {
245 .read = read_file_xmit,
246 .open = ath9k_debugfs_open,
247 .owner = THIS_MODULE,
248 .llseek = default_llseek,
249};
250
251void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
252 struct ath_htc_rx_status *rxs)
253{
254#define RX_PHY_ERR_INC(c) priv->debug.rx_stats.err_phy_stats[c]++
255
256 if (rxs->rs_status & ATH9K_RXERR_CRC)
257 priv->debug.rx_stats.err_crc++;
258 if (rxs->rs_status & ATH9K_RXERR_DECRYPT)
259 priv->debug.rx_stats.err_decrypt_crc++;
260 if (rxs->rs_status & ATH9K_RXERR_MIC)
261 priv->debug.rx_stats.err_mic++;
262 if (rxs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
263 priv->debug.rx_stats.err_pre_delim++;
264 if (rxs->rs_status & ATH9K_RX_DELIM_CRC_POST)
265 priv->debug.rx_stats.err_post_delim++;
266 if (rxs->rs_status & ATH9K_RX_DECRYPT_BUSY)
267 priv->debug.rx_stats.err_decrypt_busy++;
268
269 if (rxs->rs_status & ATH9K_RXERR_PHY) {
270 priv->debug.rx_stats.err_phy++;
271 if (rxs->rs_phyerr < ATH9K_PHYERR_MAX)
272 RX_PHY_ERR_INC(rxs->rs_phyerr);
273 }
274
275#undef RX_PHY_ERR_INC
276}
277
278static ssize_t read_file_recv(struct file *file, char __user *user_buf,
279 size_t count, loff_t *ppos)
280{
281#define PHY_ERR(s, p) \
282 len += snprintf(buf + len, size - len, "%20s : %10u\n", s, \
283 priv->debug.rx_stats.err_phy_stats[p]);
284
285 struct ath9k_htc_priv *priv = file->private_data;
286 char *buf;
287 unsigned int len = 0, size = 1500;
288 ssize_t retval = 0;
289
290 buf = kzalloc(size, GFP_KERNEL);
291 if (buf == NULL)
292 return -ENOMEM;
293
294 len += snprintf(buf + len, size - len,
295 "%20s : %10u\n", "SKBs allocated",
296 priv->debug.rx_stats.skb_allocated);
297 len += snprintf(buf + len, size - len,
298 "%20s : %10u\n", "SKBs completed",
299 priv->debug.rx_stats.skb_completed);
300 len += snprintf(buf + len, size - len,
301 "%20s : %10u\n", "SKBs Dropped",
302 priv->debug.rx_stats.skb_dropped);
303
304 len += snprintf(buf + len, size - len,
305 "%20s : %10u\n", "CRC ERR",
306 priv->debug.rx_stats.err_crc);
307 len += snprintf(buf + len, size - len,
308 "%20s : %10u\n", "DECRYPT CRC ERR",
309 priv->debug.rx_stats.err_decrypt_crc);
310 len += snprintf(buf + len, size - len,
311 "%20s : %10u\n", "MIC ERR",
312 priv->debug.rx_stats.err_mic);
313 len += snprintf(buf + len, size - len,
314 "%20s : %10u\n", "PRE-DELIM CRC ERR",
315 priv->debug.rx_stats.err_pre_delim);
316 len += snprintf(buf + len, size - len,
317 "%20s : %10u\n", "POST-DELIM CRC ERR",
318 priv->debug.rx_stats.err_post_delim);
319 len += snprintf(buf + len, size - len,
320 "%20s : %10u\n", "DECRYPT BUSY ERR",
321 priv->debug.rx_stats.err_decrypt_busy);
322 len += snprintf(buf + len, size - len,
323 "%20s : %10u\n", "TOTAL PHY ERR",
324 priv->debug.rx_stats.err_phy);
325
326
327 PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
328 PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
329 PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
330 PHY_ERR("RATE", ATH9K_PHYERR_RATE);
331 PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
332 PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
333 PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
334 PHY_ERR("TOR", ATH9K_PHYERR_TOR);
335 PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
336 PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
337 PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
338 PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
339 PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
340 PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
341 PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
342 PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
343 PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
344 PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
345 PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
346 PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
347 PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
348 PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
349 PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
350 PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
351 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
352 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
353
354 if (len > size)
355 len = size;
356
357 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
358 kfree(buf);
359
360 return retval;
361
362#undef PHY_ERR
363}
364
365static const struct file_operations fops_recv = {
366 .read = read_file_recv,
367 .open = ath9k_debugfs_open,
368 .owner = THIS_MODULE,
369 .llseek = default_llseek,
370};
371
372static ssize_t read_file_slot(struct file *file, char __user *user_buf,
373 size_t count, loff_t *ppos)
374{
375 struct ath9k_htc_priv *priv = file->private_data;
376 char buf[512];
377 unsigned int len = 0;
378
379 spin_lock_bh(&priv->tx.tx_lock);
380
381 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
382
383 len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
384 priv->tx.tx_slot, MAX_TX_BUF_NUM);
385
386 len += snprintf(buf + len, sizeof(buf) - len, "\n");
387
388 len += snprintf(buf + len, sizeof(buf) - len,
389 "Used slots : %d\n",
390 bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
391
392 spin_unlock_bh(&priv->tx.tx_lock);
393
394 if (len > sizeof(buf))
395 len = sizeof(buf);
396
397 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
398}
399
400static const struct file_operations fops_slot = {
401 .read = read_file_slot,
402 .open = ath9k_debugfs_open,
403 .owner = THIS_MODULE,
404 .llseek = default_llseek,
405};
406
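The 'slot' file above reports a bitmap with one bit per outstanding TX buffer. As a rough user-space model of that bookkeeping (not the driver's ath9k_htc_tx_get_slot()/tx_clear_slot(), whose locking is omitted), with MAX_TX_BUF_NUM assumed to be 256:

/*
 * Sketch only: a bitmap slot allocator similar in spirit to the TX slot
 * tracking shown by the 'slot' debugfs file above.
 */
#include <stdio.h>

#define MAX_TX_BUF_NUM	256			/* assumed */
#define BITS		(sizeof(unsigned long) * 8)

static unsigned long tx_slot[MAX_TX_BUF_NUM / BITS];

static int tx_get_slot(void)
{
	int i;

	for (i = 0; i < MAX_TX_BUF_NUM; i++) {
		if (!(tx_slot[i / BITS] & (1UL << (i % BITS)))) {
			tx_slot[i / BITS] |= 1UL << (i % BITS);
			return i;
		}
	}
	return -1;	/* no free slot */
}

static void tx_clear_slot(int slot)
{
	tx_slot[slot / BITS] &= ~(1UL << (slot % BITS));
}

int main(void)
{
	int a = tx_get_slot(), b = tx_get_slot();

	printf("got slots %d and %d\n", a, b);
	tx_clear_slot(a);
	printf("after clearing %d, next slot is %d\n", a, tx_get_slot());
	return 0;
}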
407static ssize_t read_file_queue(struct file *file, char __user *user_buf,
408 size_t count, loff_t *ppos)
409{
410 struct ath9k_htc_priv *priv = file->private_data;
411 char buf[512];
412 unsigned int len = 0;
413
414 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
415 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
416
417 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
418 "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
419
420 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
421 "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
422
423 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
424 "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
425
426 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
427 "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
428
429 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
430 "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
431
432 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
433 "Failed queue", skb_queue_len(&priv->tx.tx_failed));
434
435 spin_lock_bh(&priv->tx.tx_lock);
436 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
437 "Queued count", priv->tx.queued_cnt);
438 spin_unlock_bh(&priv->tx.tx_lock);
439
440 if (len > sizeof(buf))
441 len = sizeof(buf);
442
443 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
444
445}
446
447static const struct file_operations fops_queue = {
448 .read = read_file_queue,
449 .open = ath9k_debugfs_open,
450 .owner = THIS_MODULE,
451 .llseek = default_llseek,
452};
453
454static ssize_t read_file_debug(struct file *file, char __user *user_buf,
455 size_t count, loff_t *ppos)
456{
457 struct ath9k_htc_priv *priv = file->private_data;
458 struct ath_common *common = ath9k_hw_common(priv->ah);
459 char buf[32];
460 unsigned int len;
461
462 len = sprintf(buf, "0x%08x\n", common->debug_mask);
463 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
464}
465
466static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
467 size_t count, loff_t *ppos)
468{
469 struct ath9k_htc_priv *priv = file->private_data;
470 struct ath_common *common = ath9k_hw_common(priv->ah);
471 unsigned long mask;
472 char buf[32];
473 ssize_t len;
474
475 len = min(count, sizeof(buf) - 1);
476 if (copy_from_user(buf, user_buf, len))
477 return -EFAULT;
478
479 buf[len] = '\0';
480 if (strict_strtoul(buf, 0, &mask))
481 return -EINVAL;
482
483 common->debug_mask = mask;
484 return count;
485}
486
487static const struct file_operations fops_debug = {
488 .read = read_file_debug,
489 .write = write_file_debug,
490 .open = ath9k_debugfs_open,
491 .owner = THIS_MODULE,
492 .llseek = default_llseek,
493};
494
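The 'debug' file bound to fops_debug above accepts a hex mask parsed by write_file_debug(). A sketch of driving it from user space; the debugfs path is an assumption (it depends on the wiphy name and where debugfs is mounted) and the mask value is an arbitrary example.

/*
 * Sketch only: set the ath9k_htc debug mask through debugfs. Path and
 * mask value are assumed for illustration.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/ath9k_htc/debug";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "0x100\n");	/* parsed by write_file_debug() above */
	fclose(f);
	return 0;
}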
495static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
496 size_t count, loff_t *ppos)
497{
498 struct ath9k_htc_priv *priv = file->private_data;
499 struct ath_common *common = ath9k_hw_common(priv->ah);
500 struct base_eep_header *pBase = NULL;
501 unsigned int len = 0, size = 1500;
502 ssize_t retval = 0;
503 char *buf;
504
505 /*
 506 * This can be done since all three EEPROM families have the
 507 * same base header up to a certain point, and we are interested in
 508 * the data only up to that point.
509 */
510
511 if (AR_SREV_9271(priv->ah))
512 pBase = (struct base_eep_header *)
513 &priv->ah->eeprom.map4k.baseEepHeader;
514 else if (priv->ah->hw_version.usbdev == AR9280_USB)
515 pBase = (struct base_eep_header *)
516 &priv->ah->eeprom.def.baseEepHeader;
517 else if (priv->ah->hw_version.usbdev == AR9287_USB)
518 pBase = (struct base_eep_header *)
519 &priv->ah->eeprom.map9287.baseEepHeader;
520
521 if (pBase == NULL) {
522 ath_err(common, "Unknown EEPROM type\n");
523 return 0;
524 }
525
526 buf = kzalloc(size, GFP_KERNEL);
527 if (buf == NULL)
528 return -ENOMEM;
529
530 len += snprintf(buf + len, size - len,
531 "%20s : %10d\n", "Major Version",
532 pBase->version >> 12);
533 len += snprintf(buf + len, size - len,
534 "%20s : %10d\n", "Minor Version",
535 pBase->version & 0xFFF);
536 len += snprintf(buf + len, size - len,
537 "%20s : %10d\n", "Checksum",
538 pBase->checksum);
539 len += snprintf(buf + len, size - len,
540 "%20s : %10d\n", "Length",
541 pBase->length);
542 len += snprintf(buf + len, size - len,
543 "%20s : %10d\n", "RegDomain1",
544 pBase->regDmn[0]);
545 len += snprintf(buf + len, size - len,
546 "%20s : %10d\n", "RegDomain2",
547 pBase->regDmn[1]);
548 len += snprintf(buf + len, size - len,
549 "%20s : %10d\n",
550 "TX Mask", pBase->txMask);
551 len += snprintf(buf + len, size - len,
552 "%20s : %10d\n",
553 "RX Mask", pBase->rxMask);
554 len += snprintf(buf + len, size - len,
555 "%20s : %10d\n",
556 "Allow 5GHz",
557 !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
558 len += snprintf(buf + len, size - len,
559 "%20s : %10d\n",
560 "Allow 2GHz",
561 !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
562 len += snprintf(buf + len, size - len,
563 "%20s : %10d\n",
564 "Disable 2GHz HT20",
565 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
566 len += snprintf(buf + len, size - len,
567 "%20s : %10d\n",
568 "Disable 2GHz HT40",
569 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
570 len += snprintf(buf + len, size - len,
571 "%20s : %10d\n",
 572 "Disable 5GHz HT20",
573 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
574 len += snprintf(buf + len, size - len,
575 "%20s : %10d\n",
 576 "Disable 5GHz HT40",
577 !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
578 len += snprintf(buf + len, size - len,
579 "%20s : %10d\n",
580 "Big Endian",
581 !!(pBase->eepMisc & 0x01));
582 len += snprintf(buf + len, size - len,
583 "%20s : %10d\n",
584 "Cal Bin Major Ver",
585 (pBase->binBuildNumber >> 24) & 0xFF);
586 len += snprintf(buf + len, size - len,
587 "%20s : %10d\n",
588 "Cal Bin Minor Ver",
589 (pBase->binBuildNumber >> 16) & 0xFF);
590 len += snprintf(buf + len, size - len,
591 "%20s : %10d\n",
592 "Cal Bin Build",
593 (pBase->binBuildNumber >> 8) & 0xFF);
594
595 /*
596 * UB91 specific data.
597 */
598 if (AR_SREV_9271(priv->ah)) {
599 struct base_eep_header_4k *pBase4k =
600 &priv->ah->eeprom.map4k.baseEepHeader;
601
602 len += snprintf(buf + len, size - len,
603 "%20s : %10d\n",
604 "TX Gain type",
605 pBase4k->txGainType);
606 }
607
608 /*
609 * UB95 specific data.
610 */
611 if (priv->ah->hw_version.usbdev == AR9287_USB) {
612 struct base_eep_ar9287_header *pBase9287 =
613 &priv->ah->eeprom.map9287.baseEepHeader;
614
615 len += snprintf(buf + len, size - len,
616 "%20s : %10ddB\n",
617 "Power Table Offset",
618 pBase9287->pwrTableOffset);
619
620 len += snprintf(buf + len, size - len,
621 "%20s : %10d\n",
622 "OpenLoop Power Ctrl",
623 pBase9287->openLoopPwrCntl);
624 }
625
626 len += snprintf(buf + len, size - len,
627 "%20s : %02X:%02X:%02X:%02X:%02X:%02X\n",
628 "MacAddress",
629 pBase->macAddr[0], pBase->macAddr[1], pBase->macAddr[2],
630 pBase->macAddr[3], pBase->macAddr[4], pBase->macAddr[5]);
631 if (len > size)
632 len = size;
633
634 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
635 kfree(buf);
636
637 return retval;
638}
639
640static const struct file_operations fops_base_eeprom = {
641 .read = read_file_base_eeprom,
642 .open = ath9k_debugfs_open,
643 .owner = THIS_MODULE,
644 .llseek = default_llseek,
645};
646
647static ssize_t read_4k_modal_eeprom(struct file *file,
648 char __user *user_buf,
649 size_t count, loff_t *ppos)
650{
651#define PR_EEP(_s, _val) \
652 do { \
653 len += snprintf(buf + len, size - len, "%20s : %10d\n", \
654 _s, (_val)); \
655 } while (0)
656
657 struct ath9k_htc_priv *priv = file->private_data;
658 struct modal_eep_4k_header *pModal = &priv->ah->eeprom.map4k.modalHeader;
659 unsigned int len = 0, size = 2048;
660 ssize_t retval = 0;
661 char *buf;
662
663 buf = kzalloc(size, GFP_KERNEL);
664 if (buf == NULL)
665 return -ENOMEM;
666
667 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
668 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
669 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
670 PR_EEP("Switch Settle", pModal->switchSettling);
671 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
672 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
673 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
674 PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
675 PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
676 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
677 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
678 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
 679 PR_EEP("CCA Threshold", pModal->thresh62);
680 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
681 PR_EEP("xpdGain", pModal->xpdGain);
682 PR_EEP("External PD", pModal->xpd);
683 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
684 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
685 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
686 PR_EEP("O/D Bias Version", pModal->version);
687 PR_EEP("CCK OutputBias", pModal->ob_0);
688 PR_EEP("BPSK OutputBias", pModal->ob_1);
689 PR_EEP("QPSK OutputBias", pModal->ob_2);
690 PR_EEP("16QAM OutputBias", pModal->ob_3);
691 PR_EEP("64QAM OutputBias", pModal->ob_4);
692 PR_EEP("CCK Driver1_Bias", pModal->db1_0);
693 PR_EEP("BPSK Driver1_Bias", pModal->db1_1);
694 PR_EEP("QPSK Driver1_Bias", pModal->db1_2);
695 PR_EEP("16QAM Driver1_Bias", pModal->db1_3);
696 PR_EEP("64QAM Driver1_Bias", pModal->db1_4);
697 PR_EEP("CCK Driver2_Bias", pModal->db2_0);
698 PR_EEP("BPSK Driver2_Bias", pModal->db2_1);
699 PR_EEP("QPSK Driver2_Bias", pModal->db2_2);
700 PR_EEP("16QAM Driver2_Bias", pModal->db2_3);
701 PR_EEP("64QAM Driver2_Bias", pModal->db2_4);
702 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
703 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
704 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
705 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
706 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
707 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
708 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
709 PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
710 PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
711 PR_EEP("Ant. Diversity ctl1", pModal->antdiv_ctl1);
712 PR_EEP("Ant. Diversity ctl2", pModal->antdiv_ctl2);
713 PR_EEP("TX Diversity", pModal->tx_diversity);
714
715 if (len > size)
716 len = size;
717
718 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
719 kfree(buf);
720
721 return retval;
722
723#undef PR_EEP
724}
725
726static ssize_t read_def_modal_eeprom(struct file *file,
727 char __user *user_buf,
728 size_t count, loff_t *ppos)
729{
730#define PR_EEP(_s, _val) \
731 do { \
732 if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
733 pModal = &priv->ah->eeprom.def.modalHeader[1]; \
734 len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
735 _s, (_val), "|"); \
736 } \
737 if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
738 pModal = &priv->ah->eeprom.def.modalHeader[0]; \
739 len += snprintf(buf + len, size - len, "%9d\n", \
740 (_val)); \
741 } \
742 } while (0)
743
744 struct ath9k_htc_priv *priv = file->private_data;
745 struct base_eep_header *pBase = &priv->ah->eeprom.def.baseEepHeader;
746 struct modal_eep_header *pModal = NULL;
747 unsigned int len = 0, size = 3500;
748 ssize_t retval = 0;
749 char *buf;
750
751 buf = kzalloc(size, GFP_KERNEL);
752 if (buf == NULL)
753 return -ENOMEM;
754
755 len += snprintf(buf + len, size - len,
756 "%31s %15s\n", "2G", "5G");
757 len += snprintf(buf + len, size - len,
758 "%32s %16s\n", "====", "====\n");
759
760 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
761 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
762 PR_EEP("Chain2 Ant. Control", pModal->antCtrlChain[2]);
763 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
764 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
765 PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
766 PR_EEP("Chain2 Ant. Gain", pModal->antennaGainCh[2]);
767 PR_EEP("Switch Settle", pModal->switchSettling);
768 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
769 PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
770 PR_EEP("Chain2 TxRxAtten", pModal->txRxAttenCh[2]);
771 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
772 PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
773 PR_EEP("Chain2 RxTxMargin", pModal->rxTxMarginCh[2]);
774 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
775 PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
776 PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
777 PR_EEP("Chain1 xlna Gain", pModal->xlnaGainCh[1]);
778 PR_EEP("Chain2 xlna Gain", pModal->xlnaGainCh[2]);
779 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
780 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
781 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
 782 PR_EEP("CCA Threshold", pModal->thresh62);
783 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
784 PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
785 PR_EEP("Chain2 NF Threshold", pModal->noiseFloorThreshCh[2]);
786 PR_EEP("xpdGain", pModal->xpdGain);
787 PR_EEP("External PD", pModal->xpd);
788 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
789 PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
790 PR_EEP("Chain2 I Coefficient", pModal->iqCalICh[2]);
791 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
792 PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
793 PR_EEP("Chain2 Q Coefficient", pModal->iqCalQCh[2]);
794 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
795 PR_EEP("Chain0 OutputBias", pModal->ob);
796 PR_EEP("Chain0 DriverBias", pModal->db);
797 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
798 PR_EEP("2chain pwr decrease", pModal->pwrDecreaseFor2Chain);
799 PR_EEP("3chain pwr decrease", pModal->pwrDecreaseFor3Chain);
800 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
801 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
802 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
803 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
804 PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
805 PR_EEP("Chain2 bswAtten", pModal->bswAtten[2]);
806 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
807 PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
808 PR_EEP("Chain2 bswMargin", pModal->bswMargin[2]);
809 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
810 PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
811 PR_EEP("Chain1 xatten2Db", pModal->xatten2Db[1]);
812 PR_EEP("Chain2 xatten2Db", pModal->xatten2Db[2]);
813 PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
814 PR_EEP("Chain1 xatten2Margin", pModal->xatten2Margin[1]);
815 PR_EEP("Chain2 xatten2Margin", pModal->xatten2Margin[2]);
816 PR_EEP("Chain1 OutputBias", pModal->ob_ch1);
817 PR_EEP("Chain1 DriverBias", pModal->db_ch1);
818 PR_EEP("LNA Control", pModal->lna_ctl);
819 PR_EEP("XPA Bias Freq0", pModal->xpaBiasLvlFreq[0]);
820 PR_EEP("XPA Bias Freq1", pModal->xpaBiasLvlFreq[1]);
821 PR_EEP("XPA Bias Freq2", pModal->xpaBiasLvlFreq[2]);
822
823 if (len > size)
824 len = size;
825
826 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
827 kfree(buf);
828
829 return retval;
830
831#undef PR_EEP
832}
833
834static ssize_t read_9287_modal_eeprom(struct file *file,
835 char __user *user_buf,
836 size_t count, loff_t *ppos)
837{
838#define PR_EEP(_s, _val) \
839 do { \
840 len += snprintf(buf + len, size - len, "%20s : %10d\n", \
841 _s, (_val)); \
842 } while (0)
843
844 struct ath9k_htc_priv *priv = file->private_data;
845 struct modal_eep_ar9287_header *pModal = &priv->ah->eeprom.map9287.modalHeader;
846 unsigned int len = 0, size = 3000;
847 ssize_t retval = 0;
848 char *buf;
849
850 buf = kzalloc(size, GFP_KERNEL);
851 if (buf == NULL)
852 return -ENOMEM;
853
854 PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
855 PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
856 PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
857 PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
858 PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
859 PR_EEP("Switch Settle", pModal->switchSettling);
860 PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
861 PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
862 PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
863 PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
864 PR_EEP("ADC Desired size", pModal->adcDesiredSize);
865 PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
866 PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
867 PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
 868 PR_EEP("CCA Threshold", pModal->thresh62);
869 PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
870 PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
871 PR_EEP("xpdGain", pModal->xpdGain);
872 PR_EEP("External PD", pModal->xpd);
873 PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
874 PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
875 PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
876 PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
877 PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
878 PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
879 PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
880 PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
881 PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
882 PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
883 PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
884 PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
885 PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
886 PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
887 PR_EEP("AR92x7 Version", pModal->version);
888 PR_EEP("DriverBias1", pModal->db1);
 889 PR_EEP("DriverBias2", pModal->db2);
890 PR_EEP("CCK OutputBias", pModal->ob_cck);
891 PR_EEP("PSK OutputBias", pModal->ob_psk);
892 PR_EEP("QAM OutputBias", pModal->ob_qam);
893 PR_EEP("PAL_OFF OutputBias", pModal->ob_pal_off);
894
895 if (len > size)
896 len = size;
897
898 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
899 kfree(buf);
900
901 return retval;
902
903#undef PR_EEP
904}
905
906static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
907 size_t count, loff_t *ppos)
908{
909 struct ath9k_htc_priv *priv = file->private_data;
910
911 if (AR_SREV_9271(priv->ah))
912 return read_4k_modal_eeprom(file, user_buf, count, ppos);
913 else if (priv->ah->hw_version.usbdev == AR9280_USB)
914 return read_def_modal_eeprom(file, user_buf, count, ppos);
915 else if (priv->ah->hw_version.usbdev == AR9287_USB)
916 return read_9287_modal_eeprom(file, user_buf, count, ppos);
917
918 return 0;
919}
920
921static const struct file_operations fops_modal_eeprom = {
922 .read = read_file_modal_eeprom,
923 .open = ath9k_debugfs_open,
924 .owner = THIS_MODULE,
925 .llseek = default_llseek,
926};
927
928int ath9k_htc_init_debug(struct ath_hw *ah)
929{
930 struct ath_common *common = ath9k_hw_common(ah);
931 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
932
933 priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
934 priv->hw->wiphy->debugfsdir);
935 if (!priv->debug.debugfs_phy)
936 return -ENOMEM;
937
938 debugfs_create_file("tgt_int_stats", S_IRUSR, priv->debug.debugfs_phy,
939 priv, &fops_tgt_int_stats);
940 debugfs_create_file("tgt_tx_stats", S_IRUSR, priv->debug.debugfs_phy,
941 priv, &fops_tgt_tx_stats);
942 debugfs_create_file("tgt_rx_stats", S_IRUSR, priv->debug.debugfs_phy,
943 priv, &fops_tgt_rx_stats);
944 debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy,
945 priv, &fops_xmit);
946 debugfs_create_file("recv", S_IRUSR, priv->debug.debugfs_phy,
947 priv, &fops_recv);
948 debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy,
949 priv, &fops_slot);
950 debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy,
951 priv, &fops_queue);
952 debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy,
953 priv, &fops_debug);
954 debugfs_create_file("base_eeprom", S_IRUSR, priv->debug.debugfs_phy,
955 priv, &fops_base_eeprom);
956 debugfs_create_file("modal_eeprom", S_IRUSR, priv->debug.debugfs_phy,
957 priv, &fops_modal_eeprom);
958
959 return 0;
960}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 7e630a81b453..af57fe5aab98 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -65,17 +65,19 @@ static void ath_btcoex_period_work(struct work_struct *work)
65 u32 timer_period; 65 u32 timer_period;
66 bool is_btscan; 66 bool is_btscan;
67 int ret; 67 int ret;
68 u8 cmd_rsp, aggr;
69 68
70 ath_detect_bt_priority(priv); 69 ath_detect_bt_priority(priv);
71 70
72 is_btscan = !!(priv->op_flags & OP_BT_SCAN); 71 is_btscan = !!(priv->op_flags & OP_BT_SCAN);
73 72
74 aggr = priv->op_flags & OP_BT_PRIORITY_DETECTED; 73 ret = ath9k_htc_update_cap_target(priv,
75 74 !!(priv->op_flags & OP_BT_PRIORITY_DETECTED));
76 WMI_CMD_BUF(WMI_AGGR_LIMIT_CMD, &aggr); 75 if (ret) {
76 ath_err(common, "Unable to set BTCOEX parameters\n");
77 return;
78 }
77 79
78 ath9k_cmn_btcoex_bt_stomp(common, is_btscan ? ATH_BTCOEX_STOMP_ALL : 80 ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
79 btcoex->bt_stomp_type); 81 btcoex->bt_stomp_type);
80 82
81 timer_period = is_btscan ? btcoex->btscan_no_stomp : 83 timer_period = is_btscan ? btcoex->btscan_no_stomp :
@@ -103,9 +105,9 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
103 "time slice work for bt and wlan\n"); 105 "time slice work for bt and wlan\n");
104 106
105 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 107 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
106 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE); 108 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
107 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 109 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
108 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_LOW); 110 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
109} 111}
110 112
111void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv) 113void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
@@ -152,140 +154,41 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
152/* LED */ 154/* LED */
153/*******/ 155/*******/
154 156
155static void ath9k_led_blink_work(struct work_struct *work) 157#ifdef CONFIG_MAC80211_LEDS
158void ath9k_led_work(struct work_struct *work)
156{ 159{
157 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, 160 struct ath9k_htc_priv *priv = container_of(work,
158 ath9k_led_blink_work.work); 161 struct ath9k_htc_priv,
162 led_work);
159 163
160 if (!(priv->op_flags & OP_LED_ASSOCIATED)) 164 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
161 return; 165 (priv->brightness == LED_OFF));
162
163 if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
164 (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
165 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
166 else
167 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
168 (priv->op_flags & OP_LED_ON) ? 1 : 0);
169
170 ieee80211_queue_delayed_work(priv->hw,
171 &priv->ath9k_led_blink_work,
172 (priv->op_flags & OP_LED_ON) ?
173 msecs_to_jiffies(priv->led_off_duration) :
174 msecs_to_jiffies(priv->led_on_duration));
175
176 priv->led_on_duration = priv->led_on_cnt ?
177 max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
178 ATH_LED_ON_DURATION_IDLE;
179 priv->led_off_duration = priv->led_off_cnt ?
180 max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
181 ATH_LED_OFF_DURATION_IDLE;
182 priv->led_on_cnt = priv->led_off_cnt = 0;
183
184 if (priv->op_flags & OP_LED_ON)
185 priv->op_flags &= ~OP_LED_ON;
186 else
187 priv->op_flags |= OP_LED_ON;
188}
189
190static void ath9k_led_brightness_work(struct work_struct *work)
191{
192 struct ath_led *led = container_of(work, struct ath_led,
193 brightness_work.work);
194 struct ath9k_htc_priv *priv = led->priv;
195
196 switch (led->brightness) {
197 case LED_OFF:
198 if (led->led_type == ATH_LED_ASSOC ||
199 led->led_type == ATH_LED_RADIO) {
200 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
201 (led->led_type == ATH_LED_RADIO));
202 priv->op_flags &= ~OP_LED_ASSOCIATED;
203 if (led->led_type == ATH_LED_RADIO)
204 priv->op_flags &= ~OP_LED_ON;
205 } else {
206 priv->led_off_cnt++;
207 }
208 break;
209 case LED_FULL:
210 if (led->led_type == ATH_LED_ASSOC) {
211 priv->op_flags |= OP_LED_ASSOCIATED;
212 ieee80211_queue_delayed_work(priv->hw,
213 &priv->ath9k_led_blink_work, 0);
214 } else if (led->led_type == ATH_LED_RADIO) {
215 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
216 priv->op_flags |= OP_LED_ON;
217 } else {
218 priv->led_on_cnt++;
219 }
220 break;
221 default:
222 break;
223 }
224} 166}
225 167
226static void ath9k_led_brightness(struct led_classdev *led_cdev, 168static void ath9k_led_brightness(struct led_classdev *led_cdev,
227 enum led_brightness brightness) 169 enum led_brightness brightness)
228{ 170{
229 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev); 171 struct ath9k_htc_priv *priv = container_of(led_cdev,
230 struct ath9k_htc_priv *priv = led->priv; 172 struct ath9k_htc_priv,
231 173 led_cdev);
232 led->brightness = brightness;
233 if (!(priv->op_flags & OP_LED_DEINIT))
234 ieee80211_queue_delayed_work(priv->hw,
235 &led->brightness_work, 0);
236}
237
238void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
239{
240 cancel_delayed_work_sync(&priv->radio_led.brightness_work);
241 cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
242 cancel_delayed_work_sync(&priv->tx_led.brightness_work);
243 cancel_delayed_work_sync(&priv->rx_led.brightness_work);
244}
245
246static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
247 char *trigger)
248{
249 int ret;
250
251 led->priv = priv;
252 led->led_cdev.name = led->name;
253 led->led_cdev.default_trigger = trigger;
254 led->led_cdev.brightness_set = ath9k_led_brightness;
255 174
256 ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev); 175 /* Not locked, but it's just a tiny green light... */
257 if (ret) 176 priv->brightness = brightness;
258 ath_err(ath9k_hw_common(priv->ah), 177 ieee80211_queue_work(priv->hw, &priv->led_work);
259 "Failed to register led:%s", led->name);
260 else
261 led->registered = 1;
262
263 INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
264
265 return ret;
266}
267
268static void ath9k_unregister_led(struct ath_led *led)
269{
270 if (led->registered) {
271 led_classdev_unregister(&led->led_cdev);
272 led->registered = 0;
273 }
274} 178}
275 179
276void ath9k_deinit_leds(struct ath9k_htc_priv *priv) 180void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
277{ 181{
278 priv->op_flags |= OP_LED_DEINIT; 182 if (!priv->led_registered)
279 ath9k_unregister_led(&priv->assoc_led); 183 return;
280 priv->op_flags &= ~OP_LED_ASSOCIATED; 184
281 ath9k_unregister_led(&priv->tx_led); 185 ath9k_led_brightness(&priv->led_cdev, LED_OFF);
282 ath9k_unregister_led(&priv->rx_led); 186 led_classdev_unregister(&priv->led_cdev);
283 ath9k_unregister_led(&priv->radio_led); 187 cancel_work_sync(&priv->led_work);
284} 188}
285 189
286void ath9k_init_leds(struct ath9k_htc_priv *priv) 190void ath9k_init_leds(struct ath9k_htc_priv *priv)
287{ 191{
288 char *trigger;
289 int ret; 192 int ret;
290 193
291 if (AR_SREV_9287(priv->ah)) 194 if (AR_SREV_9287(priv->ah))
@@ -303,48 +206,21 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
303 /* LED off, active low */ 206 /* LED off, active low */
304 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1); 207 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
305 208
306 INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work); 209 snprintf(priv->led_name, sizeof(priv->led_name),
307 210 "ath9k_htc-%s", wiphy_name(priv->hw->wiphy));
308 trigger = ieee80211_get_radio_led_name(priv->hw); 211 priv->led_cdev.name = priv->led_name;
309 snprintf(priv->radio_led.name, sizeof(priv->radio_led.name), 212 priv->led_cdev.brightness_set = ath9k_led_brightness;
310 "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
311 ret = ath9k_register_led(priv, &priv->radio_led, trigger);
312 priv->radio_led.led_type = ATH_LED_RADIO;
313 if (ret)
314 goto fail;
315
316 trigger = ieee80211_get_assoc_led_name(priv->hw);
317 snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
318 "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
319 ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
320 priv->assoc_led.led_type = ATH_LED_ASSOC;
321 if (ret)
322 goto fail;
323
324 trigger = ieee80211_get_tx_led_name(priv->hw);
325 snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
326 "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
327 ret = ath9k_register_led(priv, &priv->tx_led, trigger);
328 priv->tx_led.led_type = ATH_LED_TX;
329 if (ret)
330 goto fail;
331
332 trigger = ieee80211_get_rx_led_name(priv->hw);
333 snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
334 "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
335 ret = ath9k_register_led(priv, &priv->rx_led, trigger);
336 priv->rx_led.led_type = ATH_LED_RX;
337 if (ret)
338 goto fail;
339
340 priv->op_flags &= ~OP_LED_DEINIT;
341 213
342 return; 214 ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &priv->led_cdev);
215 if (ret < 0)
216 return;
217
218 INIT_WORK(&priv->led_work, ath9k_led_work);
219 priv->led_registered = true;
343 220
344fail: 221 return;
345 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
346 ath9k_deinit_leds(priv);
347} 222}
223#endif
348 224
349/*******************/ 225/*******************/
350/* Rfkill */ 226/* Rfkill */
@@ -398,9 +274,9 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
398 274
399 /* Start TX */ 275 /* Start TX */
400 htc_start(priv->htc); 276 htc_start(priv->htc);
401 spin_lock_bh(&priv->tx_lock); 277 spin_lock_bh(&priv->tx.tx_lock);
402 priv->tx_queues_stop = false; 278 priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
403 spin_unlock_bh(&priv->tx_lock); 279 spin_unlock_bh(&priv->tx.tx_lock);
404 ieee80211_wake_queues(hw); 280 ieee80211_wake_queues(hw);
405 281
406 WMI_CMD(WMI_ENABLE_INTR_CMDID); 282 WMI_CMD(WMI_ENABLE_INTR_CMDID);
@@ -429,13 +305,15 @@ void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
429 305
430 /* Stop TX */ 306 /* Stop TX */
431 ieee80211_stop_queues(hw); 307 ieee80211_stop_queues(hw);
432 htc_stop(priv->htc); 308 ath9k_htc_tx_drain(priv);
433 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 309 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
434 skb_queue_purge(&priv->tx_queue);
435 310
436 /* Stop RX */ 311 /* Stop RX */
437 WMI_CMD(WMI_STOP_RECV_CMDID); 312 WMI_CMD(WMI_STOP_RECV_CMDID);
438 313
314 /* Clear the WMI event queue */
315 ath9k_wmi_event_drain(priv);
316
439 /* 317 /*
440 * The MIB counters have to be disabled here, 318 * The MIB counters have to be disabled here,
441 * since the target doesn't do it. 319 * since the target doesn't do it.
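
Note (illustration, not part of the patch): the LED rework in the hunks above replaces the four trigger-specific LEDs with a single led_classdev whose brightness_set callback only records the requested brightness and queues a work item; the actual GPIO write happens later in process context. A minimal, self-contained sketch of that pattern follows -- the demo_* names, the set_gpio hook and the active-low polarity are assumptions, not symbols from this patch.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/workqueue.h>

struct demo_led {
	struct led_classdev cdev;
	struct work_struct work;
	enum led_brightness brightness;
	void (*set_gpio)(bool level);	/* hardware hook supplied by the caller */
};

static void demo_led_work(struct work_struct *work)
{
	struct demo_led *led = container_of(work, struct demo_led, work);

	/* Active-low LED: drive the pin high only when the LED is off. */
	led->set_gpio(led->brightness == LED_OFF);
}

static void demo_led_brightness(struct led_classdev *cdev,
				enum led_brightness brightness)
{
	struct demo_led *led = container_of(cdev, struct demo_led, cdev);

	/* brightness_set can be called in atomic context; defer the I/O. */
	led->brightness = brightness;
	schedule_work(&led->work);
}

static int demo_led_register(struct demo_led *led, struct device *dev,
			     const char *name)
{
	INIT_WORK(&led->work, demo_led_work);
	led->cdev.name = name;
	led->cdev.brightness_set = demo_led_brightness;
	return led_classdev_register(dev, &led->cdev);
}
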
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index fc67c937e172..bfdc8a887183 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -117,6 +117,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
117 RATE(540, 0x0c, 0), 117 RATE(540, 0x0c, 0),
118}; 118};
119 119
120#ifdef CONFIG_MAC80211_LEDS
121static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
122 { .throughput = 0 * 1024, .blink_time = 334 },
123 { .throughput = 1 * 1024, .blink_time = 260 },
124 { .throughput = 5 * 1024, .blink_time = 220 },
125 { .throughput = 10 * 1024, .blink_time = 190 },
126 { .throughput = 20 * 1024, .blink_time = 170 },
127 { .throughput = 50 * 1024, .blink_time = 150 },
128 { .throughput = 70 * 1024, .blink_time = 130 },
129 { .throughput = 100 * 1024, .blink_time = 110 },
130 { .throughput = 200 * 1024, .blink_time = 80 },
131 { .throughput = 300 * 1024, .blink_time = 50 },
132};
133#endif
134
120static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv) 135static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
121{ 136{
122 int time_left; 137 int time_left;
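
Note (illustration, not part of the patch): the throughput table added in the hunk above feeds mac80211's throughput LED trigger. Each entry pairs a threshold (the values read as Kbit/s, hence the "* 1024" steps for Mbit/s) with a blink period in milliseconds; the entry with the highest threshold the measured throughput still reaches wins, so the LED blinks faster as traffic grows. A hedged sketch of that selection -- mac80211's own implementation may differ in detail:

#include <net/mac80211.h>

/* Pick the blink period for a measured throughput, given a table sorted
 * by ascending threshold -- the same shape as ath9k_htc_tpt_blink. */
static int demo_pick_blink_time(const struct ieee80211_tpt_blink *tbl,
				unsigned int entries, unsigned int tpt_kbps)
{
	int blink_time = tbl[0].blink_time;
	unsigned int i;

	for (i = 0; i < entries; i++) {
		if (tpt_kbps < (unsigned int)tbl[i].throughput)
			break;
		blink_time = tbl[i].blink_time;
	}

	return blink_time;
}
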
@@ -140,7 +155,6 @@ static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
140 155
141static void ath9k_deinit_priv(struct ath9k_htc_priv *priv) 156static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
142{ 157{
143 ath9k_htc_exit_debug(priv->ah);
144 ath9k_hw_deinit(priv->ah); 158 ath9k_hw_deinit(priv->ah);
145 kfree(priv->ah); 159 kfree(priv->ah);
146 priv->ah = NULL; 160 priv->ah = NULL;
@@ -244,7 +258,7 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid,
244 */ 258 */
245 259
246 if (IS_AR7010_DEVICE(drv_info)) 260 if (IS_AR7010_DEVICE(drv_info))
247 priv->htc->credits = 45; 261 priv->htc->credits = 48;
248 else 262 else
249 priv->htc->credits = 33; 263 priv->htc->credits = 33;
250 264
@@ -430,13 +444,16 @@ static void ath9k_regwrite_flush(void *hw_priv)
430 mutex_unlock(&priv->wmi->multi_write_mutex); 444 mutex_unlock(&priv->wmi->multi_write_mutex);
431} 445}
432 446
433static const struct ath_ops ath9k_common_ops = { 447static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
434 .read = ath9k_regread, 448{
435 .multi_read = ath9k_multi_regread, 449 u32 val;
436 .write = ath9k_regwrite, 450
437 .enable_write_buffer = ath9k_enable_regwrite_buffer, 451 val = ath9k_regread(hw_priv, reg_offset);
438 .write_flush = ath9k_regwrite_flush, 452 val &= ~clr;
439}; 453 val |= set;
454 ath9k_regwrite(hw_priv, val, reg_offset);
455 return val;
456}
440 457
441static void ath_usb_read_cachesize(struct ath_common *common, int *csz) 458static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
442{ 459{
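
Note (illustration, not part of the patch): ath9k_reg_rmw() above gives the shared ath layer a read-modify-write primitive over the slow USB register path. Its contract is simply "clear the bits in clr, then set the bits in set, and report the value written back"; a pure-C sketch of that contract with one worked value:

#include <linux/types.h>

static u32 demo_rmw(u32 cur, u32 set, u32 clr)
{
	cur &= ~clr;	/* first drop the bits requested in clr */
	cur |= set;	/* then apply the bits requested in set */
	return cur;	/* the value that ends up in the register */
}

/* Example: demo_rmw(0x00f0, 0x0003, 0x00c0) == 0x0033 */
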
@@ -561,13 +578,7 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
561 int i = 0; 578 int i = 0;
562 579
563 /* Get the hardware key cache size. */ 580 /* Get the hardware key cache size. */
564 common->keymax = priv->ah->caps.keycache_size; 581 common->keymax = AR_KEYTABLE_SIZE;
565 if (common->keymax > ATH_KEYMAX) {
566 ath_dbg(common, ATH_DBG_ANY,
567 "Warning, using only %u entries in %u key cache\n",
568 ATH_KEYMAX, common->keymax);
569 common->keymax = ATH_KEYMAX;
570 }
571 582
572 if (priv->ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) 583 if (priv->ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
573 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED; 584 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
@@ -646,7 +657,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
646{ 657{
647 struct ath_hw *ah = NULL; 658 struct ath_hw *ah = NULL;
648 struct ath_common *common; 659 struct ath_common *common;
649 int ret = 0, csz = 0; 660 int i, ret = 0, csz = 0;
650 661
651 priv->op_flags |= OP_INVALID; 662 priv->op_flags |= OP_INVALID;
652 663
@@ -658,30 +669,35 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
658 ah->hw_version.subsysid = 0; /* FIXME */ 669 ah->hw_version.subsysid = 0; /* FIXME */
659 ah->hw_version.usbdev = drv_info; 670 ah->hw_version.usbdev = drv_info;
660 ah->ah_flags |= AH_USE_EEPROM; 671 ah->ah_flags |= AH_USE_EEPROM;
672 ah->reg_ops.read = ath9k_regread;
673 ah->reg_ops.multi_read = ath9k_multi_regread;
674 ah->reg_ops.write = ath9k_regwrite;
675 ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
676 ah->reg_ops.write_flush = ath9k_regwrite_flush;
677 ah->reg_ops.rmw = ath9k_reg_rmw;
661 priv->ah = ah; 678 priv->ah = ah;
662 679
663 common = ath9k_hw_common(ah); 680 common = ath9k_hw_common(ah);
664 common->ops = &ath9k_common_ops; 681 common->ops = &ah->reg_ops;
665 common->bus_ops = &ath9k_usb_bus_ops; 682 common->bus_ops = &ath9k_usb_bus_ops;
666 common->ah = ah; 683 common->ah = ah;
667 common->hw = priv->hw; 684 common->hw = priv->hw;
668 common->priv = priv; 685 common->priv = priv;
669 common->debug_mask = ath9k_debug; 686 common->debug_mask = ath9k_debug;
670 687
671 spin_lock_init(&priv->wmi->wmi_lock);
672 spin_lock_init(&priv->beacon_lock); 688 spin_lock_init(&priv->beacon_lock);
673 spin_lock_init(&priv->tx_lock); 689 spin_lock_init(&priv->tx.tx_lock);
674 mutex_init(&priv->mutex); 690 mutex_init(&priv->mutex);
675 mutex_init(&priv->htc_pm_lock); 691 mutex_init(&priv->htc_pm_lock);
676 tasklet_init(&priv->swba_tasklet, ath9k_swba_tasklet,
677 (unsigned long)priv);
678 tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet, 692 tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
679 (unsigned long)priv); 693 (unsigned long)priv);
680 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, 694 tasklet_init(&priv->tx_failed_tasklet, ath9k_tx_failed_tasklet,
681 (unsigned long)priv); 695 (unsigned long)priv);
682 INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work); 696 INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
683 INIT_WORK(&priv->ps_work, ath9k_ps_work); 697 INIT_WORK(&priv->ps_work, ath9k_ps_work);
684 INIT_WORK(&priv->fatal_work, ath9k_fatal_work); 698 INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
699 setup_timer(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer,
700 (unsigned long)priv);
685 701
686 /* 702 /*
687 * Cache line size is used to size and align various 703 * Cache line size is used to size and align various
@@ -698,16 +714,13 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
698 goto err_hw; 714 goto err_hw;
699 } 715 }
700 716
701 ret = ath9k_htc_init_debug(ah);
702 if (ret) {
703 ath_err(common, "Unable to create debugfs files\n");
704 goto err_debug;
705 }
706
707 ret = ath9k_init_queues(priv); 717 ret = ath9k_init_queues(priv);
708 if (ret) 718 if (ret)
709 goto err_queues; 719 goto err_queues;
710 720
721 for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
722 priv->cur_beacon_conf.bslot[i] = NULL;
723
711 ath9k_init_crypto(priv); 724 ath9k_init_crypto(priv);
712 ath9k_init_channels_rates(priv); 725 ath9k_init_channels_rates(priv);
713 ath9k_init_misc(priv); 726 ath9k_init_misc(priv);
@@ -720,8 +733,6 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
720 return 0; 733 return 0;
721 734
722err_queues: 735err_queues:
723 ath9k_htc_exit_debug(ah);
724err_debug:
725 ath9k_hw_deinit(ah); 736 ath9k_hw_deinit(ah);
726err_hw: 737err_hw:
727 738
@@ -742,17 +753,27 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
742 IEEE80211_HW_HAS_RATE_CONTROL | 753 IEEE80211_HW_HAS_RATE_CONTROL |
743 IEEE80211_HW_RX_INCLUDES_FCS | 754 IEEE80211_HW_RX_INCLUDES_FCS |
744 IEEE80211_HW_SUPPORTS_PS | 755 IEEE80211_HW_SUPPORTS_PS |
745 IEEE80211_HW_PS_NULLFUNC_STACK; 756 IEEE80211_HW_PS_NULLFUNC_STACK |
757 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
746 758
747 hw->wiphy->interface_modes = 759 hw->wiphy->interface_modes =
748 BIT(NL80211_IFTYPE_STATION) | 760 BIT(NL80211_IFTYPE_STATION) |
749 BIT(NL80211_IFTYPE_ADHOC); 761 BIT(NL80211_IFTYPE_ADHOC) |
762 BIT(NL80211_IFTYPE_AP) |
763 BIT(NL80211_IFTYPE_P2P_GO) |
764 BIT(NL80211_IFTYPE_P2P_CLIENT);
750 765
751 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 766 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
752 767
753 hw->queues = 4; 768 hw->queues = 4;
754 hw->channel_change_time = 5000; 769 hw->channel_change_time = 5000;
755 hw->max_listen_interval = 10; 770 hw->max_listen_interval = 10;
771
772 if (AR_SREV_9271(priv->ah))
773 hw->max_tx_aggregation_subframes = MAX_TX_AMPDU_SUBFRAMES_9271;
774 else
775 hw->max_tx_aggregation_subframes = MAX_TX_AMPDU_SUBFRAMES_7010;
776
756 hw->vif_data_size = sizeof(struct ath9k_htc_vif); 777 hw->vif_data_size = sizeof(struct ath9k_htc_vif);
757 hw->sta_data_size = sizeof(struct ath9k_htc_sta); 778 hw->sta_data_size = sizeof(struct ath9k_htc_sta);
758 779
@@ -779,6 +800,43 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
779 SET_IEEE80211_PERM_ADDR(hw, common->macaddr); 800 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
780} 801}
781 802
803static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
804{
805 struct ieee80211_hw *hw = priv->hw;
806 struct wmi_fw_version cmd_rsp;
807 int ret;
808
809 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
810
811 WMI_CMD(WMI_GET_FW_VERSION);
812 if (ret)
813 return -EINVAL;
814
815 priv->fw_version_major = be16_to_cpu(cmd_rsp.major);
816 priv->fw_version_minor = be16_to_cpu(cmd_rsp.minor);
817
818 snprintf(hw->wiphy->fw_version, ETHTOOL_BUSINFO_LEN, "%d.%d",
819 priv->fw_version_major,
820 priv->fw_version_minor);
821
822 dev_info(priv->dev, "ath9k_htc: FW Version: %d.%d\n",
823 priv->fw_version_major,
824 priv->fw_version_minor);
825
826 /*
827 * Check if the available FW matches the driver's
828 * required version.
829 */
830 if (priv->fw_version_major != MAJOR_VERSION_REQ ||
831 priv->fw_version_minor != MINOR_VERSION_REQ) {
832 dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
833 MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
834 return -EINVAL;
835 }
836
837 return 0;
838}
839
782static int ath9k_init_device(struct ath9k_htc_priv *priv, 840static int ath9k_init_device(struct ath9k_htc_priv *priv,
783 u16 devid, char *product, u32 drv_info) 841 u16 devid, char *product, u32 drv_info)
784{ 842{
@@ -798,6 +856,10 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
798 common = ath9k_hw_common(ah); 856 common = ath9k_hw_common(ah);
799 ath9k_set_hw_capab(priv, hw); 857 ath9k_set_hw_capab(priv, hw);
800 858
859 error = ath9k_init_firmware_version(priv);
860 if (error != 0)
861 goto err_fw;
862
801 /* Initialize regulatory */ 863 /* Initialize regulatory */
802 error = ath_regd_init(&common->regulatory, priv->hw->wiphy, 864 error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
803 ath9k_reg_notifier); 865 ath9k_reg_notifier);
@@ -816,6 +878,13 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
816 if (error != 0) 878 if (error != 0)
817 goto err_rx; 879 goto err_rx;
818 880
881#ifdef CONFIG_MAC80211_LEDS
882 /* must be initialized before ieee80211_register_hw */
883 priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
884 IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_htc_tpt_blink,
885 ARRAY_SIZE(ath9k_htc_tpt_blink));
886#endif
887
819 /* Register with mac80211 */ 888 /* Register with mac80211 */
820 error = ieee80211_register_hw(hw); 889 error = ieee80211_register_hw(hw);
821 if (error) 890 if (error)
@@ -828,6 +897,12 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
828 goto err_world; 897 goto err_world;
829 } 898 }
830 899
900 error = ath9k_htc_init_debug(priv->ah);
901 if (error) {
902 ath_err(common, "Unable to create debugfs files\n");
903 goto err_world;
904 }
905
831 ath_dbg(common, ATH_DBG_CONFIG, 906 ath_dbg(common, ATH_DBG_CONFIG,
832 "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, " 907 "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
833 "BE:%d, BK:%d, VI:%d, VO:%d\n", 908 "BE:%d, BK:%d, VI:%d, VO:%d\n",
@@ -858,6 +933,8 @@ err_rx:
858err_tx: 933err_tx:
859 /* Nothing */ 934 /* Nothing */
860err_regd: 935err_regd:
936 /* Nothing */
937err_fw:
861 ath9k_deinit_priv(priv); 938 ath9k_deinit_priv(priv);
862err_init: 939err_init:
863 return error; 940 return error;
@@ -946,38 +1023,20 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
946 1023
947static int __init ath9k_htc_init(void) 1024static int __init ath9k_htc_init(void)
948{ 1025{
949 int error; 1026 if (ath9k_hif_usb_init() < 0) {
950
951 error = ath9k_htc_debug_create_root();
952 if (error < 0) {
953 printk(KERN_ERR
954 "ath9k_htc: Unable to create debugfs root: %d\n",
955 error);
956 goto err_dbg;
957 }
958
959 error = ath9k_hif_usb_init();
960 if (error < 0) {
961 printk(KERN_ERR 1027 printk(KERN_ERR
962 "ath9k_htc: No USB devices found," 1028 "ath9k_htc: No USB devices found,"
963 " driver not installed.\n"); 1029 " driver not installed.\n");
964 error = -ENODEV; 1030 return -ENODEV;
965 goto err_usb;
966 } 1031 }
967 1032
968 return 0; 1033 return 0;
969
970err_usb:
971 ath9k_htc_debug_remove_root();
972err_dbg:
973 return error;
974} 1034}
975module_init(ath9k_htc_init); 1035module_init(ath9k_htc_init);
976 1036
977static void __exit ath9k_htc_exit(void) 1037static void __exit ath9k_htc_exit(void)
978{ 1038{
979 ath9k_hif_usb_exit(); 1039 ath9k_hif_usb_exit();
980 ath9k_htc_debug_remove_root();
981 printk(KERN_INFO "ath9k_htc: Driver unloaded\n"); 1040 printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
982} 1041}
983module_exit(ath9k_htc_exit); 1042module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index db8c0c044e9e..5aa104fe7eeb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -16,10 +16,6 @@
16 16
17#include "htc.h" 17#include "htc.h"
18 18
19#ifdef CONFIG_ATH9K_HTC_DEBUGFS
20static struct dentry *ath9k_debugfs_root;
21#endif
22
23/*************/ 19/*************/
24/* Utilities */ 20/* Utilities */
25/*************/ 21/*************/
@@ -197,11 +193,16 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
197 193
198 ath9k_htc_stop_ani(priv); 194 ath9k_htc_stop_ani(priv);
199 ieee80211_stop_queues(priv->hw); 195 ieee80211_stop_queues(priv->hw);
200 htc_stop(priv->htc); 196
197 del_timer_sync(&priv->tx.cleanup_timer);
198 ath9k_htc_tx_drain(priv);
199
201 WMI_CMD(WMI_DISABLE_INTR_CMDID); 200 WMI_CMD(WMI_DISABLE_INTR_CMDID);
202 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 201 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
203 WMI_CMD(WMI_STOP_RECV_CMDID); 202 WMI_CMD(WMI_STOP_RECV_CMDID);
204 203
204 ath9k_wmi_event_drain(priv);
205
205 caldata = &priv->caldata; 206 caldata = &priv->caldata;
206 ret = ath9k_hw_reset(ah, ah->curchan, caldata, false); 207 ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
207 if (ret) { 208 if (ret) {
@@ -225,6 +226,9 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
225 ath9k_htc_vif_reconfig(priv); 226 ath9k_htc_vif_reconfig(priv);
226 ieee80211_wake_queues(priv->hw); 227 ieee80211_wake_queues(priv->hw);
227 228
229 mod_timer(&priv->tx.cleanup_timer,
230 jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
231
228 ath9k_htc_ps_restore(priv); 232 ath9k_htc_ps_restore(priv);
229 mutex_unlock(&priv->mutex); 233 mutex_unlock(&priv->mutex);
230} 234}
@@ -250,11 +254,16 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
250 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL); 254 fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
251 255
252 ath9k_htc_ps_wakeup(priv); 256 ath9k_htc_ps_wakeup(priv);
253 htc_stop(priv->htc); 257
258 del_timer_sync(&priv->tx.cleanup_timer);
259 ath9k_htc_tx_drain(priv);
260
254 WMI_CMD(WMI_DISABLE_INTR_CMDID); 261 WMI_CMD(WMI_DISABLE_INTR_CMDID);
255 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 262 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
256 WMI_CMD(WMI_STOP_RECV_CMDID); 263 WMI_CMD(WMI_STOP_RECV_CMDID);
257 264
265 ath9k_wmi_event_drain(priv);
266
258 ath_dbg(common, ATH_DBG_CONFIG, 267 ath_dbg(common, ATH_DBG_CONFIG,
259 "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n", 268 "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
260 priv->ah->curchan->channel, 269 priv->ah->curchan->channel,
@@ -263,6 +272,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
263 272
264 if (!fastcc) 273 if (!fastcc)
265 caldata = &priv->caldata; 274 caldata = &priv->caldata;
275
266 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); 276 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
267 if (ret) { 277 if (ret) {
268 ath_err(common, 278 ath_err(common,
@@ -296,6 +306,9 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
296 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) 306 !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
297 ath9k_htc_vif_reconfig(priv); 307 ath9k_htc_vif_reconfig(priv);
298 308
309 mod_timer(&priv->tx.cleanup_timer,
310 jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
311
299err: 312err:
300 ath9k_htc_ps_restore(priv); 313 ath9k_htc_ps_restore(priv);
301 return ret; 314 return ret;
@@ -319,6 +332,11 @@ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
319 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); 332 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
320 hvif.index = priv->mon_vif_idx; 333 hvif.index = priv->mon_vif_idx;
321 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); 334 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
335 if (ret) {
336 ath_err(common, "Unable to remove monitor interface at idx: %d\n",
337 priv->mon_vif_idx);
338 }
339
322 priv->nvifs--; 340 priv->nvifs--;
323 priv->vif_slot &= ~(1 << priv->mon_vif_idx); 341 priv->vif_slot &= ~(1 << priv->mon_vif_idx);
324} 342}
@@ -349,7 +367,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
349 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 367 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
350 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); 368 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
351 369
352 hvif.opmode = cpu_to_be32(HTC_M_MONITOR); 370 hvif.opmode = HTC_M_MONITOR;
353 hvif.index = ffz(priv->vif_slot); 371 hvif.index = ffz(priv->vif_slot);
354 372
355 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); 373 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
@@ -382,7 +400,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
382 tsta.is_vif_sta = 1; 400 tsta.is_vif_sta = 1;
383 tsta.sta_index = sta_idx; 401 tsta.sta_index = sta_idx;
384 tsta.vif_index = hvif.index; 402 tsta.vif_index = hvif.index;
385 tsta.maxampdu = 0xffff; 403 tsta.maxampdu = cpu_to_be16(0xffff);
386 404
387 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); 405 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
388 if (ret) { 406 if (ret) {
@@ -449,6 +467,7 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
449 struct ath9k_htc_sta *ista; 467 struct ath9k_htc_sta *ista;
450 int ret, sta_idx; 468 int ret, sta_idx;
451 u8 cmd_rsp; 469 u8 cmd_rsp;
470 u16 maxampdu;
452 471
453 if (priv->nstations >= ATH9K_HTC_MAX_STA) 472 if (priv->nstations >= ATH9K_HTC_MAX_STA)
454 return -ENOBUFS; 473 return -ENOBUFS;
@@ -463,9 +482,7 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
463 ista = (struct ath9k_htc_sta *) sta->drv_priv; 482 ista = (struct ath9k_htc_sta *) sta->drv_priv;
464 memcpy(&tsta.macaddr, sta->addr, ETH_ALEN); 483 memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
465 memcpy(&tsta.bssid, common->curbssid, ETH_ALEN); 484 memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
466 tsta.associd = common->curaid;
467 tsta.is_vif_sta = 0; 485 tsta.is_vif_sta = 0;
468 tsta.valid = true;
469 ista->index = sta_idx; 486 ista->index = sta_idx;
470 } else { 487 } else {
471 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); 488 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
@@ -474,7 +491,15 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
474 491
475 tsta.sta_index = sta_idx; 492 tsta.sta_index = sta_idx;
476 tsta.vif_index = avp->index; 493 tsta.vif_index = avp->index;
477 tsta.maxampdu = 0xffff; 494
495 if (!sta) {
496 tsta.maxampdu = cpu_to_be16(0xffff);
497 } else {
498 maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
499 sta->ht_cap.ampdu_factor);
500 tsta.maxampdu = cpu_to_be16(maxampdu);
501 }
502
478 if (sta && sta->ht_cap.ht_supported) 503 if (sta && sta->ht_cap.ht_supported)
479 tsta.flags = cpu_to_be16(ATH_HTC_STA_HT); 504 tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);
480 505
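
Note (illustration, not part of the patch): the A-MPDU limit computed in the hunk above follows the usual 802.11n convention -- the peer's ampdu_factor (0-3 from its HT capabilities) is added to IEEE80211_HT_MAX_AMPDU_FACTOR (13 in mainline headers) and used as a power-of-two length. Worked values:

/*
 *   ampdu_factor 0  ->  1 << 13  =  8192 bytes
 *   ampdu_factor 1  ->  1 << 14  = 16384 bytes
 *   ampdu_factor 2  ->  1 << 15  = 32768 bytes
 *   ampdu_factor 3  ->  1 << 16  = 65536 bytes
 *
 * Station entries created without an ieee80211_sta (the !sta branch
 * above) skip the calculation and simply advertise the 0xffff maximum.
 */
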
@@ -547,7 +572,8 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
547 return 0; 572 return 0;
548} 573}
549 574
550int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv) 575int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv,
576 u8 enable_coex)
551{ 577{
552 struct ath9k_htc_cap_target tcap; 578 struct ath9k_htc_cap_target tcap;
553 int ret; 579 int ret;
@@ -555,13 +581,9 @@ int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
555 581
556 memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target)); 582 memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
557 583
558 /* FIXME: Values are hardcoded */ 584 tcap.ampdu_limit = cpu_to_be32(0xffff);
559 tcap.flags = 0x240c40; 585 tcap.ampdu_subframes = priv->hw->max_tx_aggregation_subframes;
560 tcap.flags_ext = 0x80601000; 586 tcap.enable_coex = enable_coex;
561 tcap.ampdu_limit = 0xffff0000;
562 tcap.ampdu_subframes = 20;
563 tcap.tx_chainmask_legacy = priv->ah->caps.tx_chainmask;
564 tcap.protmode = 1;
565 tcap.tx_chainmask = priv->ah->caps.tx_chainmask; 587 tcap.tx_chainmask = priv->ah->caps.tx_chainmask;
566 588
567 WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap); 589 WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
@@ -709,218 +731,13 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
709 (aggr.aggr_enable) ? "Starting" : "Stopping", 731 (aggr.aggr_enable) ? "Starting" : "Stopping",
710 sta->addr, tid); 732 sta->addr, tid);
711 733
712 spin_lock_bh(&priv->tx_lock); 734 spin_lock_bh(&priv->tx.tx_lock);
713 ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP; 735 ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
714 spin_unlock_bh(&priv->tx_lock); 736 spin_unlock_bh(&priv->tx.tx_lock);
715 737
716 return ret; 738 return ret;
717} 739}
718 740
719/*********/
720/* DEBUG */
721/*********/
722
723#ifdef CONFIG_ATH9K_HTC_DEBUGFS
724
725static int ath9k_debugfs_open(struct inode *inode, struct file *file)
726{
727 file->private_data = inode->i_private;
728 return 0;
729}
730
731static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
732 size_t count, loff_t *ppos)
733{
734 struct ath9k_htc_priv *priv = file->private_data;
735 struct ath9k_htc_target_stats cmd_rsp;
736 char buf[512];
737 unsigned int len = 0;
738 int ret = 0;
739
740 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
741
742 WMI_CMD(WMI_TGT_STATS_CMDID);
743 if (ret)
744 return -EINVAL;
745
746
747 len += snprintf(buf + len, sizeof(buf) - len,
748 "%19s : %10u\n", "TX Short Retries",
749 be32_to_cpu(cmd_rsp.tx_shortretry));
750 len += snprintf(buf + len, sizeof(buf) - len,
751 "%19s : %10u\n", "TX Long Retries",
752 be32_to_cpu(cmd_rsp.tx_longretry));
753 len += snprintf(buf + len, sizeof(buf) - len,
754 "%19s : %10u\n", "TX Xretries",
755 be32_to_cpu(cmd_rsp.tx_xretries));
756 len += snprintf(buf + len, sizeof(buf) - len,
757 "%19s : %10u\n", "TX Unaggr. Xretries",
758 be32_to_cpu(cmd_rsp.ht_txunaggr_xretry));
759 len += snprintf(buf + len, sizeof(buf) - len,
760 "%19s : %10u\n", "TX Xretries (HT)",
761 be32_to_cpu(cmd_rsp.ht_tx_xretries));
762 len += snprintf(buf + len, sizeof(buf) - len,
763 "%19s : %10u\n", "TX Rate", priv->debug.txrate);
764
765 if (len > sizeof(buf))
766 len = sizeof(buf);
767
768 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
769}
770
771static const struct file_operations fops_tgt_stats = {
772 .read = read_file_tgt_stats,
773 .open = ath9k_debugfs_open,
774 .owner = THIS_MODULE,
775 .llseek = default_llseek,
776};
777
778static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
779 size_t count, loff_t *ppos)
780{
781 struct ath9k_htc_priv *priv = file->private_data;
782 char buf[512];
783 unsigned int len = 0;
784
785 len += snprintf(buf + len, sizeof(buf) - len,
786 "%20s : %10u\n", "Buffers queued",
787 priv->debug.tx_stats.buf_queued);
788 len += snprintf(buf + len, sizeof(buf) - len,
789 "%20s : %10u\n", "Buffers completed",
790 priv->debug.tx_stats.buf_completed);
791 len += snprintf(buf + len, sizeof(buf) - len,
792 "%20s : %10u\n", "SKBs queued",
793 priv->debug.tx_stats.skb_queued);
794 len += snprintf(buf + len, sizeof(buf) - len,
795 "%20s : %10u\n", "SKBs completed",
796 priv->debug.tx_stats.skb_completed);
797 len += snprintf(buf + len, sizeof(buf) - len,
798 "%20s : %10u\n", "SKBs dropped",
799 priv->debug.tx_stats.skb_dropped);
800
801 len += snprintf(buf + len, sizeof(buf) - len,
802 "%20s : %10u\n", "BE queued",
803 priv->debug.tx_stats.queue_stats[WME_AC_BE]);
804 len += snprintf(buf + len, sizeof(buf) - len,
805 "%20s : %10u\n", "BK queued",
806 priv->debug.tx_stats.queue_stats[WME_AC_BK]);
807 len += snprintf(buf + len, sizeof(buf) - len,
808 "%20s : %10u\n", "VI queued",
809 priv->debug.tx_stats.queue_stats[WME_AC_VI]);
810 len += snprintf(buf + len, sizeof(buf) - len,
811 "%20s : %10u\n", "VO queued",
812 priv->debug.tx_stats.queue_stats[WME_AC_VO]);
813
814 if (len > sizeof(buf))
815 len = sizeof(buf);
816
817 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
818}
819
820static const struct file_operations fops_xmit = {
821 .read = read_file_xmit,
822 .open = ath9k_debugfs_open,
823 .owner = THIS_MODULE,
824 .llseek = default_llseek,
825};
826
827static ssize_t read_file_recv(struct file *file, char __user *user_buf,
828 size_t count, loff_t *ppos)
829{
830 struct ath9k_htc_priv *priv = file->private_data;
831 char buf[512];
832 unsigned int len = 0;
833
834 len += snprintf(buf + len, sizeof(buf) - len,
835 "%20s : %10u\n", "SKBs allocated",
836 priv->debug.rx_stats.skb_allocated);
837 len += snprintf(buf + len, sizeof(buf) - len,
838 "%20s : %10u\n", "SKBs completed",
839 priv->debug.rx_stats.skb_completed);
840 len += snprintf(buf + len, sizeof(buf) - len,
841 "%20s : %10u\n", "SKBs Dropped",
842 priv->debug.rx_stats.skb_dropped);
843
844 if (len > sizeof(buf))
845 len = sizeof(buf);
846
847 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
848}
849
850static const struct file_operations fops_recv = {
851 .read = read_file_recv,
852 .open = ath9k_debugfs_open,
853 .owner = THIS_MODULE,
854 .llseek = default_llseek,
855};
856
857int ath9k_htc_init_debug(struct ath_hw *ah)
858{
859 struct ath_common *common = ath9k_hw_common(ah);
860 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
861
862 if (!ath9k_debugfs_root)
863 return -ENOENT;
864
865 priv->debug.debugfs_phy = debugfs_create_dir(wiphy_name(priv->hw->wiphy),
866 ath9k_debugfs_root);
867 if (!priv->debug.debugfs_phy)
868 goto err;
869
870 priv->debug.debugfs_tgt_stats = debugfs_create_file("tgt_stats", S_IRUSR,
871 priv->debug.debugfs_phy,
872 priv, &fops_tgt_stats);
873 if (!priv->debug.debugfs_tgt_stats)
874 goto err;
875
876
877 priv->debug.debugfs_xmit = debugfs_create_file("xmit", S_IRUSR,
878 priv->debug.debugfs_phy,
879 priv, &fops_xmit);
880 if (!priv->debug.debugfs_xmit)
881 goto err;
882
883 priv->debug.debugfs_recv = debugfs_create_file("recv", S_IRUSR,
884 priv->debug.debugfs_phy,
885 priv, &fops_recv);
886 if (!priv->debug.debugfs_recv)
887 goto err;
888
889 return 0;
890
891err:
892 ath9k_htc_exit_debug(ah);
893 return -ENOMEM;
894}
895
896void ath9k_htc_exit_debug(struct ath_hw *ah)
897{
898 struct ath_common *common = ath9k_hw_common(ah);
899 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
900
901 debugfs_remove(priv->debug.debugfs_recv);
902 debugfs_remove(priv->debug.debugfs_xmit);
903 debugfs_remove(priv->debug.debugfs_tgt_stats);
904 debugfs_remove(priv->debug.debugfs_phy);
905}
906
907int ath9k_htc_debug_create_root(void)
908{
909 ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
910 if (!ath9k_debugfs_root)
911 return -ENOENT;
912
913 return 0;
914}
915
916void ath9k_htc_debug_remove_root(void)
917{
918 debugfs_remove(ath9k_debugfs_root);
919 ath9k_debugfs_root = NULL;
920}
921
922#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
923
924/*******/ 741/*******/
925/* ANI */ 742/* ANI */
926/*******/ 743/*******/
@@ -1040,7 +857,8 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1040{ 857{
1041 struct ieee80211_hdr *hdr; 858 struct ieee80211_hdr *hdr;
1042 struct ath9k_htc_priv *priv = hw->priv; 859 struct ath9k_htc_priv *priv = hw->priv;
1043 int padpos, padsize, ret; 860 struct ath_common *common = ath9k_hw_common(priv->ah);
861 int padpos, padsize, ret, slot;
1044 862
1045 hdr = (struct ieee80211_hdr *) skb->data; 863 hdr = (struct ieee80211_hdr *) skb->data;
1046 864
@@ -1048,30 +866,32 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1048 padpos = ath9k_cmn_padpos(hdr->frame_control); 866 padpos = ath9k_cmn_padpos(hdr->frame_control);
1049 padsize = padpos & 3; 867 padsize = padpos & 3;
1050 if (padsize && skb->len > padpos) { 868 if (padsize && skb->len > padpos) {
1051 if (skb_headroom(skb) < padsize) 869 if (skb_headroom(skb) < padsize) {
870 ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n");
1052 goto fail_tx; 871 goto fail_tx;
872 }
1053 skb_push(skb, padsize); 873 skb_push(skb, padsize);
1054 memmove(skb->data, skb->data + padsize, padpos); 874 memmove(skb->data, skb->data + padsize, padpos);
1055 } 875 }
1056 876
1057 ret = ath9k_htc_tx_start(priv, skb); 877 slot = ath9k_htc_tx_get_slot(priv);
1058 if (ret != 0) { 878 if (slot < 0) {
1059 if (ret == -ENOMEM) { 879 ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n");
1060 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
1061 "Stopping TX queues\n");
1062 ieee80211_stop_queues(hw);
1063 spin_lock_bh(&priv->tx_lock);
1064 priv->tx_queues_stop = true;
1065 spin_unlock_bh(&priv->tx_lock);
1066 } else {
1067 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
1068 "Tx failed\n");
1069 }
1070 goto fail_tx; 880 goto fail_tx;
1071 } 881 }
1072 882
883 ret = ath9k_htc_tx_start(priv, skb, slot, false);
884 if (ret != 0) {
885 ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n");
886 goto clear_slot;
887 }
888
889 ath9k_htc_check_stop_queues(priv);
890
1073 return; 891 return;
1074 892
893clear_slot:
894 ath9k_htc_tx_clear_slot(priv, slot);
1075fail_tx: 895fail_tx:
1076 dev_kfree_skb_any(skb); 896 dev_kfree_skb_any(skb);
1077} 897}
@@ -1122,7 +942,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1122 942
1123 ath9k_host_rx_init(priv); 943 ath9k_host_rx_init(priv);
1124 944
1125 ret = ath9k_htc_update_cap_target(priv); 945 ret = ath9k_htc_update_cap_target(priv, 0);
1126 if (ret) 946 if (ret)
1127 ath_dbg(common, ATH_DBG_CONFIG, 947 ath_dbg(common, ATH_DBG_CONFIG,
1128 "Failed to update capability in target\n"); 948 "Failed to update capability in target\n");
@@ -1130,12 +950,15 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1130 priv->op_flags &= ~OP_INVALID; 950 priv->op_flags &= ~OP_INVALID;
1131 htc_start(priv->htc); 951 htc_start(priv->htc);
1132 952
1133 spin_lock_bh(&priv->tx_lock); 953 spin_lock_bh(&priv->tx.tx_lock);
1134 priv->tx_queues_stop = false; 954 priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
1135 spin_unlock_bh(&priv->tx_lock); 955 spin_unlock_bh(&priv->tx.tx_lock);
1136 956
1137 ieee80211_wake_queues(hw); 957 ieee80211_wake_queues(hw);
1138 958
959 mod_timer(&priv->tx.cleanup_timer,
960 jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
961
1139 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) { 962 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
1140 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, 963 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1141 AR_STOMP_LOW_WLAN_WGHT); 964 AR_STOMP_LOW_WLAN_WGHT);
@@ -1152,7 +975,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1152 struct ath9k_htc_priv *priv = hw->priv; 975 struct ath9k_htc_priv *priv = hw->priv;
1153 struct ath_hw *ah = priv->ah; 976 struct ath_hw *ah = priv->ah;
1154 struct ath_common *common = ath9k_hw_common(ah); 977 struct ath_common *common = ath9k_hw_common(ah);
1155 int ret = 0; 978 int ret __attribute__ ((unused));
1156 u8 cmd_rsp; 979 u8 cmd_rsp;
1157 980
1158 mutex_lock(&priv->mutex); 981 mutex_lock(&priv->mutex);
@@ -1164,25 +987,27 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1164 } 987 }
1165 988
1166 ath9k_htc_ps_wakeup(priv); 989 ath9k_htc_ps_wakeup(priv);
1167 htc_stop(priv->htc); 990
1168 WMI_CMD(WMI_DISABLE_INTR_CMDID); 991 WMI_CMD(WMI_DISABLE_INTR_CMDID);
1169 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 992 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
1170 WMI_CMD(WMI_STOP_RECV_CMDID); 993 WMI_CMD(WMI_STOP_RECV_CMDID);
1171 994
1172 tasklet_kill(&priv->swba_tasklet);
1173 tasklet_kill(&priv->rx_tasklet); 995 tasklet_kill(&priv->rx_tasklet);
1174 tasklet_kill(&priv->tx_tasklet);
1175 996
1176 skb_queue_purge(&priv->tx_queue); 997 del_timer_sync(&priv->tx.cleanup_timer);
998 ath9k_htc_tx_drain(priv);
999 ath9k_wmi_event_drain(priv);
1177 1000
1178 mutex_unlock(&priv->mutex); 1001 mutex_unlock(&priv->mutex);
1179 1002
1180 /* Cancel all the running timers/work .. */ 1003 /* Cancel all the running timers/work .. */
1181 cancel_work_sync(&priv->fatal_work); 1004 cancel_work_sync(&priv->fatal_work);
1182 cancel_work_sync(&priv->ps_work); 1005 cancel_work_sync(&priv->ps_work);
1183 cancel_delayed_work_sync(&priv->ath9k_led_blink_work); 1006
1007#ifdef CONFIG_MAC80211_LEDS
1008 cancel_work_sync(&priv->led_work);
1009#endif
1184 ath9k_htc_stop_ani(priv); 1010 ath9k_htc_stop_ani(priv);
1185 ath9k_led_stop_brightness(priv);
1186 1011
1187 mutex_lock(&priv->mutex); 1012 mutex_lock(&priv->mutex);
1188 1013
@@ -1245,13 +1070,13 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1245 1070
1246 switch (vif->type) { 1071 switch (vif->type) {
1247 case NL80211_IFTYPE_STATION: 1072 case NL80211_IFTYPE_STATION:
1248 hvif.opmode = cpu_to_be32(HTC_M_STA); 1073 hvif.opmode = HTC_M_STA;
1249 break; 1074 break;
1250 case NL80211_IFTYPE_ADHOC: 1075 case NL80211_IFTYPE_ADHOC:
1251 hvif.opmode = cpu_to_be32(HTC_M_IBSS); 1076 hvif.opmode = HTC_M_IBSS;
1252 break; 1077 break;
1253 case NL80211_IFTYPE_AP: 1078 case NL80211_IFTYPE_AP:
1254 hvif.opmode = cpu_to_be32(HTC_M_HOSTAP); 1079 hvif.opmode = HTC_M_HOSTAP;
1255 break; 1080 break;
1256 default: 1081 default:
1257 ath_err(common, 1082 ath_err(common,
@@ -1281,14 +1106,20 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1281 1106
1282 priv->vif_slot |= (1 << avp->index); 1107 priv->vif_slot |= (1 << avp->index);
1283 priv->nvifs++; 1108 priv->nvifs++;
1284 priv->vif = vif;
1285 1109
1286 INC_VIF(priv, vif->type); 1110 INC_VIF(priv, vif->type);
1111
1112 if ((vif->type == NL80211_IFTYPE_AP) ||
1113 (vif->type == NL80211_IFTYPE_ADHOC))
1114 ath9k_htc_assign_bslot(priv, vif);
1115
1287 ath9k_htc_set_opmode(priv); 1116 ath9k_htc_set_opmode(priv);
1288 1117
1289 if ((priv->ah->opmode == NL80211_IFTYPE_AP) && 1118 if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
1290 !(priv->op_flags & OP_ANI_RUNNING)) 1119 !(priv->op_flags & OP_ANI_RUNNING)) {
1120 ath9k_hw_set_tsfadjust(priv->ah, 1);
1291 ath9k_htc_start_ani(priv); 1121 ath9k_htc_start_ani(priv);
1122 }
1292 1123
1293 ath_dbg(common, ATH_DBG_CONFIG, 1124 ath_dbg(common, ATH_DBG_CONFIG,
1294 "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index); 1125 "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
@@ -1317,13 +1148,21 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1317 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN); 1148 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
1318 hvif.index = avp->index; 1149 hvif.index = avp->index;
1319 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); 1150 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
1151 if (ret) {
1152 ath_err(common, "Unable to remove interface at idx: %d\n",
1153 avp->index);
1154 }
1320 priv->nvifs--; 1155 priv->nvifs--;
1321 priv->vif_slot &= ~(1 << avp->index); 1156 priv->vif_slot &= ~(1 << avp->index);
1322 1157
1323 ath9k_htc_remove_station(priv, vif, NULL); 1158 ath9k_htc_remove_station(priv, vif, NULL);
1324 priv->vif = NULL;
1325 1159
1326 DEC_VIF(priv, vif->type); 1160 DEC_VIF(priv, vif->type);
1161
1162 if ((vif->type == NL80211_IFTYPE_AP) ||
1163 (vif->type == NL80211_IFTYPE_ADHOC))
1164 ath9k_htc_remove_bslot(priv, vif);
1165
1327 ath9k_htc_set_opmode(priv); 1166 ath9k_htc_set_opmode(priv);
1328 1167
1329 /* 1168 /*
@@ -1493,10 +1332,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
1493 struct ieee80211_sta *sta) 1332 struct ieee80211_sta *sta)
1494{ 1333{
1495 struct ath9k_htc_priv *priv = hw->priv; 1334 struct ath9k_htc_priv *priv = hw->priv;
1335 struct ath9k_htc_sta *ista;
1496 int ret; 1336 int ret;
1497 1337
1498 mutex_lock(&priv->mutex); 1338 mutex_lock(&priv->mutex);
1499 ath9k_htc_ps_wakeup(priv); 1339 ath9k_htc_ps_wakeup(priv);
1340 ista = (struct ath9k_htc_sta *) sta->drv_priv;
1341 htc_sta_drain(priv->htc, ista->index);
1500 ret = ath9k_htc_remove_station(priv, vif, sta); 1342 ret = ath9k_htc_remove_station(priv, vif, sta);
1501 ath9k_htc_ps_restore(priv); 1343 ath9k_htc_ps_restore(priv);
1502 mutex_unlock(&priv->mutex); 1344 mutex_unlock(&priv->mutex);
@@ -1644,6 +1486,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1644 if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) { 1486 if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
1645 ath_dbg(common, ATH_DBG_CONFIG, 1487 ath_dbg(common, ATH_DBG_CONFIG,
1646 "Beacon enabled for BSS: %pM\n", bss_conf->bssid); 1488 "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
1489 ath9k_htc_set_tsfadjust(priv, vif);
1647 priv->op_flags |= OP_ENABLE_BEACON; 1490 priv->op_flags |= OP_ENABLE_BEACON;
1648 ath9k_htc_beacon_config(priv, vif); 1491 ath9k_htc_beacon_config(priv, vif);
1649 } 1492 }
@@ -1741,6 +1584,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1741 int ret = 0; 1584 int ret = 0;
1742 1585
1743 mutex_lock(&priv->mutex); 1586 mutex_lock(&priv->mutex);
1587 ath9k_htc_ps_wakeup(priv);
1744 1588
1745 switch (action) { 1589 switch (action) {
1746 case IEEE80211_AMPDU_RX_START: 1590 case IEEE80211_AMPDU_RX_START:
@@ -1758,14 +1602,15 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1758 break; 1602 break;
1759 case IEEE80211_AMPDU_TX_OPERATIONAL: 1603 case IEEE80211_AMPDU_TX_OPERATIONAL:
1760 ista = (struct ath9k_htc_sta *) sta->drv_priv; 1604 ista = (struct ath9k_htc_sta *) sta->drv_priv;
1761 spin_lock_bh(&priv->tx_lock); 1605 spin_lock_bh(&priv->tx.tx_lock);
1762 ista->tid_state[tid] = AGGR_OPERATIONAL; 1606 ista->tid_state[tid] = AGGR_OPERATIONAL;
1763 spin_unlock_bh(&priv->tx_lock); 1607 spin_unlock_bh(&priv->tx.tx_lock);
1764 break; 1608 break;
1765 default: 1609 default:
1766 ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n"); 1610 ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
1767 } 1611 }
1768 1612
1613 ath9k_htc_ps_restore(priv);
1769 mutex_unlock(&priv->mutex); 1614 mutex_unlock(&priv->mutex);
1770 1615
1771 return ret; 1616 return ret;
@@ -1816,6 +1661,55 @@ static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
1816 mutex_unlock(&priv->mutex); 1661 mutex_unlock(&priv->mutex);
1817} 1662}
1818 1663
1664/*
1665 * Currently, this is used only for selecting the minimum rate
1666 * for management frames; rate selection for data frames remains
1667 * unaffected.
1668 */
1669static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
1670 struct ieee80211_vif *vif,
1671 const struct cfg80211_bitrate_mask *mask)
1672{
1673 struct ath9k_htc_priv *priv = hw->priv;
1674 struct ath_common *common = ath9k_hw_common(priv->ah);
1675 struct ath9k_htc_target_rate_mask tmask;
1676 struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
1677 int ret = 0;
1678 u8 cmd_rsp;
1679
1680 memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask));
1681
1682 tmask.vif_index = avp->index;
1683 tmask.band = IEEE80211_BAND_2GHZ;
1684 tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy);
1685
1686 WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
1687 if (ret) {
1688 ath_err(common,
1689 "Unable to set 2G rate mask for "
1690 "interface at idx: %d\n", avp->index);
1691 goto out;
1692 }
1693
1694 tmask.band = IEEE80211_BAND_5GHZ;
1695 tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy);
1696
1697 WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
1698 if (ret) {
1699 ath_err(common,
1700 "Unable to set 5G rate mask for "
1701 "interface at idx: %d\n", avp->index);
1702 goto out;
1703 }
1704
1705 ath_dbg(common, ATH_DBG_CONFIG,
1706 "Set bitrate masks: 0x%x, 0x%x\n",
1707 mask->control[IEEE80211_BAND_2GHZ].legacy,
1708 mask->control[IEEE80211_BAND_5GHZ].legacy);
1709out:
1710 return ret;
1711}
1712
1819struct ieee80211_ops ath9k_htc_ops = { 1713struct ieee80211_ops ath9k_htc_ops = {
1820 .tx = ath9k_htc_tx, 1714 .tx = ath9k_htc_tx,
1821 .start = ath9k_htc_start, 1715 .start = ath9k_htc_start,
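
Note (illustration, not part of the patch): as the comment above ath9k_htc_set_bitrate_mask() says, the target uses the mask only to pick a minimum rate for management frames. The legacy field being forwarded is cfg80211's per-band bitmap over the legacy bitrate table (bit i set means rate i is allowed), so the minimum rate is simply the lowest set bit. A hedged sketch of that interpretation; the demo_* helper is illustrative only:

#include <linux/bitops.h>
#include <linux/types.h>

/* Return the index into the band's bitrates[] of the lowest rate left
 * enabled by the user, or -1 if the legacy bitmap is empty. */
static int demo_lowest_legacy_rate(u32 legacy_mask)
{
	if (!legacy_mask)
		return -1;
	return __ffs(legacy_mask);
}
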
@@ -1838,4 +1732,5 @@ struct ieee80211_ops ath9k_htc_ops = {
1838 .set_rts_threshold = ath9k_htc_set_rts_threshold, 1732 .set_rts_threshold = ath9k_htc_set_rts_threshold,
1839 .rfkill_poll = ath9k_htc_rfkill_poll_state, 1733 .rfkill_poll = ath9k_htc_rfkill_poll_state,
1840 .set_coverage_class = ath9k_htc_set_coverage_class, 1734 .set_coverage_class = ath9k_htc_set_coverage_class,
1735 .set_bitrate_mask = ath9k_htc_set_bitrate_mask,
1841}; 1736};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 4a4f27ba96af..a898dac22337 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -53,6 +53,138 @@ int get_hw_qnum(u16 queue, int *hwq_map)
53 } 53 }
54} 54}
55 55
56void ath9k_htc_check_stop_queues(struct ath9k_htc_priv *priv)
57{
58 spin_lock_bh(&priv->tx.tx_lock);
59 priv->tx.queued_cnt++;
60 if ((priv->tx.queued_cnt >= ATH9K_HTC_TX_THRESHOLD) &&
61 !(priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) {
62 priv->tx.flags |= ATH9K_HTC_OP_TX_QUEUES_STOP;
63 ieee80211_stop_queues(priv->hw);
64 }
65 spin_unlock_bh(&priv->tx.tx_lock);
66}
67
68void ath9k_htc_check_wake_queues(struct ath9k_htc_priv *priv)
69{
70 spin_lock_bh(&priv->tx.tx_lock);
71 if ((priv->tx.queued_cnt < ATH9K_HTC_TX_THRESHOLD) &&
72 (priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) {
73 priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
74 ieee80211_wake_queues(priv->hw);
75 }
76 spin_unlock_bh(&priv->tx.tx_lock);
77}
78
79int ath9k_htc_tx_get_slot(struct ath9k_htc_priv *priv)
80{
81 int slot;
82
83 spin_lock_bh(&priv->tx.tx_lock);
84 slot = find_first_zero_bit(priv->tx.tx_slot, MAX_TX_BUF_NUM);
85 if (slot >= MAX_TX_BUF_NUM) {
86 spin_unlock_bh(&priv->tx.tx_lock);
87 return -ENOBUFS;
88 }
89 __set_bit(slot, priv->tx.tx_slot);
90 spin_unlock_bh(&priv->tx.tx_lock);
91
92 return slot;
93}
94
95void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot)
96{
97 spin_lock_bh(&priv->tx.tx_lock);
98 __clear_bit(slot, priv->tx.tx_slot);
99 spin_unlock_bh(&priv->tx.tx_lock);
100}
101
102static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
103 u16 qnum)
104{
105 enum htc_endpoint_id epid;
106
107 switch (qnum) {
108 case 0:
109 TX_QSTAT_INC(WME_AC_VO);
110 epid = priv->data_vo_ep;
111 break;
112 case 1:
113 TX_QSTAT_INC(WME_AC_VI);
114 epid = priv->data_vi_ep;
115 break;
116 case 2:
117 TX_QSTAT_INC(WME_AC_BE);
118 epid = priv->data_be_ep;
119 break;
120 case 3:
121 default:
122 TX_QSTAT_INC(WME_AC_BK);
123 epid = priv->data_bk_ep;
124 break;
125 }
126
127 return epid;
128}
129
130static inline struct sk_buff_head*
131get_htc_epid_queue(struct ath9k_htc_priv *priv, u8 epid)
132{
133 struct ath_common *common = ath9k_hw_common(priv->ah);
134 struct sk_buff_head *epid_queue = NULL;
135
136 if (epid == priv->mgmt_ep)
137 epid_queue = &priv->tx.mgmt_ep_queue;
138 else if (epid == priv->cab_ep)
139 epid_queue = &priv->tx.cab_ep_queue;
140 else if (epid == priv->data_be_ep)
141 epid_queue = &priv->tx.data_be_queue;
142 else if (epid == priv->data_bk_ep)
143 epid_queue = &priv->tx.data_bk_queue;
144 else if (epid == priv->data_vi_ep)
145 epid_queue = &priv->tx.data_vi_queue;
146 else if (epid == priv->data_vo_ep)
147 epid_queue = &priv->tx.data_vo_queue;
148 else
149 ath_err(common, "Invalid EPID: %d\n", epid);
150
151 return epid_queue;
152}
153
154/*
155 * Removes the driver header and returns the TX slot number
156 */
157static inline int strip_drv_header(struct ath9k_htc_priv *priv,
158 struct sk_buff *skb)
159{
160 struct ath_common *common = ath9k_hw_common(priv->ah);
161 struct ath9k_htc_tx_ctl *tx_ctl;
162 int slot;
163
164 tx_ctl = HTC_SKB_CB(skb);
165
166 if (tx_ctl->epid == priv->mgmt_ep) {
167 struct tx_mgmt_hdr *tx_mhdr =
168 (struct tx_mgmt_hdr *)skb->data;
169 slot = tx_mhdr->cookie;
170 skb_pull(skb, sizeof(struct tx_mgmt_hdr));
171 } else if ((tx_ctl->epid == priv->data_bk_ep) ||
172 (tx_ctl->epid == priv->data_be_ep) ||
173 (tx_ctl->epid == priv->data_vi_ep) ||
174 (tx_ctl->epid == priv->data_vo_ep) ||
175 (tx_ctl->epid == priv->cab_ep)) {
176 struct tx_frame_hdr *tx_fhdr =
177 (struct tx_frame_hdr *)skb->data;
178 slot = tx_fhdr->cookie;
179 skb_pull(skb, sizeof(struct tx_frame_hdr));
180 } else {
181 ath_err(common, "Unsupported EPID: %d\n", tx_ctl->epid);
182 slot = -EINVAL;
183 }
184
185 return slot;
186}
187
56int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum, 188int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
57 struct ath9k_tx_queue_info *qinfo) 189 struct ath9k_tx_queue_info *qinfo)
58{ 190{
@@ -79,23 +211,140 @@ int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
79 return error; 211 return error;
80} 212}
81 213
82int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) 214static void ath9k_htc_tx_mgmt(struct ath9k_htc_priv *priv,
215 struct ath9k_htc_vif *avp,
216 struct sk_buff *skb,
217 u8 sta_idx, u8 vif_idx, u8 slot)
218{
219 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
220 struct ieee80211_mgmt *mgmt;
221 struct ieee80211_hdr *hdr;
222 struct tx_mgmt_hdr mgmt_hdr;
223 struct ath9k_htc_tx_ctl *tx_ctl;
224 u8 *tx_fhdr;
225
226 tx_ctl = HTC_SKB_CB(skb);
227 hdr = (struct ieee80211_hdr *) skb->data;
228
229 memset(tx_ctl, 0, sizeof(*tx_ctl));
230 memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
231
232 /*
233 * Set the TSF adjust value for probe response
234 * frame also.
235 */
236 if (avp && unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
237 mgmt = (struct ieee80211_mgmt *)skb->data;
238 mgmt->u.probe_resp.timestamp = avp->tsfadjust;
239 }
240
241 tx_ctl->type = ATH9K_HTC_MGMT;
242
243 mgmt_hdr.node_idx = sta_idx;
244 mgmt_hdr.vif_idx = vif_idx;
245 mgmt_hdr.tidno = 0;
246 mgmt_hdr.flags = 0;
247 mgmt_hdr.cookie = slot;
248
249 mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
250 if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
251 mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
252 else
253 mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
254
255 tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
256 memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
257 tx_ctl->epid = priv->mgmt_ep;
258}
259
260static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
261 struct ieee80211_vif *vif,
262 struct sk_buff *skb,
263 u8 sta_idx, u8 vif_idx, u8 slot,
264 bool is_cab)
265{
266 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
267 struct ieee80211_hdr *hdr;
268 struct ath9k_htc_tx_ctl *tx_ctl;
269 struct tx_frame_hdr tx_hdr;
270 u32 flags = 0;
271 u8 *qc, *tx_fhdr;
272 u16 qnum;
273
274 tx_ctl = HTC_SKB_CB(skb);
275 hdr = (struct ieee80211_hdr *) skb->data;
276
277 memset(tx_ctl, 0, sizeof(*tx_ctl));
278 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
279
280 tx_hdr.node_idx = sta_idx;
281 tx_hdr.vif_idx = vif_idx;
282 tx_hdr.cookie = slot;
283
284 /*
285 * This is a bit redundant but it helps to get
286 * the per-packet index quickly when draining the
287 * TX queue in the HIF layer. Otherwise we would
288 * have to parse the packet contents ...
289 */
290 tx_ctl->sta_idx = sta_idx;
291
292 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
293 tx_ctl->type = ATH9K_HTC_AMPDU;
294 tx_hdr.data_type = ATH9K_HTC_AMPDU;
295 } else {
296 tx_ctl->type = ATH9K_HTC_NORMAL;
297 tx_hdr.data_type = ATH9K_HTC_NORMAL;
298 }
299
300 if (ieee80211_is_data_qos(hdr->frame_control)) {
301 qc = ieee80211_get_qos_ctl(hdr);
302 tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
303 }
304
305 /* Check for RTS protection */
306 if (priv->hw->wiphy->rts_threshold != (u32) -1)
307 if (skb->len > priv->hw->wiphy->rts_threshold)
308 flags |= ATH9K_HTC_TX_RTSCTS;
309
310 /* CTS-to-self */
311 if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
312 (vif && vif->bss_conf.use_cts_prot))
313 flags |= ATH9K_HTC_TX_CTSONLY;
314
315 tx_hdr.flags = cpu_to_be32(flags);
316 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
317 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
318 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
319 else
320 tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
321
322 tx_fhdr = skb_push(skb, sizeof(tx_hdr));
323 memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
324
325 if (is_cab) {
326 CAB_STAT_INC;
327 tx_ctl->epid = priv->cab_ep;
328 return;
329 }
330
331 qnum = skb_get_queue_mapping(skb);
332 tx_ctl->epid = get_htc_epid(priv, qnum);
333}
334
335int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
336 struct sk_buff *skb,
337 u8 slot, bool is_cab)
83{ 338{
84 struct ieee80211_hdr *hdr; 339 struct ieee80211_hdr *hdr;
85 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 340 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
86 struct ieee80211_sta *sta = tx_info->control.sta; 341 struct ieee80211_sta *sta = tx_info->control.sta;
87 struct ieee80211_vif *vif = tx_info->control.vif; 342 struct ieee80211_vif *vif = tx_info->control.vif;
88 struct ath9k_htc_sta *ista; 343 struct ath9k_htc_sta *ista;
89 struct ath9k_htc_vif *avp; 344 struct ath9k_htc_vif *avp = NULL;
90 struct ath9k_htc_tx_ctl tx_ctl;
91 enum htc_endpoint_id epid;
92 u16 qnum;
93 __le16 fc;
94 u8 *tx_fhdr;
95 u8 sta_idx, vif_idx; 345 u8 sta_idx, vif_idx;
96 346
97 hdr = (struct ieee80211_hdr *) skb->data; 347 hdr = (struct ieee80211_hdr *) skb->data;
98 fc = hdr->frame_control;
99 348
100 /* 349 /*
101 * Find out on which interface this packet has to be 350 * Find out on which interface this packet has to be
@@ -124,218 +373,430 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
124 sta_idx = priv->vif_sta_pos[vif_idx]; 373 sta_idx = priv->vif_sta_pos[vif_idx];
125 } 374 }
126 375
127 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl)); 376 if (ieee80211_is_data(hdr->frame_control))
377 ath9k_htc_tx_data(priv, vif, skb,
378 sta_idx, vif_idx, slot, is_cab);
379 else
380 ath9k_htc_tx_mgmt(priv, avp, skb,
381 sta_idx, vif_idx, slot);
128 382
129 if (ieee80211_is_data(fc)) {
130 struct tx_frame_hdr tx_hdr;
131 u32 flags = 0;
132 u8 *qc;
133 383
134 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); 384 return htc_send(priv->htc, skb);
385}
135 386
136 tx_hdr.node_idx = sta_idx; 387static inline bool __ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
137 tx_hdr.vif_idx = vif_idx; 388 struct ath9k_htc_sta *ista, u8 tid)
389{
390 bool ret = false;
138 391
139 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 392 spin_lock_bh(&priv->tx.tx_lock);
140 tx_ctl.type = ATH9K_HTC_AMPDU; 393 if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
141 tx_hdr.data_type = ATH9K_HTC_AMPDU; 394 ret = true;
142 } else { 395 spin_unlock_bh(&priv->tx.tx_lock);
143 tx_ctl.type = ATH9K_HTC_NORMAL; 396
144 tx_hdr.data_type = ATH9K_HTC_NORMAL; 397 return ret;
145 } 398}
399
400static void ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
401 struct ieee80211_vif *vif,
402 struct sk_buff *skb)
403{
404 struct ieee80211_sta *sta;
405 struct ieee80211_hdr *hdr;
406 __le16 fc;
407
408 hdr = (struct ieee80211_hdr *) skb->data;
409 fc = hdr->frame_control;
410
411 rcu_read_lock();
412
413 sta = ieee80211_find_sta(vif, hdr->addr1);
414 if (!sta) {
415 rcu_read_unlock();
416 return;
417 }
146 418
419 if (sta && conf_is_ht(&priv->hw->conf) &&
420 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
147 if (ieee80211_is_data_qos(fc)) { 421 if (ieee80211_is_data_qos(fc)) {
422 u8 *qc, tid;
423 struct ath9k_htc_sta *ista;
424
148 qc = ieee80211_get_qos_ctl(hdr); 425 qc = ieee80211_get_qos_ctl(hdr);
149 tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 426 tid = qc[0] & 0xf;
427 ista = (struct ath9k_htc_sta *)sta->drv_priv;
428 if (__ath9k_htc_check_tx_aggr(priv, ista, tid)) {
429 ieee80211_start_tx_ba_session(sta, tid, 0);
430 spin_lock_bh(&priv->tx.tx_lock);
431 ista->tid_state[tid] = AGGR_PROGRESS;
432 spin_unlock_bh(&priv->tx.tx_lock);
433 }
150 } 434 }
435 }
151 436
152 /* Check for RTS protection */ 437 rcu_read_unlock();
153 if (priv->hw->wiphy->rts_threshold != (u32) -1) 438}
154 if (skb->len > priv->hw->wiphy->rts_threshold)
155 flags |= ATH9K_HTC_TX_RTSCTS;
156 439
157 /* CTS-to-self */ 440static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
158 if (!(flags & ATH9K_HTC_TX_RTSCTS) && 441 struct sk_buff *skb,
159 (vif && vif->bss_conf.use_cts_prot)) 442 struct __wmi_event_txstatus *txs)
160 flags |= ATH9K_HTC_TX_CTSONLY; 443{
444 struct ieee80211_vif *vif;
445 struct ath9k_htc_tx_ctl *tx_ctl;
446 struct ieee80211_tx_info *tx_info;
447 struct ieee80211_tx_rate *rate;
448 struct ieee80211_conf *cur_conf = &priv->hw->conf;
449 bool txok;
450 int slot;
161 451
162 tx_hdr.flags = cpu_to_be32(flags); 452 slot = strip_drv_header(priv, skb);
163 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb); 453 if (slot < 0) {
164 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR) 454 dev_kfree_skb_any(skb);
165 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID; 455 return;
166 else 456 }
167 tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
168 457
169 tx_fhdr = skb_push(skb, sizeof(tx_hdr)); 458 tx_ctl = HTC_SKB_CB(skb);
170 memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr)); 459 txok = tx_ctl->txok;
460 tx_info = IEEE80211_SKB_CB(skb);
461 vif = tx_info->control.vif;
462 rate = &tx_info->status.rates[0];
171 463
172 qnum = skb_get_queue_mapping(skb); 464 memset(&tx_info->status, 0, sizeof(tx_info->status));
173 465
174 switch (qnum) { 466 /*
175 case 0: 467 * URB submission failed for this frame; it never reached
176 TX_QSTAT_INC(WME_AC_VO); 468 * the target.
177 epid = priv->data_vo_ep; 469 */
178 break; 470 if (!txok || !vif || !txs)
179 case 1: 471 goto send_mac80211;
180 TX_QSTAT_INC(WME_AC_VI); 472
181 epid = priv->data_vi_ep; 473 if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK)
182 break; 474 tx_info->flags |= IEEE80211_TX_STAT_ACK;
183 case 2:
184 TX_QSTAT_INC(WME_AC_BE);
185 epid = priv->data_be_ep;
186 break;
187 case 3:
188 default:
189 TX_QSTAT_INC(WME_AC_BK);
190 epid = priv->data_bk_ep;
191 break;
192 }
193 } else {
194 struct tx_mgmt_hdr mgmt_hdr;
195 475
196 memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr)); 476 if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT)
477 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
197 478
198 tx_ctl.type = ATH9K_HTC_NORMAL; 479 if (txs->ts_flags & ATH9K_HTC_TXSTAT_RTC_CTS)
480 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
199 481
200 mgmt_hdr.node_idx = sta_idx; 482 rate->count = 1;
201 mgmt_hdr.vif_idx = vif_idx; 483 rate->idx = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_RATE);
202 mgmt_hdr.tidno = 0;
203 mgmt_hdr.flags = 0;
204 484
205 mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb); 485 if (txs->ts_flags & ATH9K_HTC_TXSTAT_MCS) {
206 if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR) 486 rate->flags |= IEEE80211_TX_RC_MCS;
207 mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
208 else
209 mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
210 487
211 tx_fhdr = skb_push(skb, sizeof(mgmt_hdr)); 488 if (txs->ts_flags & ATH9K_HTC_TXSTAT_CW40)
212 memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr)); 489 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
213 epid = priv->mgmt_ep; 490 if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
491 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
492 } else {
493 if (cur_conf->channel->band == IEEE80211_BAND_5GHZ)
494 rate->idx += 4; /* No CCK rates */
214 } 495 }
215 496
216 return htc_send(priv->htc, skb, epid, &tx_ctl); 497 ath9k_htc_check_tx_aggr(priv, vif, skb);
498
499send_mac80211:
500 spin_lock_bh(&priv->tx.tx_lock);
501 if (WARN_ON(--priv->tx.queued_cnt < 0))
502 priv->tx.queued_cnt = 0;
503 spin_unlock_bh(&priv->tx.tx_lock);
504
505 ath9k_htc_tx_clear_slot(priv, slot);
506
507 /* Send status to mac80211 */
508 ieee80211_tx_status(priv->hw, skb);
217} 509}
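
ath9k_htc_tx_process() translates the firmware's status flags into mac80211's ieee80211_tx_rate: ACK and filter status are copied across, MCS/HT40/short-GI become rate flags, and legacy rates on 5 GHz get the fixed "+= 4" index adjustment noted in the /* No CCK rates */ comment. A hedged stand-alone sketch of that mapping; the ST_* bits and the rate_report struct are invented, the driver's ATH9K_HTC_TXSTAT_* values differ:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in status bits for the example. */
#define ST_ACK  (1u << 0)
#define ST_MCS  (1u << 1)
#define ST_CW40 (1u << 2)
#define ST_SGI  (1u << 3)

struct rate_report {
        int idx;
        bool acked, is_mcs, ht40, short_gi;
};

static struct rate_report decode_txstatus(uint32_t flags, int rate_idx,
                                          bool band_5ghz)
{
        struct rate_report r = { .idx = rate_idx };

        r.acked = flags & ST_ACK;
        if (flags & ST_MCS) {
                r.is_mcs = true;
                r.ht40 = flags & ST_CW40;
                r.short_gi = flags & ST_SGI;
        } else if (band_5ghz) {
                /* Legacy rate on 5 GHz: apply the driver's fixed offset of 4 */
                r.idx += 4;
        }
        return r;
}

int main(void)
{
        struct rate_report r = decode_txstatus(ST_ACK, 2, true);

        printf("idx=%d acked=%d\n", r.idx, r.acked);   /* idx=6 acked=1 */
        return 0;
}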
218 510
219static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv, 511static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv,
220 struct ath9k_htc_sta *ista, u8 tid) 512 struct sk_buff_head *queue)
221{ 513{
222 bool ret = false; 514 struct sk_buff *skb;
223 515
224 spin_lock_bh(&priv->tx_lock); 516 while ((skb = skb_dequeue(queue)) != NULL) {
225 if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP)) 517 ath9k_htc_tx_process(priv, skb, NULL);
226 ret = true; 518 }
227 spin_unlock_bh(&priv->tx_lock); 519}
228 520
229 return ret; 521void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
522{
523 struct ath9k_htc_tx_event *event, *tmp;
524
525 spin_lock_bh(&priv->tx.tx_lock);
526 priv->tx.flags |= ATH9K_HTC_OP_TX_DRAIN;
527 spin_unlock_bh(&priv->tx.tx_lock);
528
529 /*
530 * Ensure that all pending TX frames are flushed,
531 * and that the TX completion/failed tasklets are killed.
532 */
533 htc_stop(priv->htc);
534 tasklet_kill(&priv->wmi->wmi_event_tasklet);
535 tasklet_kill(&priv->tx_failed_tasklet);
536
537 ath9k_htc_tx_drainq(priv, &priv->tx.mgmt_ep_queue);
538 ath9k_htc_tx_drainq(priv, &priv->tx.cab_ep_queue);
539 ath9k_htc_tx_drainq(priv, &priv->tx.data_be_queue);
540 ath9k_htc_tx_drainq(priv, &priv->tx.data_bk_queue);
541 ath9k_htc_tx_drainq(priv, &priv->tx.data_vi_queue);
542 ath9k_htc_tx_drainq(priv, &priv->tx.data_vo_queue);
543 ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
544
545 /*
546 * The TX cleanup timer has already been killed.
547 */
548 spin_lock_bh(&priv->wmi->event_lock);
549 list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
550 list_del(&event->list);
551 kfree(event);
552 }
553 spin_unlock_bh(&priv->wmi->event_lock);
554
555 spin_lock_bh(&priv->tx.tx_lock);
556 priv->tx.flags &= ~ATH9K_HTC_OP_TX_DRAIN;
557 spin_unlock_bh(&priv->tx.tx_lock);
230} 558}
231 559
232void ath9k_tx_tasklet(unsigned long data) 560void ath9k_tx_failed_tasklet(unsigned long data)
233{ 561{
234 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 562 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
235 struct ieee80211_vif *vif;
236 struct ieee80211_sta *sta;
237 struct ieee80211_hdr *hdr;
238 struct ieee80211_tx_info *tx_info;
239 struct sk_buff *skb = NULL;
240 __le16 fc;
241 563
242 while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) { 564 spin_lock_bh(&priv->tx.tx_lock);
565 if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
566 spin_unlock_bh(&priv->tx.tx_lock);
567 return;
568 }
569 spin_unlock_bh(&priv->tx.tx_lock);
243 570
244 hdr = (struct ieee80211_hdr *) skb->data; 571 ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
245 fc = hdr->frame_control; 572}
246 tx_info = IEEE80211_SKB_CB(skb);
247 vif = tx_info->control.vif;
248 573
249 memset(&tx_info->status, 0, sizeof(tx_info->status)); 574static inline bool check_cookie(struct ath9k_htc_priv *priv,
575 struct sk_buff *skb,
576 u8 cookie, u8 epid)
577{
578 u8 fcookie = 0;
579
580 if (epid == priv->mgmt_ep) {
581 struct tx_mgmt_hdr *hdr;
582 hdr = (struct tx_mgmt_hdr *) skb->data;
583 fcookie = hdr->cookie;
584 } else if ((epid == priv->data_bk_ep) ||
585 (epid == priv->data_be_ep) ||
586 (epid == priv->data_vi_ep) ||
587 (epid == priv->data_vo_ep) ||
588 (epid == priv->cab_ep)) {
589 struct tx_frame_hdr *hdr;
590 hdr = (struct tx_frame_hdr *) skb->data;
591 fcookie = hdr->cookie;
592 }
250 593
251 if (!vif) 594 if (fcookie == cookie)
252 goto send_mac80211; 595 return true;
253 596
254 rcu_read_lock(); 597 return false;
598}
255 599
256 sta = ieee80211_find_sta(vif, hdr->addr1); 600static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv,
257 if (!sta) { 601 struct __wmi_event_txstatus *txs)
258 rcu_read_unlock(); 602{
259 ieee80211_tx_status(priv->hw, skb); 603 struct ath_common *common = ath9k_hw_common(priv->ah);
260 continue; 604 struct sk_buff_head *epid_queue;
605 struct sk_buff *skb, *tmp;
606 unsigned long flags;
607 u8 epid = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_EPID);
608
609 epid_queue = get_htc_epid_queue(priv, epid);
610 if (!epid_queue)
611 return NULL;
612
613 spin_lock_irqsave(&epid_queue->lock, flags);
614 skb_queue_walk_safe(epid_queue, skb, tmp) {
615 if (check_cookie(priv, skb, txs->cookie, epid)) {
616 __skb_unlink(skb, epid_queue);
617 spin_unlock_irqrestore(&epid_queue->lock, flags);
618 return skb;
261 } 619 }
620 }
621 spin_unlock_irqrestore(&epid_queue->lock, flags);
622
623 ath_dbg(common, ATH_DBG_XMIT,
624 "No matching packet for cookie: %d, epid: %d\n",
625 txs->cookie, epid);
262 626
263 /* Check if we need to start aggregation */ 627 return NULL;
628}
264 629
265 if (sta && conf_is_ht(&priv->hw->conf) && 630void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
266 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { 631{
267 if (ieee80211_is_data_qos(fc)) { 632 struct wmi_event_txstatus *txs = (struct wmi_event_txstatus *)wmi_event;
268 u8 *qc, tid; 633 struct __wmi_event_txstatus *__txs;
269 struct ath9k_htc_sta *ista; 634 struct sk_buff *skb;
635 struct ath9k_htc_tx_event *tx_pend;
636 int i;
270 637
271 qc = ieee80211_get_qos_ctl(hdr); 638 for (i = 0; i < txs->cnt; i++) {
272 tid = qc[0] & 0xf; 639 WARN_ON(txs->cnt > HTC_MAX_TX_STATUS);
273 ista = (struct ath9k_htc_sta *)sta->drv_priv;
274 640
275 if (ath9k_htc_check_tx_aggr(priv, ista, tid)) { 641 __txs = &txs->txstatus[i];
276 ieee80211_start_tx_ba_session(sta, tid, 0);
277 spin_lock_bh(&priv->tx_lock);
278 ista->tid_state[tid] = AGGR_PROGRESS;
279 spin_unlock_bh(&priv->tx_lock);
280 }
281 }
282 }
283 642
284 rcu_read_unlock(); 643 skb = ath9k_htc_tx_get_packet(priv, __txs);
644 if (!skb) {
645 /*
646 * Store this event, so that the TX cleanup
647 * routine can check later for the needed packet.
648 */
649 tx_pend = kzalloc(sizeof(struct ath9k_htc_tx_event),
650 GFP_ATOMIC);
651 if (!tx_pend)
652 continue;
653
654 memcpy(&tx_pend->txs, __txs,
655 sizeof(struct __wmi_event_txstatus));
656
657 spin_lock(&priv->wmi->event_lock);
658 list_add_tail(&tx_pend->list,
659 &priv->wmi->pending_tx_events);
660 spin_unlock(&priv->wmi->event_lock);
285 661
286 send_mac80211: 662 continue;
287 /* Send status to mac80211 */ 663 }
288 ieee80211_tx_status(priv->hw, skb); 664
665 ath9k_htc_tx_process(priv, skb, __txs);
289 } 666 }
290 667
291 /* Wake TX queues if needed */ 668 /* Wake TX queues if needed */
292 spin_lock_bh(&priv->tx_lock); 669 ath9k_htc_check_wake_queues(priv);
293 if (priv->tx_queues_stop) {
294 priv->tx_queues_stop = false;
295 spin_unlock_bh(&priv->tx_lock);
296 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
297 "Waking up TX queues\n");
298 ieee80211_wake_queues(priv->hw);
299 return;
300 }
301 spin_unlock_bh(&priv->tx_lock);
302} 670}
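
When a status event arrives before its frame has been queued by the URB completion path, ath9k_htc_txstatus() stores the event on priv->wmi->pending_tx_events and lets the cleanup timer retry the match, dropping the event after ATH9K_HTC_TX_TIMEOUT_COUNT attempts. The shape of that mechanism is a bounded retry list; a stand-alone sketch in which the list layout, MAX_TRIES and the match callback are invented for the example:

#include <stdio.h>
#include <stdlib.h>

#define MAX_TRIES 2   /* illustrative; the driver uses ATH9K_HTC_TX_TIMEOUT_COUNT */

struct pending_event {
        int cookie;
        int tries;
        struct pending_event *next;
};

static struct pending_event *pending;

/* Called when a status event has no matching frame yet. */
static void defer_event(int cookie)
{
        struct pending_event *ev = calloc(1, sizeof(*ev));

        if (!ev)
                return;          /* mirrors the GFP_ATOMIC failure path: drop it */
        ev->cookie = cookie;
        ev->next = pending;
        pending = ev;
}

/* Periodic pass: retry each event, dropping it after MAX_TRIES attempts. */
static void retry_pending(int (*match)(int cookie))
{
        struct pending_event **pp = &pending;

        while (*pp) {
                struct pending_event *ev = *pp;

                if (match(ev->cookie) || ++ev->tries >= MAX_TRIES) {
                        *pp = ev->next;
                        free(ev);
                } else {
                        pp = &ev->next;
                }
        }
}

static int never_matches(int cookie) { (void)cookie; return 0; }

int main(void)
{
        defer_event(42);
        retry_pending(never_matches);   /* first miss: kept */
        retry_pending(never_matches);   /* second miss: expired and freed */
        printf("pending list empty: %s\n", pending ? "no" : "yes");
        return 0;
}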
303 671
304void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb, 672void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
305 enum htc_endpoint_id ep_id, bool txok) 673 enum htc_endpoint_id ep_id, bool txok)
306{ 674{
307 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv; 675 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
308 struct ath_common *common = ath9k_hw_common(priv->ah); 676 struct ath9k_htc_tx_ctl *tx_ctl;
309 struct ieee80211_tx_info *tx_info; 677 struct sk_buff_head *epid_queue;
310 678
311 if (!skb) 679 tx_ctl = HTC_SKB_CB(skb);
680 tx_ctl->txok = txok;
681 tx_ctl->timestamp = jiffies;
682
683 if (!txok) {
684 skb_queue_tail(&priv->tx.tx_failed, skb);
685 tasklet_schedule(&priv->tx_failed_tasklet);
312 return; 686 return;
687 }
313 688
314 if (ep_id == priv->mgmt_ep) { 689 epid_queue = get_htc_epid_queue(priv, ep_id);
315 skb_pull(skb, sizeof(struct tx_mgmt_hdr)); 690 if (!epid_queue) {
316 } else if ((ep_id == priv->data_bk_ep) ||
317 (ep_id == priv->data_be_ep) ||
318 (ep_id == priv->data_vi_ep) ||
319 (ep_id == priv->data_vo_ep)) {
320 skb_pull(skb, sizeof(struct tx_frame_hdr));
321 } else {
322 ath_err(common, "Unsupported TX EPID: %d\n", ep_id);
323 dev_kfree_skb_any(skb); 691 dev_kfree_skb_any(skb);
324 return; 692 return;
325 } 693 }
326 694
327 tx_info = IEEE80211_SKB_CB(skb); 695 skb_queue_tail(epid_queue, skb);
696}
328 697
329 if (txok) 698static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb)
330 tx_info->flags |= IEEE80211_TX_STAT_ACK; 699{
700 struct ath_common *common = ath9k_hw_common(priv->ah);
701 struct ath9k_htc_tx_ctl *tx_ctl;
331 702
332 skb_queue_tail(&priv->tx_queue, skb); 703 tx_ctl = HTC_SKB_CB(skb);
333 tasklet_schedule(&priv->tx_tasklet); 704
705 if (time_after(jiffies,
706 tx_ctl->timestamp +
707 msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
708 ath_dbg(common, ATH_DBG_XMIT,
709 "Dropping a packet due to TX timeout\n");
710 return true;
711 }
712
713 return false;
714}
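
check_packet() above decides staleness with time_after(), which stays correct across jiffies wraparound because the comparison is done on the signed difference of the two counters rather than on their raw values. A minimal model of that comparison; this sketches the generic kernel macro, not driver-specific code:

#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a is after b", the way the kernel's time_after() does it:
 * the unsigned difference is reinterpreted as signed, so a counter that has
 * wrapped still compares correctly as long as the two values are less than
 * half the counter range apart. */
static bool time_after_u32(unsigned int a, unsigned int b)
{
        return (int)(b - a) < 0;
}

int main(void)
{
        unsigned int timestamp = 0xfffffff0u;   /* just before wraparound */
        unsigned int now = 0x00000010u;         /* shortly after wraparound */

        /* The naive comparison gets this wrong; the signed trick does not. */
        printf("naive:  %d\n", now > timestamp);                 /* 0 */
        printf("signed: %d\n", time_after_u32(now, timestamp));  /* 1 */
        return 0;
}

The same signed-difference idiom is what keeps the ATH9K_HTC_TX_TIMEOUT_INTERVAL check valid even if jiffies wraps between queuing and cleanup.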
715
716static void ath9k_htc_tx_cleanup_queue(struct ath9k_htc_priv *priv,
717 struct sk_buff_head *epid_queue)
718{
719 bool process = false;
720 unsigned long flags;
721 struct sk_buff *skb, *tmp;
722 struct sk_buff_head queue;
723
724 skb_queue_head_init(&queue);
725
726 spin_lock_irqsave(&epid_queue->lock, flags);
727 skb_queue_walk_safe(epid_queue, skb, tmp) {
728 if (check_packet(priv, skb)) {
729 __skb_unlink(skb, epid_queue);
730 __skb_queue_tail(&queue, skb);
731 process = true;
732 }
733 }
734 spin_unlock_irqrestore(&epid_queue->lock, flags);
735
736 if (process) {
737 skb_queue_walk_safe(&queue, skb, tmp) {
738 __skb_unlink(skb, &queue);
739 ath9k_htc_tx_process(priv, skb, NULL);
740 }
741 }
742}
743
744void ath9k_htc_tx_cleanup_timer(unsigned long data)
745{
746 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) data;
747 struct ath_common *common = ath9k_hw_common(priv->ah);
748 struct ath9k_htc_tx_event *event, *tmp;
749 struct sk_buff *skb;
750
751 spin_lock(&priv->wmi->event_lock);
752 list_for_each_entry_safe(event, tmp, &priv->wmi->pending_tx_events, list) {
753
754 skb = ath9k_htc_tx_get_packet(priv, &event->txs);
755 if (skb) {
756 ath_dbg(common, ATH_DBG_XMIT,
757 "Found packet for cookie: %d, epid: %d\n",
758 event->txs.cookie,
759 MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
760
761 ath9k_htc_tx_process(priv, skb, &event->txs);
762 list_del(&event->list);
763 kfree(event);
764 continue;
765 }
766
767 if (++event->count >= ATH9K_HTC_TX_TIMEOUT_COUNT) {
768 list_del(&event->list);
769 kfree(event);
770 }
771 }
772 spin_unlock(&priv->wmi->event_lock);
773
774 /*
775 * Check if status-pending packets have to be cleaned up.
776 */
777 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.mgmt_ep_queue);
778 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.cab_ep_queue);
779 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_be_queue);
780 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_bk_queue);
781 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vi_queue);
782 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vo_queue);
783
784 /* Wake TX queues if needed */
785 ath9k_htc_check_wake_queues(priv);
786
787 mod_timer(&priv->tx.cleanup_timer,
788 jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
334} 789}
335 790
336int ath9k_tx_init(struct ath9k_htc_priv *priv) 791int ath9k_tx_init(struct ath9k_htc_priv *priv)
337{ 792{
338 skb_queue_head_init(&priv->tx_queue); 793 skb_queue_head_init(&priv->tx.mgmt_ep_queue);
794 skb_queue_head_init(&priv->tx.cab_ep_queue);
795 skb_queue_head_init(&priv->tx.data_be_queue);
796 skb_queue_head_init(&priv->tx.data_bk_queue);
797 skb_queue_head_init(&priv->tx.data_vi_queue);
798 skb_queue_head_init(&priv->tx.data_vo_queue);
799 skb_queue_head_init(&priv->tx.tx_failed);
339 return 0; 800 return 0;
340} 801}
341 802
@@ -507,8 +968,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
507 int last_rssi = ATH_RSSI_DUMMY_MARKER; 968 int last_rssi = ATH_RSSI_DUMMY_MARKER;
508 __le16 fc; 969 __le16 fc;
509 970
510 if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) { 971 if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
511 ath_err(common, "Corrupted RX frame, dropping\n"); 972 ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
973 skb->len);
512 goto rx_next; 974 goto rx_next;
513 } 975 }
514 976
@@ -522,6 +984,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
522 goto rx_next; 984 goto rx_next;
523 } 985 }
524 986
987 ath9k_htc_err_stat_rx(priv, rxstatus);
988
525 /* Get the RX status information */ 989 /* Get the RX status information */
526 memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE); 990 memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
527 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE); 991 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 62e139a30a74..cee970fdf652 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -17,8 +17,8 @@
17#include "htc.h" 17#include "htc.h"
18 18
19static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, 19static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
20 u16 len, u8 flags, u8 epid, 20 u16 len, u8 flags, u8 epid)
21 struct ath9k_htc_tx_ctl *tx_ctl) 21
22{ 22{
23 struct htc_frame_hdr *hdr; 23 struct htc_frame_hdr *hdr;
24 struct htc_endpoint *endpoint = &target->endpoint[epid]; 24 struct htc_endpoint *endpoint = &target->endpoint[epid];
@@ -30,8 +30,8 @@ static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
30 hdr->flags = flags; 30 hdr->flags = flags;
31 hdr->payload_len = cpu_to_be16(len); 31 hdr->payload_len = cpu_to_be16(len);
32 32
33 status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb, 33 status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
34 tx_ctl); 34
35 return status; 35 return status;
36} 36}
37 37
@@ -162,7 +162,7 @@ static int htc_config_pipe_credits(struct htc_target *target)
162 162
163 target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS; 163 target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
164 164
165 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL); 165 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
166 if (ret) 166 if (ret)
167 goto err; 167 goto err;
168 168
@@ -197,7 +197,7 @@ static int htc_setup_complete(struct htc_target *target)
197 197
198 target->htc_flags |= HTC_OP_START_WAIT; 198 target->htc_flags |= HTC_OP_START_WAIT;
199 199
200 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL); 200 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
201 if (ret) 201 if (ret)
202 goto err; 202 goto err;
203 203
@@ -268,7 +268,7 @@ int htc_connect_service(struct htc_target *target,
268 conn_msg->dl_pipeid = endpoint->dl_pipeid; 268 conn_msg->dl_pipeid = endpoint->dl_pipeid;
269 conn_msg->ul_pipeid = endpoint->ul_pipeid; 269 conn_msg->ul_pipeid = endpoint->ul_pipeid;
270 270
271 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL); 271 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
272 if (ret) 272 if (ret)
273 goto err; 273 goto err;
274 274
@@ -286,35 +286,33 @@ err:
286 return ret; 286 return ret;
287} 287}
288 288
289int htc_send(struct htc_target *target, struct sk_buff *skb, 289int htc_send(struct htc_target *target, struct sk_buff *skb)
290 enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
291{ 290{
292 return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl); 291 struct ath9k_htc_tx_ctl *tx_ctl;
292
293 tx_ctl = HTC_SKB_CB(skb);
294 return htc_issue_send(target, skb, skb->len, 0, tx_ctl->epid);
293} 295}
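
With this series htc_send() no longer takes the endpoint id as a parameter: the TX path records it, together with the rest of the per-packet state, in the skb's control buffer, and htc_send() reads it back through HTC_SKB_CB(). The underlying trick is simply a typed view of a fixed-size scratch area that travels with the packet; a hedged sketch with a toy packet type, where struct packet, CB_SIZE and the field set of struct tx_ctl are illustrative and the real struct ath9k_htc_tx_ctl carries more state:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CB_SIZE 48   /* same idea as skb->cb: fixed scratch space per packet */

/* Toy stand-in for struct sk_buff: a payload plus a scratch area. */
struct packet {
        unsigned char cb[CB_SIZE];
        const char *payload;
};

/* Per-packet TX state kept inside the packet itself (like HTC_SKB_CB). */
struct tx_ctl {
        uint8_t epid;
        uint8_t txok;
};

_Static_assert(sizeof(struct tx_ctl) <= CB_SIZE,
               "control data must fit in the scratch area");

#define PKT_TX_CTL(p) ((struct tx_ctl *)(p)->cb)

static void queue_for_endpoint(struct packet *p, uint8_t epid)
{
        memset(p->cb, 0, sizeof(p->cb));
        PKT_TX_CTL(p)->epid = epid;
}

static void send_packet(struct packet *p)
{
        /* The sender recovers the endpoint from the packet, no extra argument. */
        printf("sending '%s' on endpoint %u\n", p->payload, PKT_TX_CTL(p)->epid);
}

int main(void)
{
        struct packet p = { .payload = "beacon" };

        queue_for_endpoint(&p, 5);
        send_packet(&p);
        return 0;
}

The real skb control buffer is a 48-byte array, which is why per-packet driver state kept this way has to stay small.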
294 296
295void htc_stop(struct htc_target *target) 297int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
298 enum htc_endpoint_id epid)
296{ 299{
297 enum htc_endpoint_id epid; 300 return htc_issue_send(target, skb, skb->len, 0, epid);
298 struct htc_endpoint *endpoint; 301}
299 302
300 for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) { 303void htc_stop(struct htc_target *target)
301 endpoint = &target->endpoint[epid]; 304{
302 if (endpoint->service_id != 0) 305 target->hif->stop(target->hif_dev);
303 target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
304 }
305} 306}
306 307
307void htc_start(struct htc_target *target) 308void htc_start(struct htc_target *target)
308{ 309{
309 enum htc_endpoint_id epid; 310 target->hif->start(target->hif_dev);
310 struct htc_endpoint *endpoint; 311}
311 312
312 for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) { 313void htc_sta_drain(struct htc_target *target, u8 idx)
313 endpoint = &target->endpoint[epid]; 314{
314 if (endpoint->service_id != 0) 315 target->hif->sta_drain(target->hif_dev, idx);
315 target->hif->start(target->hif_dev,
316 endpoint->ul_pipeid);
317 }
318} 316}
319 317
320void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, 318void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index ecd018798c47..91a5305db95a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -33,10 +33,10 @@ struct ath9k_htc_hif {
33 u8 control_dl_pipe; 33 u8 control_dl_pipe;
34 u8 control_ul_pipe; 34 u8 control_ul_pipe;
35 35
36 void (*start) (void *hif_handle, u8 pipe); 36 void (*start) (void *hif_handle);
37 void (*stop) (void *hif_handle, u8 pipe); 37 void (*stop) (void *hif_handle);
38 int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf, 38 void (*sta_drain) (void *hif_handle, u8 idx);
39 struct ath9k_htc_tx_ctl *tx_ctl); 39 int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf);
40}; 40};
41 41
42enum htc_endpoint_id { 42enum htc_endpoint_id {
@@ -83,21 +83,10 @@ struct htc_ep_callbacks {
83 void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id); 83 void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
84}; 84};
85 85
86#define HTC_TX_QUEUE_SIZE 256
87
88struct htc_txq {
89 struct sk_buff *buf[HTC_TX_QUEUE_SIZE];
90 u32 txqdepth;
91 u16 txbuf_cnt;
92 u16 txq_head;
93 u16 txq_tail;
94};
95
96struct htc_endpoint { 86struct htc_endpoint {
97 u16 service_id; 87 u16 service_id;
98 88
99 struct htc_ep_callbacks ep_callbacks; 89 struct htc_ep_callbacks ep_callbacks;
100 struct htc_txq htc_txq;
101 u32 max_txqdepth; 90 u32 max_txqdepth;
102 int max_msglen; 91 int max_msglen;
103 92
@@ -205,10 +194,12 @@ int htc_init(struct htc_target *target);
205int htc_connect_service(struct htc_target *target, 194int htc_connect_service(struct htc_target *target,
206 struct htc_service_connreq *service_connreq, 195 struct htc_service_connreq *service_connreq,
207 enum htc_endpoint_id *conn_rsp_eid); 196 enum htc_endpoint_id *conn_rsp_eid);
208int htc_send(struct htc_target *target, struct sk_buff *skb, 197int htc_send(struct htc_target *target, struct sk_buff *skb);
209 enum htc_endpoint_id eid, struct ath9k_htc_tx_ctl *tx_ctl); 198int htc_send_epid(struct htc_target *target, struct sk_buff *skb,
199 enum htc_endpoint_id epid);
210void htc_stop(struct htc_target *target); 200void htc_stop(struct htc_target *target);
211void htc_start(struct htc_target *target); 201void htc_start(struct htc_target *target);
202void htc_sta_drain(struct htc_target *target, u8 idx);
212 203
213void ath9k_htc_rx_msg(struct htc_target *htc_handle, 204void ath9k_htc_rx_msg(struct htc_target *htc_handle,
214 struct sk_buff *skb, u32 len, u8 pipe_id); 205 struct sk_buff *skb, u32 len, u8 pipe_id);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index c8f254fe0f0b..8b8f0445aef8 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -116,16 +116,21 @@ static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
116 ath9k_hw_ops(ah)->clr11n_aggr(ah, ds); 116 ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
117} 117}
118 118
119static inline void ath9k_hw_set11n_burstduration(struct ath_hw *ah, void *ds, 119static inline void ath9k_hw_set_clrdmask(struct ath_hw *ah, void *ds, bool val)
120 u32 burstDuration)
121{ 120{
122 ath9k_hw_ops(ah)->set11n_burstduration(ah, ds, burstDuration); 121 ath9k_hw_ops(ah)->set_clrdmask(ah, ds, val);
123} 122}
124 123
125static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds, 124static inline void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
126 u32 vmf) 125 struct ath_hw_antcomb_conf *antconf)
127{ 126{
128 ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf); 127 ath9k_hw_ops(ah)->antdiv_comb_conf_get(ah, antconf);
128}
129
130static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
131 struct ath_hw_antcomb_conf *antconf)
132{
133 ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
129} 134}
130 135
131/* Private hardware call ops */ 136/* Private hardware call ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c95bc5cc1a1f..b75b5dca4e29 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -130,6 +130,20 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
130} 130}
131EXPORT_SYMBOL(ath9k_hw_wait); 131EXPORT_SYMBOL(ath9k_hw_wait);
132 132
133void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
134 int column, unsigned int *writecnt)
135{
136 int r;
137
138 ENABLE_REGWRITE_BUFFER(ah);
139 for (r = 0; r < array->ia_rows; r++) {
140 REG_WRITE(ah, INI_RA(array, r, 0),
141 INI_RA(array, r, column));
142 DO_DELAY(*writecnt);
143 }
144 REGWRITE_BUFFER_FLUSH(ah);
145}
146
133u32 ath9k_hw_reverse_bits(u32 val, u32 n) 147u32 ath9k_hw_reverse_bits(u32 val, u32 n)
134{ 148{
135 u32 retval; 149 u32 retval;
@@ -142,25 +156,6 @@ u32 ath9k_hw_reverse_bits(u32 val, u32 n)
142 return retval; 156 return retval;
143} 157}
144 158
145bool ath9k_get_channel_edges(struct ath_hw *ah,
146 u16 flags, u16 *low,
147 u16 *high)
148{
149 struct ath9k_hw_capabilities *pCap = &ah->caps;
150
151 if (flags & CHANNEL_5GHZ) {
152 *low = pCap->low_5ghz_chan;
153 *high = pCap->high_5ghz_chan;
154 return true;
155 }
156 if ((flags & CHANNEL_2GHZ)) {
157 *low = pCap->low_2ghz_chan;
158 *high = pCap->high_2ghz_chan;
159 return true;
160 }
161 return false;
162}
163
164u16 ath9k_hw_computetxtime(struct ath_hw *ah, 159u16 ath9k_hw_computetxtime(struct ath_hw *ah,
165 u8 phy, int kbps, 160 u8 phy, int kbps,
166 u32 frameLen, u16 rateix, 161 u32 frameLen, u16 rateix,
@@ -252,6 +247,17 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
252{ 247{
253 u32 val; 248 u32 val;
254 249
250 switch (ah->hw_version.devid) {
251 case AR5416_AR9100_DEVID:
252 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
253 break;
254 case AR9300_DEVID_AR9340:
255 ah->hw_version.macVersion = AR_SREV_VERSION_9340;
256 val = REG_READ(ah, AR_SREV);
257 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
258 return;
259 }
260
255 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 261 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
256 262
257 if (val == 0xFF) { 263 if (val == 0xFF) {
@@ -364,11 +370,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
364 ah->config.spurchans[i][1] = AR_NO_SPUR; 370 ah->config.spurchans[i][1] = AR_NO_SPUR;
365 } 371 }
366 372
367 if (ah->hw_version.devid != AR2427_DEVID_PCIE)
368 ah->config.ht_enable = 1;
369 else
370 ah->config.ht_enable = 0;
371
372 /* PAPRD needs some more work to be enabled */ 373 /* PAPRD needs some more work to be enabled */
373 ah->config.paprd_disable = 1; 374 ah->config.paprd_disable = 1;
374 375
@@ -410,6 +411,8 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
410 ah->sta_id1_defaults = 411 ah->sta_id1_defaults =
411 AR_STA_ID1_CRPT_MIC_ENABLE | 412 AR_STA_ID1_CRPT_MIC_ENABLE |
412 AR_STA_ID1_MCAST_KSRCH; 413 AR_STA_ID1_MCAST_KSRCH;
414 if (AR_SREV_9100(ah))
415 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
413 ah->enable_32kHz_clock = DONT_USE_32KHZ; 416 ah->enable_32kHz_clock = DONT_USE_32KHZ;
414 ah->slottime = 20; 417 ah->slottime = 20;
415 ah->globaltxtimeout = (u32) -1; 418 ah->globaltxtimeout = (u32) -1;
@@ -470,7 +473,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
470 return ecode; 473 return ecode;
471 } 474 }
472 475
473 if (!AR_SREV_9100(ah)) { 476 if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
474 ath9k_hw_ani_setup(ah); 477 ath9k_hw_ani_setup(ah);
475 ath9k_hw_ani_init(ah); 478 ath9k_hw_ani_init(ah);
476 } 479 }
@@ -492,9 +495,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
492 struct ath_common *common = ath9k_hw_common(ah); 495 struct ath_common *common = ath9k_hw_common(ah);
493 int r = 0; 496 int r = 0;
494 497
495 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
496 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
497
498 ath9k_hw_read_revisions(ah); 498 ath9k_hw_read_revisions(ah);
499 499
500 /* 500 /*
@@ -552,6 +552,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
552 case AR_SREV_VERSION_9271: 552 case AR_SREV_VERSION_9271:
553 case AR_SREV_VERSION_9300: 553 case AR_SREV_VERSION_9300:
554 case AR_SREV_VERSION_9485: 554 case AR_SREV_VERSION_9485:
555 case AR_SREV_VERSION_9340:
555 break; 556 break;
556 default: 557 default:
557 ath_err(common, 558 ath_err(common,
@@ -560,7 +561,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
560 return -EOPNOTSUPP; 561 return -EOPNOTSUPP;
561 } 562 }
562 563
563 if (AR_SREV_9271(ah) || AR_SREV_9100(ah)) 564 if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah))
564 ah->is_pciexpress = false; 565 ah->is_pciexpress = false;
565 566
566 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); 567 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
@@ -629,6 +630,7 @@ int ath9k_hw_init(struct ath_hw *ah)
629 case AR2427_DEVID_PCIE: 630 case AR2427_DEVID_PCIE:
630 case AR9300_DEVID_PCIE: 631 case AR9300_DEVID_PCIE:
631 case AR9300_DEVID_AR9485_PCIE: 632 case AR9300_DEVID_AR9485_PCIE:
633 case AR9300_DEVID_AR9340:
632 break; 634 break;
633 default: 635 default:
634 if (common->bus_ops->ath_bus_type == ATH_USB) 636 if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -671,48 +673,89 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
671 REGWRITE_BUFFER_FLUSH(ah); 673 REGWRITE_BUFFER_FLUSH(ah);
672} 674}
673 675
674unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) 676u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
675{ 677{
676 REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK))); 678 REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
677 udelay(100); 679 udelay(100);
678 REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK)); 680 REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
679 681
680 while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) 682 while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
681 udelay(100); 683 udelay(100);
682 684
683 return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3; 685 return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
684} 686}
685EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc); 687EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
686 688
687#define DPLL2_KD_VAL 0x3D
688#define DPLL2_KI_VAL 0x06
689#define DPLL3_PHASE_SHIFT_VAL 0x1
690
691static void ath9k_hw_init_pll(struct ath_hw *ah, 689static void ath9k_hw_init_pll(struct ath_hw *ah,
692 struct ath9k_channel *chan) 690 struct ath9k_channel *chan)
693{ 691{
694 u32 pll; 692 u32 pll;
695 693
696 if (AR_SREV_9485(ah)) { 694 if (AR_SREV_9485(ah)) {
697 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
698 REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
699 695
700 REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3, 696 /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
701 AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL); 697 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
702 698 AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
703 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); 699 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
704 udelay(1000); 700 AR_CH0_DPLL2_KD, 0x40);
701 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
702 AR_CH0_DPLL2_KI, 0x4);
705 703
706 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666); 704 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
705 AR_CH0_BB_DPLL1_REFDIV, 0x5);
706 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
707 AR_CH0_BB_DPLL1_NINI, 0x58);
708 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
709 AR_CH0_BB_DPLL1_NFRAC, 0x0);
707 710
708 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 711 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
709 AR_CH0_DPLL2_KD, DPLL2_KD_VAL); 712 AR_CH0_BB_DPLL2_OUTDIV, 0x1);
713 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
714 AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1);
710 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 715 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
711 AR_CH0_DPLL2_KI, DPLL2_KI_VAL); 716 AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1);
712 717
718 /* program BB PLL phase_shift to 0x6 */
713 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, 719 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
714 AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL); 720 AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6);
715 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c); 721
722 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
723 AR_CH0_BB_DPLL2_PLL_PWD, 0x0);
724 udelay(1000);
725 } else if (AR_SREV_9340(ah)) {
726 u32 regval, pll2_divint, pll2_divfrac, refdiv;
727
728 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
729 udelay(1000);
730
731 REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16);
732 udelay(100);
733
734 if (ah->is_clk_25mhz) {
735 pll2_divint = 0x54;
736 pll2_divfrac = 0x1eb85;
737 refdiv = 3;
738 } else {
739 pll2_divint = 88;
740 pll2_divfrac = 0;
741 refdiv = 5;
742 }
743
744 regval = REG_READ(ah, AR_PHY_PLL_MODE);
745 regval |= (0x1 << 16);
746 REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
747 udelay(100);
748
749 REG_WRITE(ah, AR_PHY_PLL_CONTROL, (refdiv << 27) |
750 (pll2_divint << 18) | pll2_divfrac);
751 udelay(100);
752
753 regval = REG_READ(ah, AR_PHY_PLL_MODE);
754 regval = (regval & 0x80071fff) | (0x1 << 30) | (0x1 << 13) |
755 (0x4 << 26) | (0x18 << 19);
756 REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
757 REG_WRITE(ah, AR_PHY_PLL_MODE,
758 REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
716 udelay(1000); 759 udelay(1000);
717 } 760 }
718 761
@@ -720,6 +763,9 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
720 763
721 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 764 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
722 765
766 if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
767 udelay(1000);
768
723 /* Switch the core clock for ar9271 to 117Mhz */ 769 /* Switch the core clock for ar9271 to 117Mhz */
724 if (AR_SREV_9271(ah)) { 770 if (AR_SREV_9271(ah)) {
725 udelay(500); 771 udelay(500);
@@ -729,17 +775,34 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
729 udelay(RTC_PLL_SETTLE_DELAY); 775 udelay(RTC_PLL_SETTLE_DELAY);
730 776
731 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 777 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
778
779 if (AR_SREV_9340(ah)) {
780 if (ah->is_clk_25mhz) {
781 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
782 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
783 REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
784 } else {
785 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
786 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
787 REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
788 }
789 udelay(100);
790 }
732} 791}
733 792
734static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, 793static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
735 enum nl80211_iftype opmode) 794 enum nl80211_iftype opmode)
736{ 795{
796 u32 sync_default = AR_INTR_SYNC_DEFAULT;
737 u32 imr_reg = AR_IMR_TXERR | 797 u32 imr_reg = AR_IMR_TXERR |
738 AR_IMR_TXURN | 798 AR_IMR_TXURN |
739 AR_IMR_RXERR | 799 AR_IMR_RXERR |
740 AR_IMR_RXORN | 800 AR_IMR_RXORN |
741 AR_IMR_BCNMISC; 801 AR_IMR_BCNMISC;
742 802
803 if (AR_SREV_9340(ah))
804 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
805
743 if (AR_SREV_9300_20_OR_LATER(ah)) { 806 if (AR_SREV_9300_20_OR_LATER(ah)) {
744 imr_reg |= AR_IMR_RXOK_HP; 807 imr_reg |= AR_IMR_RXOK_HP;
745 if (ah->config.rx_intr_mitigation) 808 if (ah->config.rx_intr_mitigation)
@@ -770,7 +833,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
770 833
771 if (!AR_SREV_9100(ah)) { 834 if (!AR_SREV_9100(ah)) {
772 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); 835 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
773 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT); 836 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
774 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); 837 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
775 } 838 }
776 839
@@ -830,8 +893,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
830 ah->misc_mode); 893 ah->misc_mode);
831 894
832 if (ah->misc_mode != 0) 895 if (ah->misc_mode != 0)
833 REG_WRITE(ah, AR_PCU_MISC, 896 REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
834 REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
835 897
836 if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ) 898 if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
837 sifstime = 16; 899 sifstime = 16;
@@ -899,23 +961,19 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
899static inline void ath9k_hw_set_dma(struct ath_hw *ah) 961static inline void ath9k_hw_set_dma(struct ath_hw *ah)
900{ 962{
901 struct ath_common *common = ath9k_hw_common(ah); 963 struct ath_common *common = ath9k_hw_common(ah);
902 u32 regval;
903 964
904 ENABLE_REGWRITE_BUFFER(ah); 965 ENABLE_REGWRITE_BUFFER(ah);
905 966
906 /* 967 /*
907 * set AHB_MODE not to do cacheline prefetches 968 * set AHB_MODE not to do cacheline prefetches
908 */ 969 */
909 if (!AR_SREV_9300_20_OR_LATER(ah)) { 970 if (!AR_SREV_9300_20_OR_LATER(ah))
910 regval = REG_READ(ah, AR_AHB_MODE); 971 REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);
911 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
912 }
913 972
914 /* 973 /*
915 * let mac dma reads be in 128 byte chunks 974 * let mac dma reads be in 128 byte chunks
916 */ 975 */
917 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK; 976 REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);
918 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
919 977
920 REGWRITE_BUFFER_FLUSH(ah); 978 REGWRITE_BUFFER_FLUSH(ah);
921 979
@@ -932,8 +990,7 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
932 /* 990 /*
933 * let mac dma writes be in 128 byte chunks 991 * let mac dma writes be in 128 byte chunks
934 */ 992 */
935 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK; 993 REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);
936 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
937 994
938 /* 995 /*
939 * Setup receive FIFO threshold to hold off TX activities 996 * Setup receive FIFO threshold to hold off TX activities
@@ -972,30 +1029,27 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
972 1029
973static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) 1030static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
974{ 1031{
975 u32 val; 1032 u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
1033 u32 set = AR_STA_ID1_KSRCH_MODE;
976 1034
977 val = REG_READ(ah, AR_STA_ID1);
978 val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
979 switch (opmode) { 1035 switch (opmode) {
980 case NL80211_IFTYPE_AP:
981 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
982 | AR_STA_ID1_KSRCH_MODE);
983 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
984 break;
985 case NL80211_IFTYPE_ADHOC: 1036 case NL80211_IFTYPE_ADHOC:
986 case NL80211_IFTYPE_MESH_POINT: 1037 case NL80211_IFTYPE_MESH_POINT:
987 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC 1038 set |= AR_STA_ID1_ADHOC;
988 | AR_STA_ID1_KSRCH_MODE);
989 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1039 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
990 break; 1040 break;
1041 case NL80211_IFTYPE_AP:
1042 set |= AR_STA_ID1_STA_AP;
1043 /* fall through */
991 case NL80211_IFTYPE_STATION: 1044 case NL80211_IFTYPE_STATION:
992 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); 1045 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
993 break; 1046 break;
994 default: 1047 default:
995 if (ah->is_monitoring) 1048 if (!ah->is_monitoring)
996 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); 1049 set = 0;
997 break; 1050 break;
998 } 1051 }
1052 REG_RMW(ah, AR_STA_ID1, set, mask);
999} 1053}
1000 1054
1001void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, 1055void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@ -1021,10 +1075,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1021 u32 tmpReg; 1075 u32 tmpReg;
1022 1076
1023 if (AR_SREV_9100(ah)) { 1077 if (AR_SREV_9100(ah)) {
1024 u32 val = REG_READ(ah, AR_RTC_DERIVED_CLK); 1078 REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
1025 val &= ~AR_RTC_DERIVED_CLK_PERIOD; 1079 AR_RTC_DERIVED_CLK_PERIOD, 1);
1026 val |= SM(1, AR_RTC_DERIVED_CLK_PERIOD);
1027 REG_WRITE(ah, AR_RTC_DERIVED_CLK, val);
1028 (void)REG_READ(ah, AR_RTC_DERIVED_CLK); 1080 (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
1029 } 1081 }
1030 1082
@@ -1212,6 +1264,20 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1212 return true; 1264 return true;
1213} 1265}
1214 1266
1267static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
1268{
1269 u32 gpio_mask = ah->gpio_mask;
1270 int i;
1271
1272 for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
1273 if (!(gpio_mask & 1))
1274 continue;
1275
1276 ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1277 ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
1278 }
1279}
1280
1215bool ath9k_hw_check_alive(struct ath_hw *ah) 1281bool ath9k_hw_check_alive(struct ath_hw *ah)
1216{ 1282{
1217 int count = 50; 1283 int count = 50;
@@ -1409,7 +1475,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1409 REGWRITE_BUFFER_FLUSH(ah); 1475 REGWRITE_BUFFER_FLUSH(ah);
1410 1476
1411 ah->intr_txqs = 0; 1477 ah->intr_txqs = 0;
1412 for (i = 0; i < ah->caps.total_queues; i++) 1478 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1413 ath9k_hw_resettxqueue(ah, i); 1479 ath9k_hw_resettxqueue(ah, i);
1414 1480
1415 ath9k_hw_init_interrupt_masks(ah, ah->opmode); 1481 ath9k_hw_init_interrupt_masks(ah, ah->opmode);
@@ -1426,8 +1492,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1426 ar9002_hw_enable_wep_aggregation(ah); 1492 ar9002_hw_enable_wep_aggregation(ah);
1427 } 1493 }
1428 1494
1429 REG_WRITE(ah, AR_STA_ID1, 1495 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
1430 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
1431 1496
1432 ath9k_hw_set_dma(ah); 1497 ath9k_hw_set_dma(ah);
1433 1498
@@ -1480,7 +1545,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1480 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 1545 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1481 } 1546 }
1482#ifdef __BIG_ENDIAN 1547#ifdef __BIG_ENDIAN
1483 else 1548 else if (AR_SREV_9340(ah))
1549 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
1550 else
1484 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 1551 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1485#endif 1552#endif
1486 } 1553 }
@@ -1491,6 +1558,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1491 if (AR_SREV_9300_20_OR_LATER(ah)) 1558 if (AR_SREV_9300_20_OR_LATER(ah))
1492 ar9003_hw_bb_watchdog_config(ah); 1559 ar9003_hw_bb_watchdog_config(ah);
1493 1560
1561 ath9k_hw_apply_gpio_override(ah);
1562
1494 return 0; 1563 return 0;
1495} 1564}
1496EXPORT_SYMBOL(ath9k_hw_reset); 1565EXPORT_SYMBOL(ath9k_hw_reset);
@@ -1670,21 +1739,15 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1670 case NL80211_IFTYPE_MESH_POINT: 1739 case NL80211_IFTYPE_MESH_POINT:
1671 REG_SET_BIT(ah, AR_TXCFG, 1740 REG_SET_BIT(ah, AR_TXCFG,
1672 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); 1741 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
1673 REG_WRITE(ah, AR_NEXT_NDP_TIMER, 1742 REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
1674 TU_TO_USEC(next_beacon + 1743 TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
1675 (ah->atim_window ? ah->
1676 atim_window : 1)));
1677 flags |= AR_NDP_TIMER_EN; 1744 flags |= AR_NDP_TIMER_EN;
1678 case NL80211_IFTYPE_AP: 1745 case NL80211_IFTYPE_AP:
1679 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 1746 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
1680 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 1747 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
1681 TU_TO_USEC(next_beacon - 1748 TU_TO_USEC(ah->config.dma_beacon_response_time));
1682 ah->config. 1749 REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
1683 dma_beacon_response_time)); 1750 TU_TO_USEC(ah->config.sw_beacon_response_time));
1684 REG_WRITE(ah, AR_NEXT_SWBA,
1685 TU_TO_USEC(next_beacon -
1686 ah->config.
1687 sw_beacon_response_time));
1688 flags |= 1751 flags |=
1689 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 1752 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
1690 break; 1753 break;
@@ -1696,18 +1759,13 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1696 break; 1759 break;
1697 } 1760 }
1698 1761
1699 REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period)); 1762 REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
1700 REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period)); 1763 REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
1701 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period)); 1764 REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
1702 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period)); 1765 REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
1703 1766
1704 REGWRITE_BUFFER_FLUSH(ah); 1767 REGWRITE_BUFFER_FLUSH(ah);
1705 1768
1706 beacon_period &= ~ATH9K_BEACON_ENA;
1707 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
1708 ath9k_hw_reset_tsf(ah);
1709 }
1710
1711 REG_SET_BIT(ah, AR_TIMER_MODE, flags); 1769 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
1712} 1770}
1713EXPORT_SYMBOL(ath9k_hw_beaconinit); 1771EXPORT_SYMBOL(ath9k_hw_beaconinit);
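
The beacon hunk above removes the TU_TO_USEC conversions from ath9k_hw_beaconinit(), so the values written to the beacon timer registers are taken as-is from the caller. For reference, the conversion that used to be applied in place is just a shift, one 802.11 time unit being 1024 µs; tu_to_usec() below is a stand-alone model of the macro:

#include <stdint.h>
#include <stdio.h>

/* 802.11 time unit: 1 TU = 1024 microseconds (TU_TO_USEC is a shift by 10). */
static uint32_t tu_to_usec(uint32_t tu)
{
        return tu << 10;
}

int main(void)
{
        /* A typical 100 TU beacon interval is 102400 us (~102.4 ms). */
        printf("%u\n", tu_to_usec(100));
        return 0;
}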
@@ -1795,7 +1853,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1795 struct ath_common *common = ath9k_hw_common(ah); 1853 struct ath_common *common = ath9k_hw_common(ah);
1796 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 1854 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
1797 1855
1798 u16 capField = 0, eeval; 1856 u16 eeval;
1799 u8 ant_div_ctl1, tx_chainmask, rx_chainmask; 1857 u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
1800 1858
1801 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0); 1859 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
@@ -1806,8 +1864,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1806 eeval |= AR9285_RDEXT_DEFAULT; 1864 eeval |= AR9285_RDEXT_DEFAULT;
1807 regulatory->current_rd_ext = eeval; 1865 regulatory->current_rd_ext = eeval;
1808 1866
1809 capField = ah->eep_ops->get_eeprom(ah, EEP_OP_CAP);
1810
1811 if (ah->opmode != NL80211_IFTYPE_AP && 1867 if (ah->opmode != NL80211_IFTYPE_AP &&
1812 ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) { 1868 ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
1813 if (regulatory->current_rd == 0x64 || 1869 if (regulatory->current_rd == 0x64 ||
@@ -1842,6 +1898,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1842 !(AR_SREV_9271(ah))) 1898 !(AR_SREV_9271(ah)))
1843 /* CB71: GPIO 0 is pulled down to indicate 3 rx chains */ 1899 /* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
1844 pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7; 1900 pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
1901 else if (AR_SREV_9100(ah))
1902 pCap->rx_chainmask = 0x7;
1845 else 1903 else
1846 /* Use rx_chainmask from EEPROM. */ 1904 /* Use rx_chainmask from EEPROM. */
1847 pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK); 1905 pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
@@ -1852,36 +1910,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1852 if (AR_SREV_9300_20_OR_LATER(ah)) 1910 if (AR_SREV_9300_20_OR_LATER(ah))
1853 ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH; 1911 ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
1854 1912
1855 pCap->low_2ghz_chan = 2312;
1856 pCap->high_2ghz_chan = 2732;
1857
1858 pCap->low_5ghz_chan = 4920;
1859 pCap->high_5ghz_chan = 6100;
1860
1861 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM; 1913 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
1862 1914
1863 if (ah->config.ht_enable) 1915 if (ah->hw_version.devid != AR2427_DEVID_PCIE)
1864 pCap->hw_caps |= ATH9K_HW_CAP_HT; 1916 pCap->hw_caps |= ATH9K_HW_CAP_HT;
1865 else 1917 else
1866 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 1918 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
1867 1919
1868 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
1869 pCap->total_queues =
1870 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
1871 else
1872 pCap->total_queues = ATH9K_NUM_TX_QUEUES;
1873
1874 if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
1875 pCap->keycache_size =
1876 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
1877 else
1878 pCap->keycache_size = AR_KEYTABLE_SIZE;
1879
1880 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
1881 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
1882 else
1883 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
1884
1885 if (AR_SREV_9271(ah)) 1920 if (AR_SREV_9271(ah))
1886 pCap->num_gpio_pins = AR9271_NUM_GPIO; 1921 pCap->num_gpio_pins = AR9271_NUM_GPIO;
1887 else if (AR_DEVID_7010(ah)) 1922 else if (AR_DEVID_7010(ah))
@@ -1900,8 +1935,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1900 pCap->rts_aggr_limit = (8 * 1024); 1935 pCap->rts_aggr_limit = (8 * 1024);
1901 } 1936 }
1902 1937
1903 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
1904
1905#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 1938#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1906 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); 1939 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
1907 if (ah->rfsilent & EEP_RFSILENT_ENABLED) { 1940 if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
@@ -1923,32 +1956,23 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1923 else 1956 else
1924 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; 1957 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
1925 1958
1926 if (regulatory->current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) { 1959 if (common->btcoex_enabled) {
1927 pCap->reg_cap = 1960 if (AR_SREV_9300_20_OR_LATER(ah)) {
1928 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
1929 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
1930 AR_EEPROM_EEREGCAP_EN_KK_U2 |
1931 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
1932 } else {
1933 pCap->reg_cap =
1934 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
1935 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
1936 }
1937
1938 /* Advertise midband for AR5416 with FCC midband set in eeprom */
1939 if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
1940 AR_SREV_5416(ah))
1941 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
1942
1943 if (AR_SREV_9280_20_OR_LATER(ah) && common->btcoex_enabled) {
1944 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
1945 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
1946
1947 if (AR_SREV_9285(ah)) {
1948 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; 1961 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
1949 btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO; 1962 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
1950 } else { 1963 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
1951 btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE; 1964 btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
1965 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
1966 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
1967 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
1968
1969 if (AR_SREV_9285(ah)) {
1970 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
1971 btcoex_hw->btpriority_gpio =
1972 ATH_BTPRIORITY_GPIO_9285;
1973 } else {
1974 btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
1975 }
1952 } 1976 }
1953 } else { 1977 } else {
1954 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE; 1978 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
@@ -1998,6 +2022,22 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1998 } 2022 }
1999 2023
2000 2024
2025 if (AR_SREV_9485(ah)) {
2026 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
2027 /*
2028 * enable the diversity-combining algorithm only when
2029 * both enable_lna_div and enable_fast_div are set
2030 * Table for Diversity
2031 * ant_div_alt_lnaconf bit 0-1
2032 * ant_div_main_lnaconf bit 2-3
2033 * ant_div_alt_gaintb bit 4
2034 * ant_div_main_gaintb bit 5
2035 * enable_ant_div_lnadiv bit 6
2036 * enable_ant_fast_div bit 7
2037 */
2038 if ((ant_div_ctl1 >> 0x6) == 0x3)
2039 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
2040 }
2001 2041
2002 if (AR_SREV_9485_10(ah)) { 2042 if (AR_SREV_9485_10(ah)) {
2003 pCap->pcie_lcr_extsync_en = true; 2043 pCap->pcie_lcr_extsync_en = true;
@@ -2186,11 +2226,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
2186 REG_WRITE(ah, AR_PHY_ERR, phybits); 2226 REG_WRITE(ah, AR_PHY_ERR, phybits);
2187 2227
2188 if (phybits) 2228 if (phybits)
2189 REG_WRITE(ah, AR_RXCFG, 2229 REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
2190 REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
2191 else 2230 else
2192 REG_WRITE(ah, AR_RXCFG, 2231 REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
2193 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
2194 2232
2195 REGWRITE_BUFFER_FLUSH(ah); 2233 REGWRITE_BUFFER_FLUSH(ah);
2196} 2234}
@@ -2366,10 +2404,11 @@ static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
2366 return timer_table->gen_timer_index[b]; 2404 return timer_table->gen_timer_index[b];
2367} 2405}
2368 2406
2369static u32 ath9k_hw_gettsf32(struct ath_hw *ah) 2407u32 ath9k_hw_gettsf32(struct ath_hw *ah)
2370{ 2408{
2371 return REG_READ(ah, AR_TSF_L32); 2409 return REG_READ(ah, AR_TSF_L32);
2372} 2410}
2411EXPORT_SYMBOL(ath9k_hw_gettsf32);
2373 2412
2374struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 2413struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
2375 void (*trigger)(void *), 2414 void (*trigger)(void *),
@@ -2402,11 +2441,11 @@ EXPORT_SYMBOL(ath_gen_timer_alloc);
2402 2441
2403void ath9k_hw_gen_timer_start(struct ath_hw *ah, 2442void ath9k_hw_gen_timer_start(struct ath_hw *ah,
2404 struct ath_gen_timer *timer, 2443 struct ath_gen_timer *timer,
2405 u32 timer_next, 2444 u32 trig_timeout,
2406 u32 timer_period) 2445 u32 timer_period)
2407{ 2446{
2408 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 2447 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
2409 u32 tsf; 2448 u32 tsf, timer_next;
2410 2449
2411 BUG_ON(!timer_period); 2450 BUG_ON(!timer_period);
2412 2451
@@ -2414,18 +2453,13 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
2414 2453
2415 tsf = ath9k_hw_gettsf32(ah); 2454 tsf = ath9k_hw_gettsf32(ah);
2416 2455
2456 timer_next = tsf + trig_timeout;
2457
2417 ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER, 2458 ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
2418 "current tsf %x period %x timer_next %x\n", 2459 "current tsf %x period %x timer_next %x\n",
2419 tsf, timer_period, timer_next); 2460 tsf, timer_period, timer_next);
2420 2461
2421 /* 2462 /*
2422 * Pull timer_next forward if the current TSF already passed it
2423 * because of software latency
2424 */
2425 if (timer_next < tsf)
2426 timer_next = tsf + timer_period;
2427
2428 /*
2429 * Program generic timer registers 2463 * Program generic timer registers
2430 */ 2464 */
2431 REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr, 2465 REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr,
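
ath9k_hw_gen_timer_start() now takes a relative trig_timeout instead of an absolute TSF target, so the programmed value is always the current TSF plus the caller's offset and the old "pull forward if already passed" correction disappears. Because the TSF read here is a wrapping 32-bit counter, the addition is intentionally modular; a tiny sketch where schedule_timer() is illustrative:

#include <stdint.h>
#include <stdio.h>

/* New-style scheduling: the target is derived from the current counter value,
 * so it can never already be in the past; 32-bit wraparound is handled by the
 * modular addition itself. */
static uint32_t schedule_timer(uint32_t tsf_now, uint32_t trig_timeout)
{
        return tsf_now + trig_timeout;   /* wraps naturally on overflow */
}

int main(void)
{
        printf("%u\n", schedule_timer(1000, 500));          /* 1500 */
        printf("%u\n", schedule_timer(0xfffffff0u, 0x20));  /* wraps to 16 */
        return 0;
}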
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 6650fd48415c..7af2773d2bfc 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -43,6 +43,7 @@
43#define AR9287_DEVID_PCI 0x002d 43#define AR9287_DEVID_PCI 0x002d
44#define AR9287_DEVID_PCIE 0x002e 44#define AR9287_DEVID_PCIE 0x002e
45#define AR9300_DEVID_PCIE 0x0030 45#define AR9300_DEVID_PCIE 0x0030
46#define AR9300_DEVID_AR9340 0x0031
46#define AR9300_DEVID_AR9485_PCIE 0x0032 47#define AR9300_DEVID_AR9485_PCIE 0x0032
47 48
48#define AR5416_AR9100_DEVID 0x000b 49#define AR5416_AR9100_DEVID 0x000b
@@ -55,6 +56,9 @@
55#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa 56#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
56#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab 57#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
57 58
59#define AR9300_NUM_BT_WEIGHTS 4
60#define AR9300_NUM_WLAN_WEIGHTS 4
61
58#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1) 62#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
59 63
60#define ATH_DEFAULT_NOISE_FLOOR -95 64#define ATH_DEFAULT_NOISE_FLOOR -95
@@ -65,53 +69,49 @@
65 69
66/* Register read/write primitives */ 70/* Register read/write primitives */
67#define REG_WRITE(_ah, _reg, _val) \ 71#define REG_WRITE(_ah, _reg, _val) \
68 ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg)) 72 (_ah)->reg_ops.write((_ah), (_val), (_reg))
69 73
70#define REG_READ(_ah, _reg) \ 74#define REG_READ(_ah, _reg) \
71 ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) 75 (_ah)->reg_ops.read((_ah), (_reg))
72 76
73#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \ 77#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
74 ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt)) 78 (_ah)->reg_ops.multi_read((_ah), (_addr), (_val), (_cnt))
79
80#define REG_RMW(_ah, _reg, _set, _clr) \
81 (_ah)->reg_ops.rmw((_ah), (_reg), (_set), (_clr))
75 82
76#define ENABLE_REGWRITE_BUFFER(_ah) \ 83#define ENABLE_REGWRITE_BUFFER(_ah) \
77 do { \ 84 do { \
78 if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \ 85 if ((_ah)->reg_ops.enable_write_buffer) \
79 ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \ 86 (_ah)->reg_ops.enable_write_buffer((_ah)); \
80 } while (0) 87 } while (0)
81 88
82#define REGWRITE_BUFFER_FLUSH(_ah) \ 89#define REGWRITE_BUFFER_FLUSH(_ah) \
83 do { \ 90 do { \
84 if (ath9k_hw_common(_ah)->ops->write_flush) \ 91 if ((_ah)->reg_ops.write_flush) \
85 ath9k_hw_common(_ah)->ops->write_flush((_ah)); \ 92 (_ah)->reg_ops.write_flush((_ah)); \
86 } while (0) 93 } while (0)
87 94
88#define SM(_v, _f) (((_v) << _f##_S) & _f) 95#define SM(_v, _f) (((_v) << _f##_S) & _f)
89#define MS(_v, _f) (((_v) & _f) >> _f##_S) 96#define MS(_v, _f) (((_v) & _f) >> _f##_S)
90#define REG_RMW(_a, _r, _set, _clr) \
91 REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set))
92#define REG_RMW_FIELD(_a, _r, _f, _v) \ 97#define REG_RMW_FIELD(_a, _r, _f, _v) \
93 REG_WRITE(_a, _r, \ 98 REG_RMW(_a, _r, (((_v) << _f##_S) & _f), (_f))
94 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
95#define REG_READ_FIELD(_a, _r, _f) \ 99#define REG_READ_FIELD(_a, _r, _f) \
96 (((REG_READ(_a, _r) & _f) >> _f##_S)) 100 (((REG_READ(_a, _r) & _f) >> _f##_S))
97#define REG_SET_BIT(_a, _r, _f) \ 101#define REG_SET_BIT(_a, _r, _f) \
98 REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f)) 102 REG_RMW(_a, _r, (_f), 0)
99#define REG_CLR_BIT(_a, _r, _f) \ 103#define REG_CLR_BIT(_a, _r, _f) \
100 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f)) 104 REG_RMW(_a, _r, 0, (_f))
101 105
102#define DO_DELAY(x) do { \ 106#define DO_DELAY(x) do { \
103 if ((++(x) % 64) == 0) \ 107 if (((++(x) % 64) == 0) && \
104 udelay(1); \ 108 (ath9k_hw_common(ah)->bus_ops->ath_bus_type \
109 != ATH_USB)) \
110 udelay(1); \
105 } while (0) 111 } while (0)
106 112
107#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \ 113#define REG_WRITE_ARRAY(iniarray, column, regWr) \
108 int r; \ 114 ath9k_hw_write_array(ah, iniarray, column, &(regWr))
109 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
110 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
111 INI_RA((iniarray), r, (column))); \
112 DO_DELAY(regWr); \
113 } \
114 } while (0)
115 115
116#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0 116#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
117#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1 117#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
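With REG_RMW() promoted to a reg_ops callback, REG_RMW_FIELD(), REG_SET_BIT() and REG_CLR_BIT() all reduce to a single call. A stand-alone model of the field update REG_RMW_FIELD() performs, where mask plays the role of the field _f and shift its _S companion from the SM()/MS() macros; sketch only, not the driver code:

#include <stdint.h>

/* REG_RMW_FIELD(_a, _r, _f, _v) == REG_RMW(_a, _r, SM(_v, _f), _f):
 * clear the whole field, then OR in the shifted, masked value. */
static uint32_t rmw_field(uint32_t reg, uint32_t mask, unsigned int shift,
                          uint32_t val)
{
        reg &= ~mask;
        reg |= (val << shift) & mask;
        return reg;
}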
@@ -125,7 +125,7 @@
125#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) 125#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
126 126
127#define BASE_ACTIVATE_DELAY 100 127#define BASE_ACTIVATE_DELAY 100
128#define RTC_PLL_SETTLE_DELAY 100 128#define RTC_PLL_SETTLE_DELAY (AR_SREV_9340(ah) ? 1000 : 100)
129#define COEF_SCALE_S 24 129#define COEF_SCALE_S 24
130#define HT40_CHANNEL_CENTER_SHIFT 10 130#define HT40_CHANNEL_CENTER_SHIFT 10
131 131
@@ -178,7 +178,6 @@ enum ath9k_hw_caps {
178 ATH9K_HW_CAP_HT = BIT(0), 178 ATH9K_HW_CAP_HT = BIT(0),
179 ATH9K_HW_CAP_RFSILENT = BIT(1), 179 ATH9K_HW_CAP_RFSILENT = BIT(1),
180 ATH9K_HW_CAP_CST = BIT(2), 180 ATH9K_HW_CAP_CST = BIT(2),
181 ATH9K_HW_CAP_ENHANCEDPM = BIT(3),
182 ATH9K_HW_CAP_AUTOSLEEP = BIT(4), 181 ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
183 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5), 182 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
184 ATH9K_HW_CAP_EDMA = BIT(6), 183 ATH9K_HW_CAP_EDMA = BIT(6),
@@ -195,17 +194,11 @@ enum ath9k_hw_caps {
195 194
196struct ath9k_hw_capabilities { 195struct ath9k_hw_capabilities {
197 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */ 196 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
198 u16 total_queues;
199 u16 keycache_size;
200 u16 low_5ghz_chan, high_5ghz_chan;
201 u16 low_2ghz_chan, high_2ghz_chan;
202 u16 rts_aggr_limit; 197 u16 rts_aggr_limit;
203 u8 tx_chainmask; 198 u8 tx_chainmask;
204 u8 rx_chainmask; 199 u8 rx_chainmask;
205 u8 max_txchains; 200 u8 max_txchains;
206 u8 max_rxchains; 201 u8 max_rxchains;
207 u16 tx_triglevel_max;
208 u16 reg_cap;
209 u8 num_gpio_pins; 202 u8 num_gpio_pins;
210 u8 rx_hp_qdepth; 203 u8 rx_hp_qdepth;
211 u8 rx_lp_qdepth; 204 u8 rx_lp_qdepth;
@@ -227,7 +220,6 @@ struct ath9k_ops_config {
227 u8 pcie_clock_req; 220 u8 pcie_clock_req;
228 u32 pcie_waen; 221 u32 pcie_waen;
229 u8 analog_shiftreg; 222 u8 analog_shiftreg;
230 u8 ht_enable;
231 u8 paprd_disable; 223 u8 paprd_disable;
232 u32 ofdm_trig_low; 224 u32 ofdm_trig_low;
233 u32 ofdm_trig_high; 225 u32 ofdm_trig_high;
@@ -412,8 +404,6 @@ struct ath9k_beacon_state {
412 u32 bs_nextdtim; 404 u32 bs_nextdtim;
413 u32 bs_intval; 405 u32 bs_intval;
414#define ATH9K_BEACON_PERIOD 0x0000ffff 406#define ATH9K_BEACON_PERIOD 0x0000ffff
415#define ATH9K_BEACON_ENA 0x00800000
416#define ATH9K_BEACON_RESET_TSF 0x01000000
417#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */ 407#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */
418 u32 bs_dtimperiod; 408 u32 bs_dtimperiod;
419 u16 bs_cfpperiod; 409 u16 bs_cfpperiod;
@@ -489,6 +479,10 @@ struct ath_hw_antcomb_conf {
489 u8 main_lna_conf; 479 u8 main_lna_conf;
490 u8 alt_lna_conf; 480 u8 alt_lna_conf;
491 u8 fast_div_bias; 481 u8 fast_div_bias;
482 u8 main_gaintb;
483 u8 alt_gaintb;
484 int lna1_lna2_delta;
485 u8 div_group;
492}; 486};
493 487
494/** 488/**
@@ -638,10 +632,12 @@ struct ath_hw_ops {
638 u32 numDelims); 632 u32 numDelims);
639 void (*set11n_aggr_last)(struct ath_hw *ah, void *ds); 633 void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
640 void (*clr11n_aggr)(struct ath_hw *ah, void *ds); 634 void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
641 void (*set11n_burstduration)(struct ath_hw *ah, void *ds, 635 void (*set_clrdmask)(struct ath_hw *ah, void *ds, bool val);
642 u32 burstDuration); 636 void (*antdiv_comb_conf_get)(struct ath_hw *ah,
643 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds, 637 struct ath_hw_antcomb_conf *antconf);
644 u32 vmf); 638 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
639 struct ath_hw_antcomb_conf *antconf);
640
645}; 641};
646 642
647struct ath_nf_limits { 643struct ath_nf_limits {
@@ -655,6 +651,8 @@ struct ath_nf_limits {
655#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */ 651#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
656 652
657struct ath_hw { 653struct ath_hw {
654 struct ath_ops reg_ops;
655
658 struct ieee80211_hw *hw; 656 struct ieee80211_hw *hw;
659 struct ath_common common; 657 struct ath_common common;
660 struct ath9k_hw_version hw_version; 658 struct ath9k_hw_version hw_version;
@@ -784,6 +782,8 @@ struct ath_hw {
784 782
785 /* Bluetooth coexistence */ 783 /* Bluetooth coexistence */
786 struct ath_btcoex_hw btcoex_hw; 784 struct ath_btcoex_hw btcoex_hw;
785 u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
786 u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
787 787
788 u32 intr_txqs; 788 u32 intr_txqs;
789 u8 txchainmask; 789 u8 txchainmask;
@@ -794,7 +794,9 @@ struct ath_hw {
794 u32 originalGain[22]; 794 u32 originalGain[22];
795 int initPDADC; 795 int initPDADC;
796 int PDADCdelta; 796 int PDADCdelta;
797 u8 led_pin; 797 int led_pin;
798 u32 gpio_mask;
799 u32 gpio_val;
798 800
799 struct ar5416IniArray iniModes; 801 struct ar5416IniArray iniModes;
800 struct ar5416IniArray iniCommon; 802 struct ar5416IniArray iniCommon;
@@ -810,6 +812,7 @@ struct ath_hw {
810 struct ar5416IniArray iniPcieSerdes; 812 struct ar5416IniArray iniPcieSerdes;
811 struct ar5416IniArray iniPcieSerdesLowPower; 813 struct ar5416IniArray iniPcieSerdesLowPower;
812 struct ar5416IniArray iniModesAdditional; 814 struct ar5416IniArray iniModesAdditional;
815 struct ar5416IniArray iniModesAdditional_40M;
813 struct ar5416IniArray iniModesRxGain; 816 struct ar5416IniArray iniModesRxGain;
814 struct ar5416IniArray iniModesTxGain; 817 struct ar5416IniArray iniModesTxGain;
815 struct ar5416IniArray iniModes_9271_1_0_only; 818 struct ar5416IniArray iniModes_9271_1_0_only;
@@ -856,6 +859,16 @@ struct ath_hw {
856 859
857 /* Enterprise mode cap */ 860 /* Enterprise mode cap */
858 u32 ent_mode; 861 u32 ent_mode;
862
863 bool is_clk_25mhz;
864};
865
866struct ath_bus_ops {
867 enum ath_bus_type ath_bus_type;
868 void (*read_cachesize)(struct ath_common *common, int *csz);
869 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
870 void (*bt_coex_prep)(struct ath_common *common);
871 void (*extn_synch_en)(struct ath_common *common);
859}; 872};
860 873
861static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 874static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -900,15 +913,12 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
900void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); 913void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
901u32 ath9k_hw_getdefantenna(struct ath_hw *ah); 914u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
902void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 915void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
903void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
904 struct ath_hw_antcomb_conf *antconf);
905void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
906 struct ath_hw_antcomb_conf *antconf);
907 916
908/* General Operation */ 917/* General Operation */
909bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 918bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
919void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
920 int column, unsigned int *writecnt);
910u32 ath9k_hw_reverse_bits(u32 val, u32 n); 921u32 ath9k_hw_reverse_bits(u32 val, u32 n);
911bool ath9k_get_channel_edges(struct ath_hw *ah, u16 flags, u16 *low, u16 *high);
912u16 ath9k_hw_computetxtime(struct ath_hw *ah, 922u16 ath9k_hw_computetxtime(struct ath_hw *ah,
913 u8 phy, int kbps, 923 u8 phy, int kbps,
914 u32 frameLen, u16 rateix, bool shortPreamble); 924 u32 frameLen, u16 rateix, bool shortPreamble);
@@ -924,12 +934,13 @@ void ath9k_hw_setopmode(struct ath_hw *ah);
924void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); 934void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
925void ath9k_hw_setbssidmask(struct ath_hw *ah); 935void ath9k_hw_setbssidmask(struct ath_hw *ah);
926void ath9k_hw_write_associd(struct ath_hw *ah); 936void ath9k_hw_write_associd(struct ath_hw *ah);
937u32 ath9k_hw_gettsf32(struct ath_hw *ah);
927u64 ath9k_hw_gettsf64(struct ath_hw *ah); 938u64 ath9k_hw_gettsf64(struct ath_hw *ah);
928void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); 939void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
929void ath9k_hw_reset_tsf(struct ath_hw *ah); 940void ath9k_hw_reset_tsf(struct ath_hw *ah);
930void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 941void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
931void ath9k_hw_init_global_settings(struct ath_hw *ah); 942void ath9k_hw_init_global_settings(struct ath_hw *ah);
932unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah); 943u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
933void ath9k_hw_set11nmac2040(struct ath_hw *ah); 944void ath9k_hw_set11nmac2040(struct ath_hw *ah);
934void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 945void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
935void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 946void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
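REG_WRITE_ARRAY() above now simply calls the new ath9k_hw_write_array() helper declared in this hunk; the helper's body lives in hw.c, outside this diff. A stand-alone model of the loop it is assumed to keep, based on the removed macro body, with register/value pairs standing in for the INI_RA() column lookups:

#include <stdint.h>

struct ini_row { uint32_t reg; uint32_t val; };

/* Assumed behaviour, modeled on the removed macro: write every row and
 * throttle once every 64 writes (the reworked DO_DELAY() above skips
 * the throttle on USB buses). */
static void write_ini_rows(const struct ini_row *rows, int n_rows,
                           void (*reg_write)(uint32_t reg, uint32_t val),
                           unsigned int *writecnt)
{
        int r;

        for (r = 0; r < n_rows; r++) {
                reg_write(rows[r].reg, rows[r].val);
                if ((++(*writecnt) % 64) == 0) {
                        /* the driver udelay(1)s here, except over USB */
                }
        }
}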
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 79aec983279f..b172d1509515 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/ath9k_platform.h>
18 19
19#include "ath9k.h" 20#include "ath9k.h"
20 21
@@ -195,10 +196,27 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
195 return val; 196 return val;
196} 197}
197 198
198static const struct ath_ops ath9k_common_ops = { 199static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
199 .read = ath9k_ioread32, 200{
200 .write = ath9k_iowrite32, 201 struct ath_hw *ah = (struct ath_hw *) hw_priv;
201}; 202 struct ath_common *common = ath9k_hw_common(ah);
203 struct ath_softc *sc = (struct ath_softc *) common->priv;
204 unsigned long uninitialized_var(flags);
205 u32 val;
206
207 if (ah->config.serialize_regmode == SER_REG_MODE_ON)
208 spin_lock_irqsave(&sc->sc_serial_rw, flags);
209
210 val = ioread32(sc->mem + reg_offset);
211 val &= ~clr;
212 val |= set;
213 iowrite32(val, sc->mem + reg_offset);
214
215 if (ah->config.serialize_regmode == SER_REG_MODE_ON)
216 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
217
218 return val;
219}
202 220
203/**************************/ 221/**************************/
204/* Initialization */ 222/* Initialization */
@@ -389,13 +407,7 @@ void ath9k_init_crypto(struct ath_softc *sc)
389 int i = 0; 407 int i = 0;
390 408
391 /* Get the hardware key cache size. */ 409 /* Get the hardware key cache size. */
392 common->keymax = sc->sc_ah->caps.keycache_size; 410 common->keymax = AR_KEYTABLE_SIZE;
393 if (common->keymax > ATH_KEYMAX) {
394 ath_dbg(common, ATH_DBG_ANY,
395 "Warning, using only %u entries in %u key cache\n",
396 ATH_KEYMAX, common->keymax);
397 common->keymax = ATH_KEYMAX;
398 }
399 411
400 /* 412 /*
401 * Reset the key cache since some parts do not 413 * Reset the key cache since some parts do not
@@ -537,6 +549,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
537static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, 549static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
538 const struct ath_bus_ops *bus_ops) 550 const struct ath_bus_ops *bus_ops)
539{ 551{
552 struct ath9k_platform_data *pdata = sc->dev->platform_data;
540 struct ath_hw *ah = NULL; 553 struct ath_hw *ah = NULL;
541 struct ath_common *common; 554 struct ath_common *common;
542 int ret = 0, i; 555 int ret = 0, i;
@@ -549,13 +562,23 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
549 ah->hw = sc->hw; 562 ah->hw = sc->hw;
550 ah->hw_version.devid = devid; 563 ah->hw_version.devid = devid;
551 ah->hw_version.subsysid = subsysid; 564 ah->hw_version.subsysid = subsysid;
565 ah->reg_ops.read = ath9k_ioread32;
566 ah->reg_ops.write = ath9k_iowrite32;
567 ah->reg_ops.rmw = ath9k_reg_rmw;
552 sc->sc_ah = ah; 568 sc->sc_ah = ah;
553 569
554 if (!sc->dev->platform_data) 570 if (!pdata) {
555 ah->ah_flags |= AH_USE_EEPROM; 571 ah->ah_flags |= AH_USE_EEPROM;
572 sc->sc_ah->led_pin = -1;
573 } else {
574 sc->sc_ah->gpio_mask = pdata->gpio_mask;
575 sc->sc_ah->gpio_val = pdata->gpio_val;
576 sc->sc_ah->led_pin = pdata->led_pin;
577 ah->is_clk_25mhz = pdata->is_clk_25mhz;
578 }
556 579
557 common = ath9k_hw_common(ah); 580 common = ath9k_hw_common(ah);
558 common->ops = &ath9k_common_ops; 581 common->ops = &ah->reg_ops;
559 common->bus_ops = bus_ops; 582 common->bus_ops = bus_ops;
560 common->ah = ah; 583 common->ah = ah;
561 common->hw = sc->hw; 584 common->hw = sc->hw;
@@ -587,6 +610,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
587 if (ret) 610 if (ret)
588 goto err_hw; 611 goto err_hw;
589 612
613 if (pdata && pdata->macaddr)
614 memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
615
590 ret = ath9k_init_queues(sc); 616 ret = ath9k_init_queues(sc);
591 if (ret) 617 if (ret)
592 goto err_queues; 618 goto err_queues;
@@ -679,6 +705,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
679 if (AR_SREV_5416(sc->sc_ah)) 705 if (AR_SREV_5416(sc->sc_ah))
680 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 706 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
681 707
708 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
709
682 hw->queues = 4; 710 hw->queues = 4;
683 hw->max_rates = 4; 711 hw->max_rates = 4;
684 hw->channel_change_time = 5000; 712 hw->channel_change_time = 5000;
@@ -773,6 +801,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
773 801
774 INIT_WORK(&sc->hw_check_work, ath_hw_check); 802 INIT_WORK(&sc->hw_check_work, ath_hw_check);
775 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); 803 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
804 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
776 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 805 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
777 806
778 ath_init_leds(sc); 807 ath_init_leds(sc);
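The init path above now consumes board data from <linux/ath9k_platform.h> when dev->platform_data is set. A minimal sketch of how a board file might populate it; only the field names dereferenced in this hunk (macaddr, gpio_mask, gpio_val, led_pin, is_clk_25mhz) are assumed, and every value here is made up:

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/ath9k_platform.h>

static u8 example_mac[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 };

/* Illustrative only: a real board would register this next to its
 * wireless MAC platform device. */
static struct ath9k_platform_data example_ath9k_pdata = {
        .macaddr      = example_mac,
        .gpio_mask    = 0x000f,
        .gpio_val     = 0x0001,
        .led_pin      = 1,
        .is_clk_25mhz = true,
};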
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index edc1cbbfecaf..bd6d2b9d736f 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -209,15 +209,8 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
209{ 209{
210 u32 cw; 210 u32 cw;
211 struct ath_common *common = ath9k_hw_common(ah); 211 struct ath_common *common = ath9k_hw_common(ah);
212 struct ath9k_hw_capabilities *pCap = &ah->caps;
213 struct ath9k_tx_queue_info *qi; 212 struct ath9k_tx_queue_info *qi;
214 213
215 if (q >= pCap->total_queues) {
216 ath_dbg(common, ATH_DBG_QUEUE,
217 "Set TXQ properties, invalid queue: %u\n", q);
218 return false;
219 }
220
221 qi = &ah->txq[q]; 214 qi = &ah->txq[q];
222 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 215 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
223 ath_dbg(common, ATH_DBG_QUEUE, 216 ath_dbg(common, ATH_DBG_QUEUE,
@@ -280,15 +273,8 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
280 struct ath9k_tx_queue_info *qinfo) 273 struct ath9k_tx_queue_info *qinfo)
281{ 274{
282 struct ath_common *common = ath9k_hw_common(ah); 275 struct ath_common *common = ath9k_hw_common(ah);
283 struct ath9k_hw_capabilities *pCap = &ah->caps;
284 struct ath9k_tx_queue_info *qi; 276 struct ath9k_tx_queue_info *qi;
285 277
286 if (q >= pCap->total_queues) {
287 ath_dbg(common, ATH_DBG_QUEUE,
288 "Get TXQ properties, invalid queue: %u\n", q);
289 return false;
290 }
291
292 qi = &ah->txq[q]; 278 qi = &ah->txq[q];
293 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 279 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
294 ath_dbg(common, ATH_DBG_QUEUE, 280 ath_dbg(common, ATH_DBG_QUEUE,
@@ -320,28 +306,27 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
320{ 306{
321 struct ath_common *common = ath9k_hw_common(ah); 307 struct ath_common *common = ath9k_hw_common(ah);
322 struct ath9k_tx_queue_info *qi; 308 struct ath9k_tx_queue_info *qi;
323 struct ath9k_hw_capabilities *pCap = &ah->caps;
324 int q; 309 int q;
325 310
326 switch (type) { 311 switch (type) {
327 case ATH9K_TX_QUEUE_BEACON: 312 case ATH9K_TX_QUEUE_BEACON:
328 q = pCap->total_queues - 1; 313 q = ATH9K_NUM_TX_QUEUES - 1;
329 break; 314 break;
330 case ATH9K_TX_QUEUE_CAB: 315 case ATH9K_TX_QUEUE_CAB:
331 q = pCap->total_queues - 2; 316 q = ATH9K_NUM_TX_QUEUES - 2;
332 break; 317 break;
333 case ATH9K_TX_QUEUE_PSPOLL: 318 case ATH9K_TX_QUEUE_PSPOLL:
334 q = 1; 319 q = 1;
335 break; 320 break;
336 case ATH9K_TX_QUEUE_UAPSD: 321 case ATH9K_TX_QUEUE_UAPSD:
337 q = pCap->total_queues - 3; 322 q = ATH9K_NUM_TX_QUEUES - 3;
338 break; 323 break;
339 case ATH9K_TX_QUEUE_DATA: 324 case ATH9K_TX_QUEUE_DATA:
340 for (q = 0; q < pCap->total_queues; q++) 325 for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
341 if (ah->txq[q].tqi_type == 326 if (ah->txq[q].tqi_type ==
342 ATH9K_TX_QUEUE_INACTIVE) 327 ATH9K_TX_QUEUE_INACTIVE)
343 break; 328 break;
344 if (q == pCap->total_queues) { 329 if (q == ATH9K_NUM_TX_QUEUES) {
345 ath_err(common, "No available TX queue\n"); 330 ath_err(common, "No available TX queue\n");
346 return -1; 331 return -1;
347 } 332 }
@@ -382,15 +367,9 @@ EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
382 367
383bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) 368bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
384{ 369{
385 struct ath9k_hw_capabilities *pCap = &ah->caps;
386 struct ath_common *common = ath9k_hw_common(ah); 370 struct ath_common *common = ath9k_hw_common(ah);
387 struct ath9k_tx_queue_info *qi; 371 struct ath9k_tx_queue_info *qi;
388 372
389 if (q >= pCap->total_queues) {
390 ath_dbg(common, ATH_DBG_QUEUE,
391 "Release TXQ, invalid queue: %u\n", q);
392 return false;
393 }
394 qi = &ah->txq[q]; 373 qi = &ah->txq[q];
395 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 374 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
396 ath_dbg(common, ATH_DBG_QUEUE, 375 ath_dbg(common, ATH_DBG_QUEUE,
@@ -414,18 +393,11 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
414 393
415bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) 394bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
416{ 395{
417 struct ath9k_hw_capabilities *pCap = &ah->caps;
418 struct ath_common *common = ath9k_hw_common(ah); 396 struct ath_common *common = ath9k_hw_common(ah);
419 struct ath9k_channel *chan = ah->curchan; 397 struct ath9k_channel *chan = ah->curchan;
420 struct ath9k_tx_queue_info *qi; 398 struct ath9k_tx_queue_info *qi;
421 u32 cwMin, chanCwMin, value; 399 u32 cwMin, chanCwMin, value;
422 400
423 if (q >= pCap->total_queues) {
424 ath_dbg(common, ATH_DBG_QUEUE,
425 "Reset TXQ, invalid queue: %u\n", q);
426 return false;
427 }
428
429 qi = &ah->txq[q]; 401 qi = &ah->txq[q];
430 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 402 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
431 ath_dbg(common, ATH_DBG_QUEUE, 403 ath_dbg(common, ATH_DBG_QUEUE,
@@ -458,17 +430,21 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
458 SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)); 430 SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
459 431
460 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); 432 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
461 REG_WRITE(ah, AR_DMISC(q), 433
462 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); 434 if (AR_SREV_9340(ah))
435 REG_WRITE(ah, AR_DMISC(q),
436 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
437 else
438 REG_WRITE(ah, AR_DMISC(q),
439 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
463 440
464 if (qi->tqi_cbrPeriod) { 441 if (qi->tqi_cbrPeriod) {
465 REG_WRITE(ah, AR_QCBRCFG(q), 442 REG_WRITE(ah, AR_QCBRCFG(q),
466 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | 443 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
467 SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH)); 444 SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
468 REG_WRITE(ah, AR_QMISC(q), 445 REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
469 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR | 446 (qi->tqi_cbrOverflowLimit ?
470 (qi->tqi_cbrOverflowLimit ? 447 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
471 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
472 } 448 }
473 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) { 449 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
474 REG_WRITE(ah, AR_QRDYTIMECFG(q), 450 REG_WRITE(ah, AR_QRDYTIMECFG(q),
@@ -481,40 +457,31 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
481 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0)); 457 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
482 458
483 if (qi->tqi_burstTime 459 if (qi->tqi_burstTime
484 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) { 460 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
485 REG_WRITE(ah, AR_QMISC(q), 461 REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);
486 REG_READ(ah, AR_QMISC(q)) |
487 AR_Q_MISC_RDYTIME_EXP_POLICY);
488 462
489 } 463 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
490 464 REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
491 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
492 REG_WRITE(ah, AR_DMISC(q),
493 REG_READ(ah, AR_DMISC(q)) |
494 AR_D_MISC_POST_FR_BKOFF_DIS);
495 }
496 465
497 REGWRITE_BUFFER_FLUSH(ah); 466 REGWRITE_BUFFER_FLUSH(ah);
498 467
499 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { 468 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
500 REG_WRITE(ah, AR_DMISC(q), 469 REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);
501 REG_READ(ah, AR_DMISC(q)) | 470
502 AR_D_MISC_FRAG_BKOFF_EN);
503 }
504 switch (qi->tqi_type) { 471 switch (qi->tqi_type) {
505 case ATH9K_TX_QUEUE_BEACON: 472 case ATH9K_TX_QUEUE_BEACON:
506 ENABLE_REGWRITE_BUFFER(ah); 473 ENABLE_REGWRITE_BUFFER(ah);
507 474
508 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 475 REG_SET_BIT(ah, AR_QMISC(q),
509 | AR_Q_MISC_FSP_DBA_GATED 476 AR_Q_MISC_FSP_DBA_GATED
510 | AR_Q_MISC_BEACON_USE 477 | AR_Q_MISC_BEACON_USE
511 | AR_Q_MISC_CBR_INCR_DIS1); 478 | AR_Q_MISC_CBR_INCR_DIS1);
512 479
513 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 480 REG_SET_BIT(ah, AR_DMISC(q),
514 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 481 (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
515 AR_D_MISC_ARB_LOCKOUT_CNTRL_S) 482 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
516 | AR_D_MISC_BEACON_USE 483 | AR_D_MISC_BEACON_USE
517 | AR_D_MISC_POST_FR_BKOFF_DIS); 484 | AR_D_MISC_POST_FR_BKOFF_DIS);
518 485
519 REGWRITE_BUFFER_FLUSH(ah); 486 REGWRITE_BUFFER_FLUSH(ah);
520 487
@@ -533,41 +500,38 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
533 case ATH9K_TX_QUEUE_CAB: 500 case ATH9K_TX_QUEUE_CAB:
534 ENABLE_REGWRITE_BUFFER(ah); 501 ENABLE_REGWRITE_BUFFER(ah);
535 502
536 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 503 REG_SET_BIT(ah, AR_QMISC(q),
537 | AR_Q_MISC_FSP_DBA_GATED 504 AR_Q_MISC_FSP_DBA_GATED
538 | AR_Q_MISC_CBR_INCR_DIS1 505 | AR_Q_MISC_CBR_INCR_DIS1
539 | AR_Q_MISC_CBR_INCR_DIS0); 506 | AR_Q_MISC_CBR_INCR_DIS0);
540 value = (qi->tqi_readyTime - 507 value = (qi->tqi_readyTime -
541 (ah->config.sw_beacon_response_time - 508 (ah->config.sw_beacon_response_time -
542 ah->config.dma_beacon_response_time) - 509 ah->config.dma_beacon_response_time) -
543 ah->config.additional_swba_backoff) * 1024; 510 ah->config.additional_swba_backoff) * 1024;
544 REG_WRITE(ah, AR_QRDYTIMECFG(q), 511 REG_WRITE(ah, AR_QRDYTIMECFG(q),
545 value | AR_Q_RDYTIMECFG_EN); 512 value | AR_Q_RDYTIMECFG_EN);
546 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 513 REG_SET_BIT(ah, AR_DMISC(q),
547 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 514 (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
548 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); 515 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
549 516
550 REGWRITE_BUFFER_FLUSH(ah); 517 REGWRITE_BUFFER_FLUSH(ah);
551 518
552 break; 519 break;
553 case ATH9K_TX_QUEUE_PSPOLL: 520 case ATH9K_TX_QUEUE_PSPOLL:
554 REG_WRITE(ah, AR_QMISC(q), 521 REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
555 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
556 break; 522 break;
557 case ATH9K_TX_QUEUE_UAPSD: 523 case ATH9K_TX_QUEUE_UAPSD:
558 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) | 524 REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
559 AR_D_MISC_POST_FR_BKOFF_DIS);
560 break; 525 break;
561 default: 526 default:
562 break; 527 break;
563 } 528 }
564 529
565 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) { 530 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
566 REG_WRITE(ah, AR_DMISC(q), 531 REG_SET_BIT(ah, AR_DMISC(q),
567 REG_READ(ah, AR_DMISC(q)) | 532 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
568 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL, 533 AR_D_MISC_ARB_LOCKOUT_CNTRL) |
569 AR_D_MISC_ARB_LOCKOUT_CNTRL) | 534 AR_D_MISC_POST_FR_BKOFF_DIS);
570 AR_D_MISC_POST_FR_BKOFF_DIS);
571 } 535 }
572 536
573 if (AR_SREV_9300_20_OR_LATER(ah)) 537 if (AR_SREV_9300_20_OR_LATER(ah))
@@ -754,7 +718,6 @@ EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
754bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset) 718bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
755{ 719{
756#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 720#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
757#define AH_RX_TIME_QUANTUM 100 /* usec */
758 struct ath_common *common = ath9k_hw_common(ah); 721 struct ath_common *common = ath9k_hw_common(ah);
759 u32 mac_status, last_mac_status = 0; 722 u32 mac_status, last_mac_status = 0;
760 int i; 723 int i;
@@ -797,7 +760,6 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
797 return true; 760 return true;
798 } 761 }
799 762
800#undef AH_RX_TIME_QUANTUM
801#undef AH_RX_STOP_DMA_TIMEOUT 763#undef AH_RX_STOP_DMA_TIMEOUT
802} 764}
803EXPORT_SYMBOL(ath9k_hw_stopdmarecv); 765EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
@@ -855,10 +817,14 @@ EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
855void ath9k_hw_enable_interrupts(struct ath_hw *ah) 817void ath9k_hw_enable_interrupts(struct ath_hw *ah)
856{ 818{
857 struct ath_common *common = ath9k_hw_common(ah); 819 struct ath_common *common = ath9k_hw_common(ah);
820 u32 sync_default = AR_INTR_SYNC_DEFAULT;
858 821
859 if (!(ah->imask & ATH9K_INT_GLOBAL)) 822 if (!(ah->imask & ATH9K_INT_GLOBAL))
860 return; 823 return;
861 824
825 if (AR_SREV_9340(ah))
826 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
827
862 ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n"); 828 ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
863 REG_WRITE(ah, AR_IER, AR_IER_ENABLE); 829 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
864 if (!AR_SREV_9100(ah)) { 830 if (!AR_SREV_9100(ah)) {
@@ -867,10 +833,8 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
867 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); 833 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
868 834
869 835
870 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 836 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
871 AR_INTR_SYNC_DEFAULT); 837 REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
872 REG_WRITE(ah, AR_INTR_SYNC_MASK,
873 AR_INTR_SYNC_DEFAULT);
874 } 838 }
875 ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", 839 ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
876 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); 840 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
@@ -926,6 +890,9 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
926 mask |= AR_IMR_GENTMR; 890 mask |= AR_IMR_GENTMR;
927 } 891 }
928 892
893 if (ints & ATH9K_INT_GENTIMER)
894 mask |= AR_IMR_GENTMR;
895
929 if (ints & (ATH9K_INT_BMISC)) { 896 if (ints & (ATH9K_INT_BMISC)) {
930 mask |= AR_IMR_BCNMISC; 897 mask |= AR_IMR_BCNMISC;
931 if (ints & ATH9K_INT_TIM) 898 if (ints & ATH9K_INT_TIM)
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index c2a59386fb9c..b60c130917f7 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -239,7 +239,6 @@ struct ath_desc {
239 void *ds_vdata; 239 void *ds_vdata;
240} __packed __aligned(4); 240} __packed __aligned(4);
241 241
242#define ATH9K_TXDESC_CLRDMASK 0x0001
243#define ATH9K_TXDESC_NOACK 0x0002 242#define ATH9K_TXDESC_NOACK 0x0002
244#define ATH9K_TXDESC_RTSENA 0x0004 243#define ATH9K_TXDESC_RTSENA 0x0004
245#define ATH9K_TXDESC_CTSENA 0x0008 244#define ATH9K_TXDESC_CTSENA 0x0008
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1482fa650833..17ebdf1e8b7b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -299,7 +299,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
299 299
300 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) { 300 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
301 if (sc->sc_flags & SC_OP_BEACONS) 301 if (sc->sc_flags & SC_OP_BEACONS)
302 ath_beacon_config(sc, NULL); 302 ath_set_beacon(sc);
303 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 303 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
304 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2); 304 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
305 ath_start_ani(common); 305 ath_start_ani(common);
@@ -624,6 +624,43 @@ out:
624 ath9k_ps_restore(sc); 624 ath9k_ps_restore(sc);
625} 625}
626 626
627static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
628{
629 static int count;
630 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
631
632 if (pll_sqsum >= 0x40000) {
633 count++;
634 if (count == 3) {
635 /* Rx is hung for more than 500ms. Reset it */
636 ath_dbg(common, ATH_DBG_RESET,
637 "Possible RX hang, resetting");
638 ath_reset(sc, true);
639 count = 0;
640 }
641 } else
642 count = 0;
643}
644
645void ath_hw_pll_work(struct work_struct *work)
646{
647 struct ath_softc *sc = container_of(work, struct ath_softc,
648 hw_pll_work.work);
649 u32 pll_sqsum;
650
651 if (AR_SREV_9485(sc->sc_ah)) {
652
653 ath9k_ps_wakeup(sc);
654 pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
655 ath9k_ps_restore(sc);
656
657 ath_hw_pll_rx_hang_check(sc, pll_sqsum);
658
659 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
660 }
661}
662
663
627void ath9k_tasklet(unsigned long data) 664void ath9k_tasklet(unsigned long data)
628{ 665{
629 struct ath_softc *sc = (struct ath_softc *)data; 666 struct ath_softc *sc = (struct ath_softc *)data;
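ath_hw_pll_work() above polls the AR9485 PLL every HZ/5 (200 ms) and resets the chip after three consecutive samples with pll_sqsum >= 0x40000, which is what the "more than 500ms" comment refers to. A stand-alone model of that debounce; illustrative only, the driver keeps the counter in a function-local static:

#include <stdbool.h>
#include <stdint.h>

#define PLL_SQSUM_HANG_THRESH 0x40000u  /* threshold used above               */
#define PLL_HANG_SAMPLES      3         /* 3 polls x 200 ms is roughly 600 ms */

/* Returns true when the caller should reset the hardware. */
static bool pll_rx_hang(uint32_t pll_sqsum, int *count)
{
        if (pll_sqsum < PLL_SQSUM_HANG_THRESH) {
                *count = 0;
                return false;
        }
        if (++(*count) < PLL_HANG_SAMPLES)
                return false;
        *count = 0;
        return true;
}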
@@ -652,6 +689,17 @@ void ath9k_tasklet(unsigned long data)
652 !ath9k_hw_check_alive(ah)) 689 !ath9k_hw_check_alive(ah))
653 ieee80211_queue_work(sc->hw, &sc->hw_check_work); 690 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
654 691
692 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
693 /*
694 * TSF sync does not look correct; remain awake to sync with
695 * the next Beacon.
696 */
697 ath_dbg(common, ATH_DBG_PS,
698 "TSFOOR - Sync with next Beacon\n");
699 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC |
700 PS_TSFOOR_SYNC;
701 }
702
655 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 703 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
656 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL | 704 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
657 ATH9K_INT_RXORN); 705 ATH9K_INT_RXORN);
@@ -674,16 +722,6 @@ void ath9k_tasklet(unsigned long data)
674 ath_tx_tasklet(sc); 722 ath_tx_tasklet(sc);
675 } 723 }
676 724
677 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
678 /*
679 * TSF sync does not look correct; remain awake to sync with
680 * the next Beacon.
681 */
682 ath_dbg(common, ATH_DBG_PS,
683 "TSFOOR - Sync with next Beacon\n");
684 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
685 }
686
687 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 725 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
688 if (status & ATH9K_INT_GENTIMER) 726 if (status & ATH9K_INT_GENTIMER)
689 ath_gen_timer_isr(sc->sc_ah); 727 ath_gen_timer_isr(sc->sc_ah);
@@ -828,48 +866,6 @@ chip_reset:
828#undef SCHED_INTR 866#undef SCHED_INTR
829} 867}
830 868
831static void ath9k_bss_assoc_info(struct ath_softc *sc,
832 struct ieee80211_hw *hw,
833 struct ieee80211_vif *vif,
834 struct ieee80211_bss_conf *bss_conf)
835{
836 struct ath_hw *ah = sc->sc_ah;
837 struct ath_common *common = ath9k_hw_common(ah);
838
839 if (bss_conf->assoc) {
840 ath_dbg(common, ATH_DBG_CONFIG,
841 "Bss Info ASSOC %d, bssid: %pM\n",
842 bss_conf->aid, common->curbssid);
843
844 /* New association, store aid */
845 common->curaid = bss_conf->aid;
846 ath9k_hw_write_associd(ah);
847
848 /*
849 * Request a re-configuration of Beacon related timers
850 * on the receipt of the first Beacon frame (i.e.,
851 * after time sync with the AP).
852 */
853 sc->ps_flags |= PS_BEACON_SYNC;
854
855 /* Configure the beacon */
856 ath_beacon_config(sc, vif);
857
858 /* Reset rssi stats */
859 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
860 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
861
862 sc->sc_flags |= SC_OP_ANI_RUN;
863 ath_start_ani(common);
864 } else {
865 ath_dbg(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
866 common->curaid = 0;
867 /* Stop ANI */
868 sc->sc_flags &= ~SC_OP_ANI_RUN;
869 del_timer_sync(&common->ani.timer);
870 }
871}
872
873void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) 869void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
874{ 870{
875 struct ath_hw *ah = sc->sc_ah; 871 struct ath_hw *ah = sc->sc_ah;
@@ -899,7 +895,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
899 goto out; 895 goto out;
900 } 896 }
901 if (sc->sc_flags & SC_OP_BEACONS) 897 if (sc->sc_flags & SC_OP_BEACONS)
902 ath_beacon_config(sc, NULL); /* restart beacons */ 898 ath_set_beacon(sc); /* restart beacons */
903 899
904 /* Re-Enable interrupts */ 900 /* Re-Enable interrupts */
905 ath9k_hw_set_interrupts(ah, ah->imask); 901 ath9k_hw_set_interrupts(ah, ah->imask);
@@ -1006,7 +1002,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1006 sc->config.txpowlimit, &sc->curtxpow); 1002 sc->config.txpowlimit, &sc->curtxpow);
1007 1003
1008 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL))) 1004 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
1009 ath_beacon_config(sc, NULL); /* restart beacons */ 1005 ath_set_beacon(sc); /* restart beacons */
1010 1006
1011 ath9k_hw_set_interrupts(ah, ah->imask); 1007 ath9k_hw_set_interrupts(ah, ah->imask);
1012 1008
@@ -1389,7 +1385,9 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1389 ath9k_hw_set_tsfadjust(ah, 0); 1385 ath9k_hw_set_tsfadjust(ah, 0);
1390 sc->sc_flags &= ~SC_OP_TSF_RESET; 1386 sc->sc_flags &= ~SC_OP_TSF_RESET;
1391 1387
1392 if (iter_data.nwds + iter_data.nmeshes) 1388 if (iter_data.nmeshes)
1389 ah->opmode = NL80211_IFTYPE_MESH_POINT;
1390 else if (iter_data.nwds)
1393 ah->opmode = NL80211_IFTYPE_AP; 1391 ah->opmode = NL80211_IFTYPE_AP;
1394 else if (iter_data.nadhocs) 1392 else if (iter_data.nadhocs)
1395 ah->opmode = NL80211_IFTYPE_ADHOC; 1393 ah->opmode = NL80211_IFTYPE_ADHOC;
@@ -1413,6 +1411,7 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1413 1411
1414 /* Set up ANI */ 1412 /* Set up ANI */
1415 if ((iter_data.naps + iter_data.nadhocs) > 0) { 1413 if ((iter_data.naps + iter_data.nadhocs) > 0) {
1414 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1416 sc->sc_flags |= SC_OP_ANI_RUN; 1415 sc->sc_flags |= SC_OP_ANI_RUN;
1417 ath_start_ani(common); 1416 ath_start_ani(common);
1418 } else { 1417 } else {
@@ -1452,7 +1451,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1452 struct ath_softc *sc = hw->priv; 1451 struct ath_softc *sc = hw->priv;
1453 struct ath_hw *ah = sc->sc_ah; 1452 struct ath_hw *ah = sc->sc_ah;
1454 struct ath_common *common = ath9k_hw_common(ah); 1453 struct ath_common *common = ath9k_hw_common(ah);
1455 struct ath_vif *avp = (void *)vif->drv_priv;
1456 int ret = 0; 1454 int ret = 0;
1457 1455
1458 ath9k_ps_wakeup(sc); 1456 ath9k_ps_wakeup(sc);
@@ -1482,8 +1480,9 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1482 } 1480 }
1483 } 1481 }
1484 1482
1485 if ((vif->type == NL80211_IFTYPE_ADHOC) && 1483 if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
1486 sc->nvifs > 0) { 1484 ((vif->type == NL80211_IFTYPE_ADHOC) &&
1485 sc->nvifs > 0)) {
1487 ath_err(common, "Cannot create ADHOC interface when other" 1486 ath_err(common, "Cannot create ADHOC interface when other"
1488 " interfaces already exist.\n"); 1487 " interfaces already exist.\n");
1489 ret = -EINVAL; 1488 ret = -EINVAL;
@@ -1493,10 +1492,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1493 ath_dbg(common, ATH_DBG_CONFIG, 1492 ath_dbg(common, ATH_DBG_CONFIG,
1494 "Attach a VIF of type: %d\n", vif->type); 1493 "Attach a VIF of type: %d\n", vif->type);
1495 1494
1496 /* Set the VIF opmode */
1497 avp->av_opmode = vif->type;
1498 avp->av_bslot = -1;
1499
1500 sc->nvifs++; 1495 sc->nvifs++;
1501 1496
1502 ath9k_do_vif_add_setup(hw, vif); 1497 ath9k_do_vif_add_setup(hw, vif);
@@ -1782,23 +1777,68 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1782 struct ieee80211_sta *sta) 1777 struct ieee80211_sta *sta)
1783{ 1778{
1784 struct ath_softc *sc = hw->priv; 1779 struct ath_softc *sc = hw->priv;
1780 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1781 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1782 struct ieee80211_key_conf ps_key = { };
1785 1783
1786 ath_node_attach(sc, sta); 1784 ath_node_attach(sc, sta);
1787 1785
1786 if (vif->type != NL80211_IFTYPE_AP &&
1787 vif->type != NL80211_IFTYPE_AP_VLAN)
1788 return 0;
1789
1790 an->ps_key = ath_key_config(common, vif, sta, &ps_key);
1791
1788 return 0; 1792 return 0;
1789} 1793}
1790 1794
1795static void ath9k_del_ps_key(struct ath_softc *sc,
1796 struct ieee80211_vif *vif,
1797 struct ieee80211_sta *sta)
1798{
1799 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1800 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1801 struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
1802
1803 if (!an->ps_key)
1804 return;
1805
1806 ath_key_delete(common, &ps_key);
1807}
1808
1791static int ath9k_sta_remove(struct ieee80211_hw *hw, 1809static int ath9k_sta_remove(struct ieee80211_hw *hw,
1792 struct ieee80211_vif *vif, 1810 struct ieee80211_vif *vif,
1793 struct ieee80211_sta *sta) 1811 struct ieee80211_sta *sta)
1794{ 1812{
1795 struct ath_softc *sc = hw->priv; 1813 struct ath_softc *sc = hw->priv;
1796 1814
1815 ath9k_del_ps_key(sc, vif, sta);
1797 ath_node_detach(sc, sta); 1816 ath_node_detach(sc, sta);
1798 1817
1799 return 0; 1818 return 0;
1800} 1819}
1801 1820
1821static void ath9k_sta_notify(struct ieee80211_hw *hw,
1822 struct ieee80211_vif *vif,
1823 enum sta_notify_cmd cmd,
1824 struct ieee80211_sta *sta)
1825{
1826 struct ath_softc *sc = hw->priv;
1827 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1828
1829 switch (cmd) {
1830 case STA_NOTIFY_SLEEP:
1831 an->sleeping = true;
1832 if (ath_tx_aggr_sleep(sc, an))
1833 ieee80211_sta_set_tim(sta);
1834 break;
1835 case STA_NOTIFY_AWAKE:
1836 an->sleeping = false;
1837 ath_tx_aggr_wakeup(sc, an);
1838 break;
1839 }
1840}
1841
1802static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, 1842static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1803 const struct ieee80211_tx_queue_params *params) 1843 const struct ieee80211_tx_queue_params *params)
1804{ 1844{
@@ -1855,12 +1895,29 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1855 if (ath9k_modparam_nohwcrypt) 1895 if (ath9k_modparam_nohwcrypt)
1856 return -ENOSPC; 1896 return -ENOSPC;
1857 1897
1898 if (vif->type == NL80211_IFTYPE_ADHOC &&
1899 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
1900 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
1901 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1902 /*
1903 * For now, disable hw crypto for the RSN IBSS group keys. This
1904 * could be optimized in the future to use a modified key cache
1905 * design to support per-STA RX GTK, but until that gets
1906 * implemented, use of software crypto for group addressed
1907 * frames is acceptable to allow RSN IBSS to be used.
1908 */
1909 return -EOPNOTSUPP;
1910 }
1911
1858 mutex_lock(&sc->mutex); 1912 mutex_lock(&sc->mutex);
1859 ath9k_ps_wakeup(sc); 1913 ath9k_ps_wakeup(sc);
1860 ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n"); 1914 ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
1861 1915
1862 switch (cmd) { 1916 switch (cmd) {
1863 case SET_KEY: 1917 case SET_KEY:
1918 if (sta)
1919 ath9k_del_ps_key(sc, vif, sta);
1920
1864 ret = ath_key_config(common, vif, sta, key); 1921 ret = ath_key_config(common, vif, sta, key);
1865 if (ret >= 0) { 1922 if (ret >= 0) {
1866 key->hw_key_idx = ret; 1923 key->hw_key_idx = ret;
@@ -1886,6 +1943,92 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1886 1943
1887 return ret; 1944 return ret;
1888} 1945}
1946static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1947{
1948 struct ath_softc *sc = data;
1949 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1950 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1951 struct ath_vif *avp = (void *)vif->drv_priv;
1952
1953 switch (sc->sc_ah->opmode) {
1954 case NL80211_IFTYPE_ADHOC:
1955 /* There can be only one vif available */
1956 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1957 common->curaid = bss_conf->aid;
1958 ath9k_hw_write_associd(sc->sc_ah);
1959 /* configure beacon */
1960 if (bss_conf->enable_beacon)
1961 ath_beacon_config(sc, vif);
1962 break;
1963 case NL80211_IFTYPE_STATION:
1964 /*
1965 * Skip iteration if primary station vif's bss info
1966 * was not changed
1967 */
1968 if (sc->sc_flags & SC_OP_PRIM_STA_VIF)
1969 break;
1970
1971 if (bss_conf->assoc) {
1972 sc->sc_flags |= SC_OP_PRIM_STA_VIF;
1973 avp->primary_sta_vif = true;
1974 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1975 common->curaid = bss_conf->aid;
1976 ath9k_hw_write_associd(sc->sc_ah);
1977 ath_dbg(common, ATH_DBG_CONFIG,
1978 "Bss Info ASSOC %d, bssid: %pM\n",
1979 bss_conf->aid, common->curbssid);
1980 ath_beacon_config(sc, vif);
1981 /*
1982 * Request a re-configuration of Beacon related timers
1983 * on the receipt of the first Beacon frame (i.e.,
1984 * after time sync with the AP).
1985 */
1986 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
1987 /* Reset rssi stats */
1988 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
1989 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1990
1991 sc->sc_flags |= SC_OP_ANI_RUN;
1992 ath_start_ani(common);
1993 }
1994 break;
1995 default:
1996 break;
1997 }
1998}
1999
2000static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
2001{
2002 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2003 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2004 struct ath_vif *avp = (void *)vif->drv_priv;
2005
2006 /* Reconfigure bss info */
2007 if (avp->primary_sta_vif && !bss_conf->assoc) {
2008 ath_dbg(common, ATH_DBG_CONFIG,
2009 "Bss Info DISASSOC %d, bssid %pM\n",
2010 common->curaid, common->curbssid);
2011 sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS);
2012 avp->primary_sta_vif = false;
2013 memset(common->curbssid, 0, ETH_ALEN);
2014 common->curaid = 0;
2015 }
2016
2017 ieee80211_iterate_active_interfaces_atomic(
2018 sc->hw, ath9k_bss_iter, sc);
2019
2020 /*
2021 * None of the station vifs are associated.
2022 * Clear bssid & aid
2023 */
2024 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
2025 !(sc->sc_flags & SC_OP_PRIM_STA_VIF)) {
2026 ath9k_hw_write_associd(sc->sc_ah);
2027 /* Stop ANI */
2028 sc->sc_flags &= ~SC_OP_ANI_RUN;
2029 del_timer_sync(&common->ani.timer);
2030 }
2031}
1889 2032
1890static void ath9k_bss_info_changed(struct ieee80211_hw *hw, 2033static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1891 struct ieee80211_vif *vif, 2034 struct ieee80211_vif *vif,
@@ -1893,7 +2036,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1893 u32 changed) 2036 u32 changed)
1894{ 2037{
1895 struct ath_softc *sc = hw->priv; 2038 struct ath_softc *sc = hw->priv;
1896 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
1897 struct ath_hw *ah = sc->sc_ah; 2039 struct ath_hw *ah = sc->sc_ah;
1898 struct ath_common *common = ath9k_hw_common(ah); 2040 struct ath_common *common = ath9k_hw_common(ah);
1899 struct ath_vif *avp = (void *)vif->drv_priv; 2041 struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1904,20 +2046,10 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1904 mutex_lock(&sc->mutex); 2046 mutex_lock(&sc->mutex);
1905 2047
1906 if (changed & BSS_CHANGED_BSSID) { 2048 if (changed & BSS_CHANGED_BSSID) {
1907 /* Set BSSID */ 2049 ath9k_config_bss(sc, vif);
1908 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1909 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
1910 common->curaid = 0;
1911 ath9k_hw_write_associd(ah);
1912
1913 /* Set aggregation protection mode parameters */
1914 sc->config.ath_aggr_prot = 0;
1915 2050
1916 ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n", 2051 ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n",
1917 common->curbssid, common->curaid); 2052 common->curbssid, common->curaid);
1918
1919 /* need to reconfigure the beacon */
1920 sc->sc_flags &= ~SC_OP_BEACONS ;
1921 } 2053 }
1922 2054
1923 /* Enable transmission of beacons (AP, IBSS, MESH) */ 2055 /* Enable transmission of beacons (AP, IBSS, MESH) */
@@ -1958,7 +2090,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1958 } 2090 }
1959 2091
1960 if (changed & BSS_CHANGED_BEACON_INT) { 2092 if (changed & BSS_CHANGED_BEACON_INT) {
1961 cur_conf->beacon_interval = bss_conf->beacon_int;
1962 /* 2093 /*
1963 * In case of AP mode, the HW TSF has to be reset 2094 * In case of AP mode, the HW TSF has to be reset
1964 * when the beacon interval changes. 2095 * when the beacon interval changes.
@@ -1970,9 +2101,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1970 if (!error) 2101 if (!error)
1971 ath_beacon_config(sc, vif); 2102 ath_beacon_config(sc, vif);
1972 ath9k_set_beaconing_status(sc, true); 2103 ath9k_set_beaconing_status(sc, true);
1973 } else { 2104 } else
1974 ath_beacon_config(sc, vif); 2105 ath_beacon_config(sc, vif);
1975 }
1976 } 2106 }
1977 2107
1978 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2108 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -1994,12 +2124,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1994 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE; 2124 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
1995 } 2125 }
1996 2126
1997 if (changed & BSS_CHANGED_ASSOC) {
1998 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1999 bss_conf->assoc);
2000 ath9k_bss_assoc_info(sc, hw, vif, bss_conf);
2001 }
2002
2003 mutex_unlock(&sc->mutex); 2127 mutex_unlock(&sc->mutex);
2004 ath9k_ps_restore(sc); 2128 ath9k_ps_restore(sc);
2005} 2129}
@@ -2145,10 +2269,9 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2145 struct ath_common *common = ath9k_hw_common(ah); 2269 struct ath_common *common = ath9k_hw_common(ah);
2146 int timeout = 200; /* ms */ 2270 int timeout = 200; /* ms */
2147 int i, j; 2271 int i, j;
2272 bool drain_txq;
2148 2273
2149 ath9k_ps_wakeup(sc);
2150 mutex_lock(&sc->mutex); 2274 mutex_lock(&sc->mutex);
2151
2152 cancel_delayed_work_sync(&sc->tx_complete_work); 2275 cancel_delayed_work_sync(&sc->tx_complete_work);
2153 2276
2154 if (sc->sc_flags & SC_OP_INVALID) { 2277 if (sc->sc_flags & SC_OP_INVALID) {
@@ -2161,7 +2284,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2161 timeout = 1; 2284 timeout = 1;
2162 2285
2163 for (j = 0; j < timeout; j++) { 2286 for (j = 0; j < timeout; j++) {
2164 int npend = 0; 2287 bool npend = false;
2165 2288
2166 if (j) 2289 if (j)
2167 usleep_range(1000, 2000); 2290 usleep_range(1000, 2000);
@@ -2170,22 +2293,43 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2170 if (!ATH_TXQ_SETUP(sc, i)) 2293 if (!ATH_TXQ_SETUP(sc, i))
2171 continue; 2294 continue;
2172 2295
2173 npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]); 2296 npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
2297
2298 if (npend)
2299 break;
2174 } 2300 }
2175 2301
2176 if (!npend) 2302 if (!npend)
2177 goto out; 2303 goto out;
2178 } 2304 }
2179 2305
2180 if (!ath_drain_all_txq(sc, false)) 2306 ath9k_ps_wakeup(sc);
2307 spin_lock_bh(&sc->sc_pcu_lock);
2308 drain_txq = ath_drain_all_txq(sc, false);
2309 spin_unlock_bh(&sc->sc_pcu_lock);
2310 if (!drain_txq)
2181 ath_reset(sc, false); 2311 ath_reset(sc, false);
2182 2312 ath9k_ps_restore(sc);
2183 ieee80211_wake_queues(hw); 2313 ieee80211_wake_queues(hw);
2184 2314
2185out: 2315out:
2186 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); 2316 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
2187 mutex_unlock(&sc->mutex); 2317 mutex_unlock(&sc->mutex);
2188 ath9k_ps_restore(sc); 2318}
2319
2320static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
2321{
2322 struct ath_softc *sc = hw->priv;
2323 int i;
2324
2325 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2326 if (!ATH_TXQ_SETUP(sc, i))
2327 continue;
2328
2329 if (ath9k_has_pending_frames(sc, &sc->tx.txq[i]))
2330 return true;
2331 }
2332 return false;
2189} 2333}
2190 2334
2191struct ieee80211_ops ath9k_ops = { 2335struct ieee80211_ops ath9k_ops = {
@@ -2199,6 +2343,7 @@ struct ieee80211_ops ath9k_ops = {
2199 .configure_filter = ath9k_configure_filter, 2343 .configure_filter = ath9k_configure_filter,
2200 .sta_add = ath9k_sta_add, 2344 .sta_add = ath9k_sta_add,
2201 .sta_remove = ath9k_sta_remove, 2345 .sta_remove = ath9k_sta_remove,
2346 .sta_notify = ath9k_sta_notify,
2202 .conf_tx = ath9k_conf_tx, 2347 .conf_tx = ath9k_conf_tx,
2203 .bss_info_changed = ath9k_bss_info_changed, 2348 .bss_info_changed = ath9k_bss_info_changed,
2204 .set_key = ath9k_set_key, 2349 .set_key = ath9k_set_key,
@@ -2210,4 +2355,5 @@ struct ieee80211_ops ath9k_ops = {
2210 .rfkill_poll = ath9k_rfkill_poll_state, 2355 .rfkill_poll = ath9k_rfkill_poll_state,
2211 .set_coverage_class = ath9k_set_coverage_class, 2356 .set_coverage_class = ath9k_set_coverage_class,
2212 .flush = ath9k_flush, 2357 .flush = ath9k_flush,
2358 .tx_frames_pending = ath9k_tx_frames_pending,
2213}; 2359};
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 5e3d7496986e..9441bf8ca2fd 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -19,7 +19,6 @@
19 19
20#define CHANSEL_DIV 15 20#define CHANSEL_DIV 15
21#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV) 21#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
22#define CHANSEL_2G_9485(_freq) ((((_freq) * 0x10000) - 215) / CHANSEL_DIV)
23#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV) 22#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
24 23
25#define AR_PHY_BASE 0x9800 24#define AR_PHY_BASE 0x9800
@@ -38,26 +37,15 @@
38#define AR_PHY_CLC_Q0 0x0000ffd0 37#define AR_PHY_CLC_Q0 0x0000ffd0
39#define AR_PHY_CLC_Q0_S 5 38#define AR_PHY_CLC_Q0_S 5
40 39
41#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
42 int r; \
43 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
44 REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \
45 DO_DELAY(regWr); \
46 } \
47 } while (0)
48
49#define ANTSWAP_AB 0x0001 40#define ANTSWAP_AB 0x0001
50#define REDUCE_CHAIN_0 0x00000050 41#define REDUCE_CHAIN_0 0x00000050
51#define REDUCE_CHAIN_1 0x00000051 42#define REDUCE_CHAIN_1 0x00000051
52#define AR_PHY_CHIP_ID 0x9818 43#define AR_PHY_CHIP_ID 0x9818
53 44
54#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
55 int i; \
56 for (i = 0; i < (_iniarray)->ia_rows; i++) \
57 (_bank)[i] = INI_RA((_iniarray), i, _col);; \
58 } while (0)
59
60#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000 45#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
61#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20 46#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
62 47
48#define AR_PHY_PLL_CONTROL 0x16180
49#define AR_PHY_PLL_MODE 0x16184
50
63#endif 51#endif
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 4c0d36a6980f..4ccbf2ddb553 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -854,14 +854,13 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
854 ath_rc_rate_set_rtscts(sc, rate_table, tx_info); 854 ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
855} 855}
856 856
857static bool ath_rc_update_per(struct ath_softc *sc, 857static void ath_rc_update_per(struct ath_softc *sc,
858 const struct ath_rate_table *rate_table, 858 const struct ath_rate_table *rate_table,
859 struct ath_rate_priv *ath_rc_priv, 859 struct ath_rate_priv *ath_rc_priv,
860 struct ieee80211_tx_info *tx_info, 860 struct ieee80211_tx_info *tx_info,
861 int tx_rate, int xretries, int retries, 861 int tx_rate, int xretries, int retries,
862 u32 now_msec) 862 u32 now_msec)
863{ 863{
864 bool state_change = false;
865 int count, n_bad_frames; 864 int count, n_bad_frames;
866 u8 last_per; 865 u8 last_per;
867 static const u32 nretry_to_per_lookup[10] = { 866 static const u32 nretry_to_per_lookup[10] = {
@@ -992,8 +991,6 @@ static bool ath_rc_update_per(struct ath_softc *sc,
992 991
993 } 992 }
994 } 993 }
995
996 return state_change;
997} 994}
998 995
999static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix, 996static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
@@ -1017,7 +1014,6 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1017 u32 now_msec = jiffies_to_msecs(jiffies); 1014 u32 now_msec = jiffies_to_msecs(jiffies);
1018 int rate; 1015 int rate;
1019 u8 last_per; 1016 u8 last_per;
1020 bool state_change = false;
1021 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table; 1017 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
1022 int size = ath_rc_priv->rate_table_size; 1018 int size = ath_rc_priv->rate_table_size;
1023 1019
@@ -1027,9 +1023,9 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1027 last_per = ath_rc_priv->per[tx_rate]; 1023 last_per = ath_rc_priv->per[tx_rate];
1028 1024
1029 /* Update PER first */ 1025 /* Update PER first */
1030 state_change = ath_rc_update_per(sc, rate_table, ath_rc_priv, 1026 ath_rc_update_per(sc, rate_table, ath_rc_priv,
1031 tx_info, tx_rate, xretries, 1027 tx_info, tx_rate, xretries,
1032 retries, now_msec); 1028 retries, now_msec);
1033 1029
1034 /* 1030 /*
1035 * If this rate looks bad (high PER) then stop using it for 1031 * If this rate looks bad (high PER) then stop using it for
@@ -1092,8 +1088,7 @@ static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
1092 if (!(rate->flags & IEEE80211_TX_RC_MCS)) 1088 if (!(rate->flags & IEEE80211_TX_RC_MCS))
1093 return rate->idx; 1089 return rate->idx;
1094 1090
1095 while (rate->idx > mcs_rix_off[i] && 1091 while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
1096 i < ARRAY_SIZE(mcs_rix_off)) {
1097 rix++; i++; 1092 rix++; i++;
1098 } 1093 }
1099 1094
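Note: the rc.c hunk above also reorders the loop condition in ath_rc_get_rateindex() so the index bound is tested before mcs_rix_off[i] is read; && short-circuits, so the array can no longer be indexed one element past its end. A small sketch of the same pattern against a hypothetical lookup table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int thresholds[] = { 7, 15, 23, 31 };

    /* Count how many thresholds the value exceeds. The bound check must
     * come first so the table is never indexed out of range. */
    static int count_exceeded(int value)
    {
        unsigned int i = 0;

        while (i < ARRAY_SIZE(thresholds) && value > thresholds[i])
            i++;

        return i;
    }

    int main(void)
    {
        printf("%d %d\n", count_exceeded(20), count_exceeded(100));  /* 2 4 */
        return 0;
    }

With the operands swapped back, thresholds[4] would be read whenever the value exceeds every entry.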
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index b29c80def35e..4f52e0429f99 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -28,6 +28,33 @@ static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
28 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50); 28 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
29} 29}
30 30
31static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
32 int curr_main_set, int curr_alt_set,
33 int alt_rssi_avg, int main_rssi_avg)
34{
35 bool result = false;
36 switch (div_group) {
37 case 0:
38 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
39 result = true;
40 break;
41 case 1:
42 if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
43 (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
44 (alt_rssi_avg >= (main_rssi_avg - 5))) ||
45 ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
46 (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
47 (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
48 (alt_rssi_avg >= 4))
49 result = true;
50 else
51 result = false;
52 break;
53 }
54
55 return result;
56}
57
31static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 58static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
32{ 59{
33 return sc->ps_enabled && 60 return sc->ps_enabled &&
@@ -75,7 +102,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
75 *sc->rx.rxlink = bf->bf_daddr; 102 *sc->rx.rxlink = bf->bf_daddr;
76 103
77 sc->rx.rxlink = &ds->ds_link; 104 sc->rx.rxlink = &ds->ds_link;
78 ath9k_hw_rxena(ah);
79} 105}
80 106
81static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 107static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
@@ -426,9 +452,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
426 else 452 else
427 rfilt |= ATH9K_RX_FILTER_BEACON; 453 rfilt |= ATH9K_RX_FILTER_BEACON;
428 454
429 if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) || 455 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
430 AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
431 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
432 (sc->rx.rxfilter & FIF_PSPOLL)) 456 (sc->rx.rxfilter & FIF_PSPOLL))
433 rfilt |= ATH9K_RX_FILTER_PSPOLL; 457 rfilt |= ATH9K_RX_FILTER_PSPOLL;
434 458
@@ -574,7 +598,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
574 sc->ps_flags &= ~PS_BEACON_SYNC; 598 sc->ps_flags &= ~PS_BEACON_SYNC;
575 ath_dbg(common, ATH_DBG_PS, 599 ath_dbg(common, ATH_DBG_PS,
576 "Reconfigure Beacon timers based on timestamp from the AP\n"); 600 "Reconfigure Beacon timers based on timestamp from the AP\n");
577 ath_beacon_config(sc, NULL); 601 ath_set_beacon(sc);
602 sc->ps_flags &= ~PS_TSFOOR_SYNC;
578 } 603 }
579 604
580 if (ath_beacon_dtim_pending_cab(skb)) { 605 if (ath_beacon_dtim_pending_cab(skb)) {
@@ -919,7 +944,8 @@ static void ath9k_process_rssi(struct ath_common *common,
919 int last_rssi; 944 int last_rssi;
920 __le16 fc; 945 __le16 fc;
921 946
922 if (ah->opmode != NL80211_IFTYPE_STATION) 947 if ((ah->opmode != NL80211_IFTYPE_STATION) &&
948 (ah->opmode != NL80211_IFTYPE_ADHOC))
923 return; 949 return;
924 950
925 fc = hdr->frame_control; 951 fc = hdr->frame_control;
@@ -1291,49 +1317,138 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1291 } 1317 }
1292} 1318}
1293 1319
1294static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf) 1320static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
1321 struct ath_ant_comb *antcomb, int alt_ratio)
1295{ 1322{
1296 /* Adjust the fast_div_bias based on main and alt lna conf */ 1323 if (ant_conf->div_group == 0) {
1297 switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) { 1324 /* Adjust the fast_div_bias based on main and alt lna conf */
1298 case (0x01): /* A-B LNA2 */ 1325 switch ((ant_conf->main_lna_conf << 4) |
1299 ant_conf->fast_div_bias = 0x3b; 1326 ant_conf->alt_lna_conf) {
1300 break; 1327 case (0x01): /* A-B LNA2 */
1301 case (0x02): /* A-B LNA1 */ 1328 ant_conf->fast_div_bias = 0x3b;
1302 ant_conf->fast_div_bias = 0x3d; 1329 break;
1303 break; 1330 case (0x02): /* A-B LNA1 */
1304 case (0x03): /* A-B A+B */ 1331 ant_conf->fast_div_bias = 0x3d;
1305 ant_conf->fast_div_bias = 0x1; 1332 break;
1306 break; 1333 case (0x03): /* A-B A+B */
1307 case (0x10): /* LNA2 A-B */ 1334 ant_conf->fast_div_bias = 0x1;
1308 ant_conf->fast_div_bias = 0x7; 1335 break;
1309 break; 1336 case (0x10): /* LNA2 A-B */
1310 case (0x12): /* LNA2 LNA1 */ 1337 ant_conf->fast_div_bias = 0x7;
1311 ant_conf->fast_div_bias = 0x2; 1338 break;
1312 break; 1339 case (0x12): /* LNA2 LNA1 */
1313 case (0x13): /* LNA2 A+B */ 1340 ant_conf->fast_div_bias = 0x2;
1314 ant_conf->fast_div_bias = 0x7; 1341 break;
1315 break; 1342 case (0x13): /* LNA2 A+B */
1316 case (0x20): /* LNA1 A-B */ 1343 ant_conf->fast_div_bias = 0x7;
1317 ant_conf->fast_div_bias = 0x6; 1344 break;
1318 break; 1345 case (0x20): /* LNA1 A-B */
1319 case (0x21): /* LNA1 LNA2 */ 1346 ant_conf->fast_div_bias = 0x6;
1320 ant_conf->fast_div_bias = 0x0; 1347 break;
1321 break; 1348 case (0x21): /* LNA1 LNA2 */
1322 case (0x23): /* LNA1 A+B */ 1349 ant_conf->fast_div_bias = 0x0;
1323 ant_conf->fast_div_bias = 0x6; 1350 break;
1324 break; 1351 case (0x23): /* LNA1 A+B */
1325 case (0x30): /* A+B A-B */ 1352 ant_conf->fast_div_bias = 0x6;
1326 ant_conf->fast_div_bias = 0x1; 1353 break;
1327 break; 1354 case (0x30): /* A+B A-B */
1328 case (0x31): /* A+B LNA2 */ 1355 ant_conf->fast_div_bias = 0x1;
1329 ant_conf->fast_div_bias = 0x3b; 1356 break;
1330 break; 1357 case (0x31): /* A+B LNA2 */
1331 case (0x32): /* A+B LNA1 */ 1358 ant_conf->fast_div_bias = 0x3b;
1332 ant_conf->fast_div_bias = 0x3d; 1359 break;
1333 break; 1360 case (0x32): /* A+B LNA1 */
1334 default: 1361 ant_conf->fast_div_bias = 0x3d;
1335 break; 1362 break;
1363 default:
1364 break;
1365 }
1366 } else if (ant_conf->div_group == 2) {
1367 /* Adjust the fast_div_bias based on main and alt_lna_conf */
1368 switch ((ant_conf->main_lna_conf << 4) |
1369 ant_conf->alt_lna_conf) {
1370 case (0x01): /* A-B LNA2 */
1371 ant_conf->fast_div_bias = 0x1;
1372 ant_conf->main_gaintb = 0;
1373 ant_conf->alt_gaintb = 0;
1374 break;
1375 case (0x02): /* A-B LNA1 */
1376 ant_conf->fast_div_bias = 0x1;
1377 ant_conf->main_gaintb = 0;
1378 ant_conf->alt_gaintb = 0;
1379 break;
1380 case (0x03): /* A-B A+B */
1381 ant_conf->fast_div_bias = 0x1;
1382 ant_conf->main_gaintb = 0;
1383 ant_conf->alt_gaintb = 0;
1384 break;
1385 case (0x10): /* LNA2 A-B */
1386 if (!(antcomb->scan) &&
1387 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1388 ant_conf->fast_div_bias = 0x1;
1389 else
1390 ant_conf->fast_div_bias = 0x2;
1391 ant_conf->main_gaintb = 0;
1392 ant_conf->alt_gaintb = 0;
1393 break;
1394 case (0x12): /* LNA2 LNA1 */
1395 ant_conf->fast_div_bias = 0x1;
1396 ant_conf->main_gaintb = 0;
1397 ant_conf->alt_gaintb = 0;
1398 break;
1399 case (0x13): /* LNA2 A+B */
1400 if (!(antcomb->scan) &&
1401 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1402 ant_conf->fast_div_bias = 0x1;
1403 else
1404 ant_conf->fast_div_bias = 0x2;
1405 ant_conf->main_gaintb = 0;
1406 ant_conf->alt_gaintb = 0;
1407 break;
1408 case (0x20): /* LNA1 A-B */
1409 if (!(antcomb->scan) &&
1410 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1411 ant_conf->fast_div_bias = 0x1;
1412 else
1413 ant_conf->fast_div_bias = 0x2;
1414 ant_conf->main_gaintb = 0;
1415 ant_conf->alt_gaintb = 0;
1416 break;
1417 case (0x21): /* LNA1 LNA2 */
1418 ant_conf->fast_div_bias = 0x1;
1419 ant_conf->main_gaintb = 0;
1420 ant_conf->alt_gaintb = 0;
1421 break;
1422 case (0x23): /* LNA1 A+B */
1423 if (!(antcomb->scan) &&
1424 (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
1425 ant_conf->fast_div_bias = 0x1;
1426 else
1427 ant_conf->fast_div_bias = 0x2;
1428 ant_conf->main_gaintb = 0;
1429 ant_conf->alt_gaintb = 0;
1430 break;
1431 case (0x30): /* A+B A-B */
1432 ant_conf->fast_div_bias = 0x1;
1433 ant_conf->main_gaintb = 0;
1434 ant_conf->alt_gaintb = 0;
1435 break;
1436 case (0x31): /* A+B LNA2 */
1437 ant_conf->fast_div_bias = 0x1;
1438 ant_conf->main_gaintb = 0;
1439 ant_conf->alt_gaintb = 0;
1440 break;
1441 case (0x32): /* A+B LNA1 */
1442 ant_conf->fast_div_bias = 0x1;
1443 ant_conf->main_gaintb = 0;
1444 ant_conf->alt_gaintb = 0;
1445 break;
1446 default:
1447 break;
1448 }
1449
1336 } 1450 }
1451
1337} 1452}
1338 1453
1339/* Antenna diversity and combining */ 1454/* Antenna diversity and combining */
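Note: the rewritten ath_ant_div_conf_fast_divbias() above keys its switch on the two LNA configurations packed into one value, (main_lna_conf << 4) | alt_lna_conf, one nibble per field. A minimal sketch of that dispatch style; the enum names are invented, and the bias values 0x3b/0x2/0x0 are taken from the div_group 0 table above:

    #include <stdio.h>

    enum lna_conf { LNA_AB_DIFF = 0, LNA2 = 1, LNA1 = 2, LNA_AB_SUM = 3 };

    /* Pack two small enums into one switch key: main in the high nibble,
     * alt in the low nibble. */
    static int pick_bias(enum lna_conf main_conf, enum lna_conf alt_conf)
    {
        switch ((main_conf << 4) | alt_conf) {
        case 0x01:              /* A-B / LNA2 */
            return 0x3b;
        case 0x12:              /* LNA2 / LNA1 */
            return 0x2;
        case 0x21:              /* LNA1 / LNA2 */
            return 0x0;
        default:
            return -1;          /* combination not handled in this sketch */
        }
    }

    int main(void)
    {
        printf("bias=0x%x\n", pick_bias(LNA2, LNA1));   /* key 0x12 -> 0x2 */
        return 0;
    }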
@@ -1342,7 +1457,7 @@ static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1342 struct ath_hw_antcomb_conf div_ant_conf; 1457 struct ath_hw_antcomb_conf div_ant_conf;
1343 struct ath_ant_comb *antcomb = &sc->ant_comb; 1458 struct ath_ant_comb *antcomb = &sc->ant_comb;
1344 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set; 1459 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1345 int curr_main_set, curr_bias; 1460 int curr_main_set;
1346 int main_rssi = rs->rs_rssi_ctl0; 1461 int main_rssi = rs->rs_rssi_ctl0;
1347 int alt_rssi = rs->rs_rssi_ctl1; 1462 int alt_rssi = rs->rs_rssi_ctl1;
1348 int rx_ant_conf, main_ant_conf; 1463 int rx_ant_conf, main_ant_conf;
@@ -1353,8 +1468,8 @@ static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1353 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) & 1468 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1354 ATH_ANT_RX_MASK; 1469 ATH_ANT_RX_MASK;
1355 1470
1356 /* Record packet only when alt_rssi is positive */ 1471 /* Record packet only when both main_rssi and alt_rssi are positive */
1357 if (alt_rssi > 0) { 1472 if (main_rssi > 0 && alt_rssi > 0) {
1358 antcomb->total_pkt_count++; 1473 antcomb->total_pkt_count++;
1359 antcomb->main_total_rssi += main_rssi; 1474 antcomb->main_total_rssi += main_rssi;
1360 antcomb->alt_total_rssi += alt_rssi; 1475 antcomb->alt_total_rssi += alt_rssi;
@@ -1396,7 +1511,6 @@ static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1396 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf); 1511 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1397 curr_alt_set = div_ant_conf.alt_lna_conf; 1512 curr_alt_set = div_ant_conf.alt_lna_conf;
1398 curr_main_set = div_ant_conf.main_lna_conf; 1513 curr_main_set = div_ant_conf.main_lna_conf;
1399 curr_bias = div_ant_conf.fast_div_bias;
1400 1514
1401 antcomb->count++; 1515 antcomb->count++;
1402 1516
@@ -1415,7 +1529,9 @@ static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1415 } 1529 }
1416 1530
1417 if (!antcomb->scan) { 1531 if (!antcomb->scan) {
1418 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) { 1532 if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
1533 alt_ratio, curr_main_set, curr_alt_set,
1534 alt_rssi_avg, main_rssi_avg)) {
1419 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { 1535 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1420 /* Switch main and alt LNA */ 1536 /* Switch main and alt LNA */
1421 div_ant_conf.main_lna_conf = 1537 div_ant_conf.main_lna_conf =
@@ -1444,7 +1560,7 @@ static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1444 } 1560 }
1445 1561
1446 if ((alt_rssi_avg < (main_rssi_avg + 1562 if ((alt_rssi_avg < (main_rssi_avg +
1447 ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA))) 1563 div_ant_conf.lna1_lna2_delta)))
1448 goto div_comb_done; 1564 goto div_comb_done;
1449 } 1565 }
1450 1566
@@ -1558,8 +1674,7 @@ static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1558 antcomb->quick_scan_cnt++; 1674 antcomb->quick_scan_cnt++;
1559 1675
1560div_comb_done: 1676div_comb_done:
1561 ath_ant_div_conf_fast_divbias(&div_ant_conf); 1677 ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
1562
1563 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf); 1678 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1564 1679
1565 antcomb->scan_start_time = jiffies; 1680 antcomb->scan_start_time = jiffies;
@@ -1746,7 +1861,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1746 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON | 1861 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1747 PS_WAIT_FOR_CAB | 1862 PS_WAIT_FOR_CAB |
1748 PS_WAIT_FOR_PSPOLL_DATA)) || 1863 PS_WAIT_FOR_PSPOLL_DATA)) ||
1749 unlikely(ath9k_check_auto_sleep(sc))) 1864 ath9k_check_auto_sleep(sc))
1750 ath_rx_ps(sc, skb); 1865 ath_rx_ps(sc, skb);
1751 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1866 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1752 1867
@@ -1767,6 +1882,7 @@ requeue:
1767 } else { 1882 } else {
1768 list_move_tail(&bf->list, &sc->rx.rxbuf); 1883 list_move_tail(&bf->list, &sc->rx.rxbuf);
1769 ath_rx_buf_link(sc, bf); 1884 ath_rx_buf_link(sc, bf);
1885 ath9k_hw_rxena(ah);
1770 } 1886 }
1771 } while (1); 1887 } while (1);
1772 1888
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8fa8acfde62e..456f3ec20fef 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -693,7 +693,7 @@
693#define AR_RC_APB 0x00000002 693#define AR_RC_APB 0x00000002
694#define AR_RC_HOSTIF 0x00000100 694#define AR_RC_HOSTIF 0x00000100
695 695
696#define AR_WA 0x4004 696#define AR_WA (AR_SREV_9340(ah) ? 0x40c4 : 0x4004)
697#define AR_WA_BIT6 (1 << 6) 697#define AR_WA_BIT6 (1 << 6)
698#define AR_WA_BIT7 (1 << 7) 698#define AR_WA_BIT7 (1 << 7)
699#define AR_WA_BIT23 (1 << 23) 699#define AR_WA_BIT23 (1 << 23)
@@ -712,7 +712,7 @@
712#define AR_PM_STATE 0x4008 712#define AR_PM_STATE 0x4008
713#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000 713#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
714 714
715#define AR_HOST_TIMEOUT 0x4018 715#define AR_HOST_TIMEOUT (AR_SREV_9340(ah) ? 0x4008 : 0x4018)
716#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF 716#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
717#define AR_HOST_TIMEOUT_APB_CNTR_S 0 717#define AR_HOST_TIMEOUT_APB_CNTR_S 0
718#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000 718#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
@@ -742,7 +742,8 @@
742#define EEPROM_PROTECT_WP_1024_2047 0x8000 742#define EEPROM_PROTECT_WP_1024_2047 0x8000
743 743
744#define AR_SREV \ 744#define AR_SREV \
745 ((AR_SREV_9100(ah)) ? 0x0600 : 0x4020) 745 ((AR_SREV_9100(ah)) ? 0x0600 : (AR_SREV_9340(ah) \
746 ? 0x400c : 0x4020))
746 747
747#define AR_SREV_ID \ 748#define AR_SREV_ID \
748 ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF) 749 ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
@@ -790,6 +791,7 @@
790#define AR_SREV_VERSION_9485 0x240 791#define AR_SREV_VERSION_9485 0x240
791#define AR_SREV_REVISION_9485_10 0 792#define AR_SREV_REVISION_9485_10 0
792#define AR_SREV_REVISION_9485_11 1 793#define AR_SREV_REVISION_9485_11 1
794#define AR_SREV_VERSION_9340 0x300
793 795
794#define AR_SREV_5416(_ah) \ 796#define AR_SREV_5416(_ah) \
795 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 797 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -858,9 +860,7 @@
858#define AR_SREV_9300(_ah) \ 860#define AR_SREV_9300(_ah) \
859 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300)) 861 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
860#define AR_SREV_9300_20_OR_LATER(_ah) \ 862#define AR_SREV_9300_20_OR_LATER(_ah) \
861 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \ 863 ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
862 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
863 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
864 864
865#define AR_SREV_9485(_ah) \ 865#define AR_SREV_9485(_ah) \
866 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485)) 866 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
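Note: the AR_SREV_9300_20_OR_LATER() simplification above replaces "(version > 9300) || (version == 9300 && rev >= 2.0)" with a plain "version >= 9300", which is equivalent only if every supported part reporting that macVersion is revision 2.0 or later. A tiny sketch of where the two predicates can differ, using stand-in version and revision constants:

    #include <stdio.h>

    #define VER_9300 0x1c0   /* stand-ins, not the driver's real values */
    #define REV_20   2

    /* Original form: newer major version, or same major with a new enough rev. */
    static int at_least_9300_20(int ver, int rev)
    {
        return ver > VER_9300 || (ver == VER_9300 && rev >= REV_20);
    }

    /* Simplified form: correct only when all shipping revs of VER_9300 qualify. */
    static int at_least_9300(int ver)
    {
        return ver >= VER_9300;
    }

    int main(void)
    {
        /* The two differ exactly when ver == VER_9300 and rev < REV_20. */
        printf("%d %d\n", at_least_9300_20(VER_9300, 1), at_least_9300(VER_9300));
        return 0;
    }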
@@ -870,6 +870,11 @@
870#define AR_SREV_9485_11(_ah) \ 870#define AR_SREV_9485_11(_ah) \
871 (AR_SREV_9485(_ah) && \ 871 (AR_SREV_9485(_ah) && \
872 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11)) 872 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
873#define AR_SREV_9485_OR_LATER(_ah) \
874 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))
875
876#define AR_SREV_9340(_ah) \
877 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9340))
873 878
874#define AR_SREV_9285E_20(_ah) \ 879#define AR_SREV_9285E_20(_ah) \
875 (AR_SREV_9285_12_OR_LATER(_ah) && \ 880 (AR_SREV_9285_12_OR_LATER(_ah) && \
@@ -912,11 +917,11 @@ enum ath_usb_dev {
912#define AR_INTR_SPURIOUS 0xFFFFFFFF 917#define AR_INTR_SPURIOUS 0xFFFFFFFF
913 918
914 919
915#define AR_INTR_SYNC_CAUSE_CLR 0x4028 920#define AR_INTR_SYNC_CAUSE (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
921#define AR_INTR_SYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
916 922
917#define AR_INTR_SYNC_CAUSE 0x4028
918 923
919#define AR_INTR_SYNC_ENABLE 0x402c 924#define AR_INTR_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4014 : 0x402c)
920#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000 925#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
921#define AR_INTR_SYNC_ENABLE_GPIO_S 18 926#define AR_INTR_SYNC_ENABLE_GPIO_S 18
922 927
@@ -956,24 +961,24 @@ enum {
956 961
957}; 962};
958 963
959#define AR_INTR_ASYNC_MASK 0x4030 964#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
960#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000 965#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
961#define AR_INTR_ASYNC_MASK_GPIO_S 18 966#define AR_INTR_ASYNC_MASK_GPIO_S 18
962 967
963#define AR_INTR_SYNC_MASK 0x4034 968#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
964#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000 969#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
965#define AR_INTR_SYNC_MASK_GPIO_S 18 970#define AR_INTR_SYNC_MASK_GPIO_S 18
966 971
967#define AR_INTR_ASYNC_CAUSE_CLR 0x4038 972#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
968#define AR_INTR_ASYNC_CAUSE 0x4038 973#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
969 974
970#define AR_INTR_ASYNC_ENABLE 0x403c 975#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
971#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000 976#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
972#define AR_INTR_ASYNC_ENABLE_GPIO_S 18 977#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
973 978
974#define AR_PCIE_SERDES 0x4040 979#define AR_PCIE_SERDES 0x4040
975#define AR_PCIE_SERDES2 0x4044 980#define AR_PCIE_SERDES2 0x4044
976#define AR_PCIE_PM_CTRL 0x4014 981#define AR_PCIE_PM_CTRL (AR_SREV_9340(ah) ? 0x4004 : 0x4014)
977#define AR_PCIE_PM_CTRL_ENA 0x00080000 982#define AR_PCIE_PM_CTRL_ENA 0x00080000
978 983
979#define AR_NUM_GPIO 14 984#define AR_NUM_GPIO 14
@@ -984,7 +989,7 @@ enum {
984#define AR9300_NUM_GPIO 17 989#define AR9300_NUM_GPIO 17
985#define AR7010_NUM_GPIO 16 990#define AR7010_NUM_GPIO 16
986 991
987#define AR_GPIO_IN_OUT 0x4048 992#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
988#define AR_GPIO_IN_VAL 0x0FFFC000 993#define AR_GPIO_IN_VAL 0x0FFFC000
989#define AR_GPIO_IN_VAL_S 14 994#define AR_GPIO_IN_VAL_S 14
990#define AR928X_GPIO_IN_VAL 0x000FFC00 995#define AR928X_GPIO_IN_VAL 0x000FFC00
@@ -998,11 +1003,12 @@ enum {
998#define AR7010_GPIO_IN_VAL 0x0000FFFF 1003#define AR7010_GPIO_IN_VAL 0x0000FFFF
999#define AR7010_GPIO_IN_VAL_S 0 1004#define AR7010_GPIO_IN_VAL_S 0
1000 1005
1001#define AR_GPIO_IN 0x404c 1006#define AR_GPIO_IN (AR_SREV_9340(ah) ? 0x402c : 0x404c)
1002#define AR9300_GPIO_IN_VAL 0x0001FFFF 1007#define AR9300_GPIO_IN_VAL 0x0001FFFF
1003#define AR9300_GPIO_IN_VAL_S 0 1008#define AR9300_GPIO_IN_VAL_S 0
1004 1009
1005#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c) 1010#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
1011 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
1006#define AR_GPIO_OE_OUT_DRV 0x3 1012#define AR_GPIO_OE_OUT_DRV 0x3
1007#define AR_GPIO_OE_OUT_DRV_NO 0x0 1013#define AR_GPIO_OE_OUT_DRV_NO 0x0
1008#define AR_GPIO_OE_OUT_DRV_LOW 0x1 1014#define AR_GPIO_OE_OUT_DRV_LOW 0x1
@@ -1024,11 +1030,13 @@ enum {
1024#define AR7010_GPIO_INT_MASK 0x52024 1030#define AR7010_GPIO_INT_MASK 0x52024
1025#define AR7010_GPIO_FUNCTION 0x52028 1031#define AR7010_GPIO_FUNCTION 0x52028
1026 1032
1027#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050) 1033#define AR_GPIO_INTR_POL (AR_SREV_9340(ah) ? 0x4038 : \
1034 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050))
1028#define AR_GPIO_INTR_POL_VAL 0x0001FFFF 1035#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
1029#define AR_GPIO_INTR_POL_VAL_S 0 1036#define AR_GPIO_INTR_POL_VAL_S 0
1030 1037
1031#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054) 1038#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9340(ah) ? 0x403c : \
1039 (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054))
1032#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004 1040#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
1033#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2 1041#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
1034#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008 1042#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -1046,13 +1054,15 @@ enum {
1046#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 1054#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
1047#define AR_GPIO_JTAG_DISABLE 0x00020000 1055#define AR_GPIO_JTAG_DISABLE 0x00020000
1048 1056
1049#define AR_GPIO_INPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058) 1057#define AR_GPIO_INPUT_MUX1 (AR_SREV_9340(ah) ? 0x4040 : \
1058 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058))
1050#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000 1059#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
1051#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16 1060#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
1052#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00 1061#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
1053#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8 1062#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
1054 1063
1055#define AR_GPIO_INPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c) 1064#define AR_GPIO_INPUT_MUX2 (AR_SREV_9340(ah) ? 0x4044 : \
1065 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c))
1056#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f 1066#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
1057#define AR_GPIO_INPUT_MUX2_CLK25_S 0 1067#define AR_GPIO_INPUT_MUX2_CLK25_S 0
1058#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0 1068#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -1060,13 +1070,18 @@ enum {
1060#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00 1070#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
1061#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8 1071#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
1062 1072
1063#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060) 1073#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9340(ah) ? 0x4048 : \
1064#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064) 1074 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060))
1065#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068) 1075#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9340(ah) ? 0x404c : \
1076 (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064))
1077#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9340(ah) ? 0x4050 : \
1078 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068))
1066 1079
1067#define AR_INPUT_STATE (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c) 1080#define AR_INPUT_STATE (AR_SREV_9340(ah) ? 0x4054 : \
1081 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c))
1068 1082
1069#define AR_EEPROM_STATUS_DATA (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c) 1083#define AR_EEPROM_STATUS_DATA (AR_SREV_9340(ah) ? 0x40c8 : \
1084 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c))
1070#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff 1085#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
1071#define AR_EEPROM_STATUS_DATA_VAL_S 0 1086#define AR_EEPROM_STATUS_DATA_VAL_S 0
1072#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000 1087#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1074,28 +1089,51 @@ enum {
1074#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000 1089#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
1075#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000 1090#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
1076 1091
1077#define AR_OBS (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080) 1092#define AR_OBS (AR_SREV_9340(ah) ? 0x405c : \
1093 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080))
1078 1094
1079#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088) 1095#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
1080 1096
1081#define AR_PCIE_MSI (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094) 1097#define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \
1098 (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094))
1082#define AR_PCIE_MSI_ENABLE 0x00000001 1099#define AR_PCIE_MSI_ENABLE 0x00000001
1083 1100
1084#define AR_INTR_PRIO_SYNC_ENABLE 0x40c4 1101#define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4)
1085#define AR_INTR_PRIO_ASYNC_MASK 0x40c8 1102#define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8)
1086#define AR_INTR_PRIO_SYNC_MASK 0x40cc 1103#define AR_INTR_PRIO_SYNC_MASK (AR_SREV_9340(ah) ? 0x4090 : 0x40cc)
1087#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4 1104#define AR_INTR_PRIO_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4094 : 0x40d4)
1088#define AR_ENT_OTP 0x40d8 1105#define AR_ENT_OTP 0x40d8
1089#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000 1106#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
1090#define AR_ENT_OTP_MPSD 0x00800000 1107#define AR_ENT_OTP_MPSD 0x00800000
1091#define AR_CH0_BB_DPLL2 0x16184 1108
1109#define AR_CH0_BB_DPLL1 0x16180
1110#define AR_CH0_BB_DPLL1_REFDIV 0xF8000000
1111#define AR_CH0_BB_DPLL1_REFDIV_S 27
1112#define AR_CH0_BB_DPLL1_NINI 0x07FC0000
1113#define AR_CH0_BB_DPLL1_NINI_S 18
1114#define AR_CH0_BB_DPLL1_NFRAC 0x0003FFFF
1115#define AR_CH0_BB_DPLL1_NFRAC_S 0
1116
1117#define AR_CH0_BB_DPLL2 0x16184
1118#define AR_CH0_BB_DPLL2_LOCAL_PLL 0x40000000
1119#define AR_CH0_BB_DPLL2_LOCAL_PLL_S 30
1120#define AR_CH0_DPLL2_KI 0x3C000000
1121#define AR_CH0_DPLL2_KI_S 26
1122#define AR_CH0_DPLL2_KD 0x03F80000
1123#define AR_CH0_DPLL2_KD_S 19
1124#define AR_CH0_BB_DPLL2_EN_NEGTRIG 0x00040000
1125#define AR_CH0_BB_DPLL2_EN_NEGTRIG_S 18
1126#define AR_CH0_BB_DPLL2_PLL_PWD 0x00010000
1127#define AR_CH0_BB_DPLL2_PLL_PWD_S 16
1128#define AR_CH0_BB_DPLL2_OUTDIV 0x0000E000
1129#define AR_CH0_BB_DPLL2_OUTDIV_S 13
1130
1092#define AR_CH0_BB_DPLL3 0x16188 1131#define AR_CH0_BB_DPLL3 0x16188
1132#define AR_CH0_BB_DPLL3_PHASE_SHIFT 0x3F800000
1133#define AR_CH0_BB_DPLL3_PHASE_SHIFT_S 23
1134
1093#define AR_CH0_DDR_DPLL2 0x16244 1135#define AR_CH0_DDR_DPLL2 0x16244
1094#define AR_CH0_DDR_DPLL3 0x16248 1136#define AR_CH0_DDR_DPLL3 0x16248
1095#define AR_CH0_DPLL2_KD 0x03F80000
1096#define AR_CH0_DPLL2_KD_S 19
1097#define AR_CH0_DPLL2_KI 0x3C000000
1098#define AR_CH0_DPLL2_KI_S 26
1099#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000 1137#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000
1100#define AR_CH0_DPLL3_PHASE_SHIFT_S 23 1138#define AR_CH0_DPLL3_PHASE_SHIFT_S 23
1101#define AR_PHY_CCA_NOM_VAL_2GHZ -118 1139#define AR_PHY_CCA_NOM_VAL_2GHZ -118
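Note: the new DPLL register definitions above follow the driver's mask-plus-_S-shift convention (for example AR_CH0_BB_DPLL2_OUTDIV = 0x0000E000 with _S = 13). A minimal sketch of how such pairs are typically read and written; the MS()/SM() helpers here are generic stand-ins, not the driver's register plumbing:

    #include <stdio.h>
    #include <stdint.h>

    #define DPLL2_OUTDIV    0x0000E000u
    #define DPLL2_OUTDIV_S  13

    /* Extract and insert a register field from its mask and shift. */
    #define MS(val, field)  (((val) & (field)) >> field##_S)
    #define SM(val, field)  (((val) << field##_S) & (field))

    int main(void)
    {
        uint32_t reg = 0;

        reg = (reg & ~DPLL2_OUTDIV) | SM(5, DPLL2_OUTDIV);  /* set OUTDIV = 5 */
        printf("reg=0x%08x outdiv=%u\n",
               (unsigned)reg, (unsigned)MS(reg, DPLL2_OUTDIV));
        return 0;
    }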
@@ -1144,6 +1182,7 @@ enum {
1144#define AR_RTC_PLL_REFDIV_5 0x000000c0 1182#define AR_RTC_PLL_REFDIV_5 0x000000c0
1145#define AR_RTC_PLL_CLKSEL 0x00000300 1183#define AR_RTC_PLL_CLKSEL 0x00000300
1146#define AR_RTC_PLL_CLKSEL_S 8 1184#define AR_RTC_PLL_CLKSEL_S 8
1185#define AR_RTC_PLL_BYPASS 0x00010000
1147 1186
1148#define PLL3 0x16188 1187#define PLL3 0x16188
1149#define PLL3_DO_MEAS_MASK 0x40000000 1188#define PLL3_DO_MEAS_MASK 0x40000000
@@ -1190,7 +1229,8 @@ enum {
1190 1229
1191/* RTC_DERIVED_* - only for AR9100 */ 1230/* RTC_DERIVED_* - only for AR9100 */
1192 1231
1193#define AR_RTC_DERIVED_CLK (AR_RTC_BASE + 0x0038) 1232#define AR_RTC_DERIVED_CLK \
1233 (AR_SREV_9100(ah) ? (AR_RTC_BASE + 0x0038) : 0x7038)
1194#define AR_RTC_DERIVED_CLK_PERIOD 0x0000fffe 1234#define AR_RTC_DERIVED_CLK_PERIOD 0x0000fffe
1195#define AR_RTC_DERIVED_CLK_PERIOD_S 1 1235#define AR_RTC_DERIVED_CLK_PERIOD_S 1
1196 1236
@@ -1396,6 +1436,7 @@ enum {
1396#define AR_STA_ID1_PCF 0x00100000 1436#define AR_STA_ID1_PCF 0x00100000
1397#define AR_STA_ID1_USE_DEFANT 0x00200000 1437#define AR_STA_ID1_USE_DEFANT 0x00200000
1398#define AR_STA_ID1_DEFANT_UPDATE 0x00400000 1438#define AR_STA_ID1_DEFANT_UPDATE 0x00400000
1439#define AR_STA_ID1_AR9100_BA_FIX 0x00400000
1399#define AR_STA_ID1_RTS_USE_DEF 0x00800000 1440#define AR_STA_ID1_RTS_USE_DEF 0x00800000
1400#define AR_STA_ID1_ACKCTS_6MB 0x01000000 1441#define AR_STA_ID1_ACKCTS_6MB 0x01000000
1401#define AR_STA_ID1_BASE_RATE_11B 0x02000000 1442#define AR_STA_ID1_BASE_RATE_11B 0x02000000
@@ -1668,6 +1709,22 @@ enum {
1668#define AR_BTCOEX_WL_WGHT 0xffff0000 1709#define AR_BTCOEX_WL_WGHT 0xffff0000
1669#define AR_BTCOEX_WL_WGHT_S 16 1710#define AR_BTCOEX_WL_WGHT_S 16
1670 1711
1712#define AR_BT_COEX_WL_WEIGHTS0 0x8174
1713#define AR_BT_COEX_WL_WEIGHTS1 0x81c4
1714
1715#define AR_BT_COEX_BT_WEIGHTS0 0x83ac
1716#define AR_BT_COEX_BT_WEIGHTS1 0x83b0
1717#define AR_BT_COEX_BT_WEIGHTS2 0x83b4
1718#define AR_BT_COEX_BT_WEIGHTS3 0x83b8
1719
1720#define AR9300_BT_WGHT 0xcccc4444
1721#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0
1722#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0
1723#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880
1724#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880
1725#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000
1726#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000
1727
1671#define AR_BT_COEX_MODE2 0x817c 1728#define AR_BT_COEX_MODE2 0x817c
1672#define AR_BT_BCN_MISS_THRESH 0x000000ff 1729#define AR_BT_BCN_MISS_THRESH 0x000000ff
1673#define AR_BT_BCN_MISS_THRESH_S 0 1730#define AR_BT_BCN_MISS_THRESH_S 0
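Note: most of the remaining reg.h changes make register offsets conditional on the chip, e.g. AR_WA becoming (AR_SREV_9340(ah) ? 0x40c4 : 0x4004); such macros quietly rely on a variable named ah being in scope at every use site. A small sketch of the pattern with a made-up chip flag and offsets:

    #include <stdio.h>

    struct hw { int is_new_soc; };

    /* Revision check and register macro; like the AR_* macros above,
     * REG_FOO expects a local variable named `ah` at the point of use. */
    #define IS_NEW_SOC(ah)  ((ah)->is_new_soc)
    #define REG_FOO         (IS_NEW_SOC(ah) ? 0x40c4 : 0x4004)

    static unsigned int reg_foo_addr(struct hw *ah)
    {
        return REG_FOO;
    }

    int main(void)
    {
        struct hw old = { 0 }, soc = { 1 };

        printf("old: 0x%04x  soc: 0x%04x\n",
               reg_foo_addr(&old), reg_foo_addr(&soc));
        return 0;
    }

The convenience is that existing call sites keep compiling unchanged; the cost is that the macro cannot be used in a context without a local ah.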
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index d3d24904f62f..f9b1eb4853c4 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -23,20 +23,18 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
23 return "WMI_ECHO_CMDID"; 23 return "WMI_ECHO_CMDID";
24 case WMI_ACCESS_MEMORY_CMDID: 24 case WMI_ACCESS_MEMORY_CMDID:
25 return "WMI_ACCESS_MEMORY_CMDID"; 25 return "WMI_ACCESS_MEMORY_CMDID";
26 case WMI_GET_FW_VERSION:
27 return "WMI_GET_FW_VERSION";
26 case WMI_DISABLE_INTR_CMDID: 28 case WMI_DISABLE_INTR_CMDID:
27 return "WMI_DISABLE_INTR_CMDID"; 29 return "WMI_DISABLE_INTR_CMDID";
28 case WMI_ENABLE_INTR_CMDID: 30 case WMI_ENABLE_INTR_CMDID:
29 return "WMI_ENABLE_INTR_CMDID"; 31 return "WMI_ENABLE_INTR_CMDID";
30 case WMI_RX_LINK_CMDID:
31 return "WMI_RX_LINK_CMDID";
32 case WMI_ATH_INIT_CMDID: 32 case WMI_ATH_INIT_CMDID:
33 return "WMI_ATH_INIT_CMDID"; 33 return "WMI_ATH_INIT_CMDID";
34 case WMI_ABORT_TXQ_CMDID: 34 case WMI_ABORT_TXQ_CMDID:
35 return "WMI_ABORT_TXQ_CMDID"; 35 return "WMI_ABORT_TXQ_CMDID";
36 case WMI_STOP_TX_DMA_CMDID: 36 case WMI_STOP_TX_DMA_CMDID:
37 return "WMI_STOP_TX_DMA_CMDID"; 37 return "WMI_STOP_TX_DMA_CMDID";
38 case WMI_STOP_DMA_RECV_CMDID:
39 return "WMI_STOP_DMA_RECV_CMDID";
40 case WMI_ABORT_TX_DMA_CMDID: 38 case WMI_ABORT_TX_DMA_CMDID:
41 return "WMI_ABORT_TX_DMA_CMDID"; 39 return "WMI_ABORT_TX_DMA_CMDID";
42 case WMI_DRAIN_TXQ_CMDID: 40 case WMI_DRAIN_TXQ_CMDID:
@@ -51,8 +49,6 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
51 return "WMI_FLUSH_RECV_CMDID"; 49 return "WMI_FLUSH_RECV_CMDID";
52 case WMI_SET_MODE_CMDID: 50 case WMI_SET_MODE_CMDID:
53 return "WMI_SET_MODE_CMDID"; 51 return "WMI_SET_MODE_CMDID";
54 case WMI_RESET_CMDID:
55 return "WMI_RESET_CMDID";
56 case WMI_NODE_CREATE_CMDID: 52 case WMI_NODE_CREATE_CMDID:
57 return "WMI_NODE_CREATE_CMDID"; 53 return "WMI_NODE_CREATE_CMDID";
58 case WMI_NODE_REMOVE_CMDID: 54 case WMI_NODE_REMOVE_CMDID:
@@ -61,8 +57,6 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
61 return "WMI_VAP_REMOVE_CMDID"; 57 return "WMI_VAP_REMOVE_CMDID";
62 case WMI_VAP_CREATE_CMDID: 58 case WMI_VAP_CREATE_CMDID:
63 return "WMI_VAP_CREATE_CMDID"; 59 return "WMI_VAP_CREATE_CMDID";
64 case WMI_BEACON_UPDATE_CMDID:
65 return "WMI_BEACON_UPDATE_CMDID";
66 case WMI_REG_READ_CMDID: 60 case WMI_REG_READ_CMDID:
67 return "WMI_REG_READ_CMDID"; 61 return "WMI_REG_READ_CMDID";
68 case WMI_REG_WRITE_CMDID: 62 case WMI_REG_WRITE_CMDID:
@@ -71,22 +65,22 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
71 return "WMI_RC_STATE_CHANGE_CMDID"; 65 return "WMI_RC_STATE_CHANGE_CMDID";
72 case WMI_RC_RATE_UPDATE_CMDID: 66 case WMI_RC_RATE_UPDATE_CMDID:
73 return "WMI_RC_RATE_UPDATE_CMDID"; 67 return "WMI_RC_RATE_UPDATE_CMDID";
74 case WMI_DEBUG_INFO_CMDID:
75 return "WMI_DEBUG_INFO_CMDID";
76 case WMI_HOST_ATTACH:
77 return "WMI_HOST_ATTACH";
78 case WMI_TARGET_IC_UPDATE_CMDID: 68 case WMI_TARGET_IC_UPDATE_CMDID:
79 return "WMI_TARGET_IC_UPDATE_CMDID"; 69 return "WMI_TARGET_IC_UPDATE_CMDID";
80 case WMI_TGT_STATS_CMDID:
81 return "WMI_TGT_STATS_CMDID";
82 case WMI_TX_AGGR_ENABLE_CMDID: 70 case WMI_TX_AGGR_ENABLE_CMDID:
83 return "WMI_TX_AGGR_ENABLE_CMDID"; 71 return "WMI_TX_AGGR_ENABLE_CMDID";
84 case WMI_TGT_DETACH_CMDID: 72 case WMI_TGT_DETACH_CMDID:
85 return "WMI_TGT_DETACH_CMDID"; 73 return "WMI_TGT_DETACH_CMDID";
86 case WMI_TGT_TXQ_ENABLE_CMDID: 74 case WMI_NODE_UPDATE_CMDID:
87 return "WMI_TGT_TXQ_ENABLE_CMDID"; 75 return "WMI_NODE_UPDATE_CMDID";
88 case WMI_AGGR_LIMIT_CMD: 76 case WMI_INT_STATS_CMDID:
89 return "WMI_AGGR_LIMIT_CMD"; 77 return "WMI_INT_STATS_CMDID";
78 case WMI_TX_STATS_CMDID:
79 return "WMI_TX_STATS_CMDID";
80 case WMI_RX_STATS_CMDID:
81 return "WMI_RX_STATS_CMDID";
82 case WMI_BITRATE_MASK_CMDID:
83 return "WMI_BITRATE_MASK_CMDID";
90 } 84 }
91 85
92 return "Bogus"; 86 return "Bogus";
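Note: wmi_cmd_to_name() above is a plain id-to-string switch that returns a string literal per enumerator and a recognisable placeholder ("Bogus") for anything unknown. A compact sketch of the same mapping with an invented command enum:

    #include <stdio.h>

    enum cmd_id { CMD_ECHO = 1, CMD_REG_READ, CMD_REG_WRITE };

    /* Map command ids to printable names; unknown ids fall through to a
     * placeholder, as wmi_cmd_to_name() does with "Bogus". */
    static const char *cmd_to_name(enum cmd_id cmd)
    {
        switch (cmd) {
        case CMD_ECHO:      return "CMD_ECHO";
        case CMD_REG_READ:  return "CMD_REG_READ";
        case CMD_REG_WRITE: return "CMD_REG_WRITE";
        }
        return "Bogus";
    }

    int main(void)
    {
        printf("%s %s\n", cmd_to_name(CMD_REG_READ),
               cmd_to_name((enum cmd_id)99));
        return 0;
    }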
@@ -102,9 +96,15 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
102 96
103 wmi->drv_priv = priv; 97 wmi->drv_priv = priv;
104 wmi->stopped = false; 98 wmi->stopped = false;
99 skb_queue_head_init(&wmi->wmi_event_queue);
100 spin_lock_init(&wmi->wmi_lock);
101 spin_lock_init(&wmi->event_lock);
105 mutex_init(&wmi->op_mutex); 102 mutex_init(&wmi->op_mutex);
106 mutex_init(&wmi->multi_write_mutex); 103 mutex_init(&wmi->multi_write_mutex);
107 init_completion(&wmi->cmd_wait); 104 init_completion(&wmi->cmd_wait);
105 INIT_LIST_HEAD(&wmi->pending_tx_events);
106 tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
107 (unsigned long)wmi);
108 108
109 return wmi; 109 return wmi;
110} 110}
@@ -120,11 +120,65 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
120 kfree(priv->wmi); 120 kfree(priv->wmi);
121} 121}
122 122
123void ath9k_swba_tasklet(unsigned long data) 123void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
124{ 124{
125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 125 unsigned long flags;
126 126
127 ath9k_htc_swba(priv, priv->wmi->beacon_pending); 127 tasklet_kill(&priv->wmi->wmi_event_tasklet);
128 spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
129 __skb_queue_purge(&priv->wmi->wmi_event_queue);
130 spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
131}
132
133void ath9k_wmi_event_tasklet(unsigned long data)
134{
135 struct wmi *wmi = (struct wmi *)data;
136 struct ath9k_htc_priv *priv = wmi->drv_priv;
137 struct wmi_cmd_hdr *hdr;
138 void *wmi_event;
139 struct wmi_event_swba *swba;
140 struct sk_buff *skb = NULL;
141 unsigned long flags;
142 u16 cmd_id;
143
144 do {
145 spin_lock_irqsave(&wmi->wmi_lock, flags);
146 skb = __skb_dequeue(&wmi->wmi_event_queue);
147 if (!skb) {
148 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
149 return;
150 }
151 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
152
153 hdr = (struct wmi_cmd_hdr *) skb->data;
154 cmd_id = be16_to_cpu(hdr->command_id);
155 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
156
157 switch (cmd_id) {
158 case WMI_SWBA_EVENTID:
159 swba = (struct wmi_event_swba *) wmi_event;
160 ath9k_htc_swba(priv, swba);
161 break;
162 case WMI_FATAL_EVENTID:
163 ieee80211_queue_work(wmi->drv_priv->hw,
164 &wmi->drv_priv->fatal_work);
165 break;
166 case WMI_TXSTATUS_EVENTID:
167 spin_lock_bh(&priv->tx.tx_lock);
168 if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
169 spin_unlock_bh(&priv->tx.tx_lock);
170 break;
171 }
172 spin_unlock_bh(&priv->tx.tx_lock);
173
174 ath9k_htc_txstatus(priv, wmi_event);
175 break;
176 default:
177 break;
178 }
179
180 kfree_skb(skb);
181 } while (1);
128} 182}
129 183
130void ath9k_fatal_work(struct work_struct *work) 184void ath9k_fatal_work(struct work_struct *work)
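Note: ath9k_wmi_event_tasklet() above drains a locked queue: take one buffer under the spinlock, drop the lock, dispatch on the command id, free the buffer, and repeat until the queue is empty. A userspace sketch of that drain loop, with a pthread mutex and a linked list standing in for the spinlock and sk_buff queue (the driver appends at the tail to keep FIFO order; the sketch pushes at the head to stay short). Build with -lpthread:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct event {
        int cmd_id;
        struct event *next;
    };

    static struct event *head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Producer side: queue one event (the driver does this in its RX callback). */
    static void queue_event(int cmd_id)
    {
        struct event *ev = malloc(sizeof(*ev));

        if (!ev)
            return;
        ev->cmd_id = cmd_id;
        pthread_mutex_lock(&lock);
        ev->next = head;
        head = ev;
        pthread_mutex_unlock(&lock);
    }

    /* Consumer side: dequeue under the lock, process outside the lock,
     * stop when the queue is empty. */
    static void drain_events(void)
    {
        for (;;) {
            struct event *ev;

            pthread_mutex_lock(&lock);
            ev = head;
            if (!ev) {
                pthread_mutex_unlock(&lock);
                return;
            }
            head = ev->next;
            pthread_mutex_unlock(&lock);

            printf("handling event %d\n", ev->cmd_id);
            free(ev);
        }
    }

    int main(void)
    {
        queue_event(1);
        queue_event(2);
        drain_events();
        return 0;
    }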
@@ -153,10 +207,6 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
153 struct wmi *wmi = (struct wmi *) priv; 207 struct wmi *wmi = (struct wmi *) priv;
154 struct wmi_cmd_hdr *hdr; 208 struct wmi_cmd_hdr *hdr;
155 u16 cmd_id; 209 u16 cmd_id;
156 void *wmi_event;
157#ifdef CONFIG_ATH9K_HTC_DEBUGFS
158 __be32 txrate;
159#endif
160 210
161 if (unlikely(wmi->stopped)) 211 if (unlikely(wmi->stopped))
162 goto free_skb; 212 goto free_skb;
@@ -165,26 +215,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
165 cmd_id = be16_to_cpu(hdr->command_id); 215 cmd_id = be16_to_cpu(hdr->command_id);
166 216
167 if (cmd_id & 0x1000) { 217 if (cmd_id & 0x1000) {
168 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr)); 218 spin_lock(&wmi->wmi_lock);
169 switch (cmd_id) { 219 __skb_queue_tail(&wmi->wmi_event_queue, skb);
170 case WMI_SWBA_EVENTID: 220 spin_unlock(&wmi->wmi_lock);
171 wmi->beacon_pending = *(u8 *)wmi_event; 221 tasklet_schedule(&wmi->wmi_event_tasklet);
172 tasklet_schedule(&wmi->drv_priv->swba_tasklet);
173 break;
174 case WMI_FATAL_EVENTID:
175 ieee80211_queue_work(wmi->drv_priv->hw,
176 &wmi->drv_priv->fatal_work);
177 break;
178 case WMI_TXRATE_EVENTID:
179#ifdef CONFIG_ATH9K_HTC_DEBUGFS
180 txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
181 wmi->drv_priv->debug.txrate = be32_to_cpu(txrate);
182#endif
183 break;
184 default:
185 break;
186 }
187 kfree_skb(skb);
188 return; 222 return;
189 } 223 }
190 224
@@ -243,7 +277,7 @@ static int ath9k_wmi_cmd_issue(struct wmi *wmi,
243 hdr->command_id = cpu_to_be16(cmd); 277 hdr->command_id = cpu_to_be16(cmd);
244 hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id); 278 hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
245 279
246 return htc_send(wmi->htc, skb, wmi->ctrl_epid, NULL); 280 return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
247} 281}
248 282
249int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, 283int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 42084277522d..6095eeb6e025 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -17,7 +17,6 @@
17#ifndef WMI_H 17#ifndef WMI_H
18#define WMI_H 18#define WMI_H
19 19
20
21struct wmi_event_txrate { 20struct wmi_event_txrate {
22 __be32 txrate; 21 __be32 txrate;
23 struct { 22 struct {
@@ -31,18 +30,65 @@ struct wmi_cmd_hdr {
31 __be16 seq_no; 30 __be16 seq_no;
32} __packed; 31} __packed;
33 32
33struct wmi_fw_version {
34 __be16 major;
35 __be16 minor;
36
37} __packed;
38
39struct wmi_event_swba {
40 __be64 tsf;
41 u8 beacon_pending;
42};
43
44/*
45 * 64 - HTC header - WMI header - 1 / txstatus
46 * And some other hdr. space is also accounted for.
47 * 12 seems to be the magic number.
48 */
49#define HTC_MAX_TX_STATUS 12
50
51#define ATH9K_HTC_TXSTAT_ACK BIT(0)
52#define ATH9K_HTC_TXSTAT_FILT BIT(1)
53#define ATH9K_HTC_TXSTAT_RTC_CTS BIT(2)
54#define ATH9K_HTC_TXSTAT_MCS BIT(3)
55#define ATH9K_HTC_TXSTAT_CW40 BIT(4)
56#define ATH9K_HTC_TXSTAT_SGI BIT(5)
57
58/*
59 * Legacy rates are indicated as indices.
60 * HT rates are indicated as dot11 numbers.
 61 * This allows us to restrict the rate field
62 * to 4 bits.
63 */
64#define ATH9K_HTC_TXSTAT_RATE 0x0f
65#define ATH9K_HTC_TXSTAT_RATE_S 0
66
67#define ATH9K_HTC_TXSTAT_EPID 0xf0
68#define ATH9K_HTC_TXSTAT_EPID_S 4
69
70struct __wmi_event_txstatus {
71 u8 cookie;
72 u8 ts_rate; /* Also holds EP ID */
73 u8 ts_flags;
74};
75
76struct wmi_event_txstatus {
77 u8 cnt;
78 struct __wmi_event_txstatus txstatus[HTC_MAX_TX_STATUS];
79} __packed;
80
34enum wmi_cmd_id { 81enum wmi_cmd_id {
35 WMI_ECHO_CMDID = 0x0001, 82 WMI_ECHO_CMDID = 0x0001,
36 WMI_ACCESS_MEMORY_CMDID, 83 WMI_ACCESS_MEMORY_CMDID,
37 84
38 /* Commands to Target */ 85 /* Commands to Target */
86 WMI_GET_FW_VERSION,
39 WMI_DISABLE_INTR_CMDID, 87 WMI_DISABLE_INTR_CMDID,
40 WMI_ENABLE_INTR_CMDID, 88 WMI_ENABLE_INTR_CMDID,
41 WMI_RX_LINK_CMDID,
42 WMI_ATH_INIT_CMDID, 89 WMI_ATH_INIT_CMDID,
43 WMI_ABORT_TXQ_CMDID, 90 WMI_ABORT_TXQ_CMDID,
44 WMI_STOP_TX_DMA_CMDID, 91 WMI_STOP_TX_DMA_CMDID,
45 WMI_STOP_DMA_RECV_CMDID,
46 WMI_ABORT_TX_DMA_CMDID, 92 WMI_ABORT_TX_DMA_CMDID,
47 WMI_DRAIN_TXQ_CMDID, 93 WMI_DRAIN_TXQ_CMDID,
48 WMI_DRAIN_TXQ_ALL_CMDID, 94 WMI_DRAIN_TXQ_ALL_CMDID,
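Note: in the new __wmi_event_txstatus above, one byte carries both the rate and the endpoint id: ATH9K_HTC_TXSTAT_RATE selects the low nibble and ATH9K_HTC_TXSTAT_EPID the high nibble (shift 4). A small sketch of packing and unpacking such a byte; the helper name is illustrative, not part of the driver:

    #include <stdio.h>
    #include <stdint.h>

    #define TXSTAT_RATE    0x0f
    #define TXSTAT_RATE_S  0
    #define TXSTAT_EPID    0xf0
    #define TXSTAT_EPID_S  4

    /* Pack a 4-bit rate and a 4-bit endpoint id into one byte. */
    static uint8_t pack_ts_rate(unsigned int rate, unsigned int epid)
    {
        return ((rate << TXSTAT_RATE_S) & TXSTAT_RATE) |
               ((epid << TXSTAT_EPID_S) & TXSTAT_EPID);
    }

    int main(void)
    {
        uint8_t ts_rate = pack_ts_rate(7, 3);   /* rate index 7, endpoint 3 */

        printf("rate=%u epid=%u\n",
               (ts_rate & TXSTAT_RATE) >> TXSTAT_RATE_S,
               (ts_rate & TXSTAT_EPID) >> TXSTAT_EPID_S);
        return 0;
    }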
@@ -50,24 +96,22 @@ enum wmi_cmd_id {
50 WMI_STOP_RECV_CMDID, 96 WMI_STOP_RECV_CMDID,
51 WMI_FLUSH_RECV_CMDID, 97 WMI_FLUSH_RECV_CMDID,
52 WMI_SET_MODE_CMDID, 98 WMI_SET_MODE_CMDID,
53 WMI_RESET_CMDID,
54 WMI_NODE_CREATE_CMDID, 99 WMI_NODE_CREATE_CMDID,
55 WMI_NODE_REMOVE_CMDID, 100 WMI_NODE_REMOVE_CMDID,
56 WMI_VAP_REMOVE_CMDID, 101 WMI_VAP_REMOVE_CMDID,
57 WMI_VAP_CREATE_CMDID, 102 WMI_VAP_CREATE_CMDID,
58 WMI_BEACON_UPDATE_CMDID,
59 WMI_REG_READ_CMDID, 103 WMI_REG_READ_CMDID,
60 WMI_REG_WRITE_CMDID, 104 WMI_REG_WRITE_CMDID,
61 WMI_RC_STATE_CHANGE_CMDID, 105 WMI_RC_STATE_CHANGE_CMDID,
62 WMI_RC_RATE_UPDATE_CMDID, 106 WMI_RC_RATE_UPDATE_CMDID,
63 WMI_DEBUG_INFO_CMDID,
64 WMI_HOST_ATTACH,
65 WMI_TARGET_IC_UPDATE_CMDID, 107 WMI_TARGET_IC_UPDATE_CMDID,
66 WMI_TGT_STATS_CMDID,
67 WMI_TX_AGGR_ENABLE_CMDID, 108 WMI_TX_AGGR_ENABLE_CMDID,
68 WMI_TGT_DETACH_CMDID, 109 WMI_TGT_DETACH_CMDID,
69 WMI_TGT_TXQ_ENABLE_CMDID, 110 WMI_NODE_UPDATE_CMDID,
70 WMI_AGGR_LIMIT_CMD = 0x0026, 111 WMI_INT_STATS_CMDID,
112 WMI_TX_STATS_CMDID,
113 WMI_RX_STATS_CMDID,
114 WMI_BITRATE_MASK_CMDID,
71}; 115};
72 116
73enum wmi_event_id { 117enum wmi_event_id {
@@ -76,9 +120,8 @@ enum wmi_event_id {
76 WMI_FATAL_EVENTID, 120 WMI_FATAL_EVENTID,
77 WMI_TXTO_EVENTID, 121 WMI_TXTO_EVENTID,
78 WMI_BMISS_EVENTID, 122 WMI_BMISS_EVENTID,
79 WMI_WLAN_TXCOMP_EVENTID,
80 WMI_DELBA_EVENTID, 123 WMI_DELBA_EVENTID,
81 WMI_TXRATE_EVENTID, 124 WMI_TXSTATUS_EVENTID,
82}; 125};
83 126
84#define MAX_CMD_NUMBER 62 127#define MAX_CMD_NUMBER 62
@@ -88,6 +131,12 @@ struct register_write {
88 __be32 val; 131 __be32 val;
89}; 132};
90 133
134struct ath9k_htc_tx_event {
135 int count;
136 struct __wmi_event_txstatus txs;
137 struct list_head list;
138};
139
91struct wmi { 140struct wmi {
92 struct ath9k_htc_priv *drv_priv; 141 struct ath9k_htc_priv *drv_priv;
93 struct htc_target *htc; 142 struct htc_target *htc;
@@ -95,12 +144,16 @@ struct wmi {
95 struct mutex op_mutex; 144 struct mutex op_mutex;
96 struct completion cmd_wait; 145 struct completion cmd_wait;
97 enum wmi_cmd_id last_cmd_id; 146 enum wmi_cmd_id last_cmd_id;
147 struct sk_buff_head wmi_event_queue;
148 struct tasklet_struct wmi_event_tasklet;
98 u16 tx_seq_id; 149 u16 tx_seq_id;
99 u8 *cmd_rsp_buf; 150 u8 *cmd_rsp_buf;
100 u32 cmd_rsp_len; 151 u32 cmd_rsp_len;
101 bool stopped; 152 bool stopped;
102 153
103 u8 beacon_pending; 154 struct list_head pending_tx_events;
155 spinlock_t event_lock;
156
104 spinlock_t wmi_lock; 157 spinlock_t wmi_lock;
105 158
106 atomic_t mwrite_cnt; 159 atomic_t mwrite_cnt;
@@ -117,8 +170,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
117 u8 *cmd_buf, u32 cmd_len, 170 u8 *cmd_buf, u32 cmd_len,
118 u8 *rsp_buf, u32 rsp_len, 171 u8 *rsp_buf, u32 rsp_len,
119 u32 timeout); 172 u32 timeout);
120void ath9k_swba_tasklet(unsigned long data); 173void ath9k_wmi_event_tasklet(unsigned long data);
121void ath9k_fatal_work(struct work_struct *work); 174void ath9k_fatal_work(struct work_struct *work);
175void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
122 176
123#define WMI_CMD(_wmi_cmd) \ 177#define WMI_CMD(_wmi_cmd) \
124 do { \ 178 do { \
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 88fa7fdffd05..97dd1fac98b6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -357,6 +357,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
357 struct ath_frame_info *fi; 357 struct ath_frame_info *fi;
358 int nframes; 358 int nframes;
359 u8 tidno; 359 u8 tidno;
 360 bool clear_filter = false;
360 361
361 skb = bf->bf_mpdu; 362 skb = bf->bf_mpdu;
362 hdr = (struct ieee80211_hdr *)skb->data; 363 hdr = (struct ieee80211_hdr *)skb->data;
@@ -441,22 +442,24 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
441 /* transmit completion */ 442 /* transmit completion */
442 acked_cnt++; 443 acked_cnt++;
443 } else { 444 } else {
444 if (!(tid->state & AGGR_CLEANUP) && retry) { 445 if ((tid->state & AGGR_CLEANUP) || !retry) {
445 if (fi->retries < ATH_MAX_SW_RETRIES) {
446 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
447 txpending = 1;
448 } else {
449 bf->bf_state.bf_type |= BUF_XRETRY;
450 txfail = 1;
451 sendbar = 1;
452 txfail_cnt++;
453 }
454 } else {
455 /* 446 /*
456 * cleanup in progress, just fail 447 * cleanup in progress, just fail
457 * the un-acked sub-frames 448 * the un-acked sub-frames
458 */ 449 */
459 txfail = 1; 450 txfail = 1;
451 } else if (fi->retries < ATH_MAX_SW_RETRIES) {
452 if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
453 !an->sleeping)
454 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
455
456 clear_filter = true;
457 txpending = 1;
458 } else {
459 bf->bf_state.bf_type |= BUF_XRETRY;
460 txfail = 1;
461 sendbar = 1;
462 txfail_cnt++;
460 } 463 }
461 } 464 }
462 465
@@ -496,6 +499,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
496 !txfail, sendbar); 499 !txfail, sendbar);
497 } else { 500 } else {
498 /* retry the un-acked ones */ 501 /* retry the un-acked ones */
502 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
499 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) { 503 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
500 if (bf->bf_next == NULL && bf_last->bf_stale) { 504 if (bf->bf_next == NULL && bf_last->bf_stale) {
501 struct ath_buf *tbf; 505 struct ath_buf *tbf;
@@ -546,7 +550,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
546 550
547 /* prepend un-acked frames to the beginning of the pending frame queue */ 551 /* prepend un-acked frames to the beginning of the pending frame queue */
548 if (!list_empty(&bf_pending)) { 552 if (!list_empty(&bf_pending)) {
553 if (an->sleeping)
554 ieee80211_sta_set_tim(sta);
555
549 spin_lock_bh(&txq->axq_lock); 556 spin_lock_bh(&txq->axq_lock);
557 if (clear_filter)
558 tid->ac->clear_ps_filter = true;
550 list_splice(&bf_pending, &tid->buf_q); 559 list_splice(&bf_pending, &tid->buf_q);
551 ath_tx_queue_tid(txq, tid); 560 ath_tx_queue_tid(txq, tid);
552 spin_unlock_bh(&txq->axq_lock); 561 spin_unlock_bh(&txq->axq_lock);
@@ -816,6 +825,11 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
816 bf = list_first_entry(&bf_q, struct ath_buf, list); 825 bf = list_first_entry(&bf_q, struct ath_buf, list);
817 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 826 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
818 827
828 if (tid->ac->clear_ps_filter) {
829 tid->ac->clear_ps_filter = false;
830 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
831 }
832
819 /* if only one frame, send as non-aggregate */ 833 /* if only one frame, send as non-aggregate */
820 if (bf == bf->bf_lastbf) { 834 if (bf == bf->bf_lastbf) {
821 fi = get_frame_info(bf->bf_mpdu); 835 fi = get_frame_info(bf->bf_mpdu);
@@ -896,6 +910,67 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
896 ath_tx_flush_tid(sc, txtid); 910 ath_tx_flush_tid(sc, txtid);
897} 911}
898 912
913bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
914{
915 struct ath_atx_tid *tid;
916 struct ath_atx_ac *ac;
917 struct ath_txq *txq;
918 bool buffered = false;
919 int tidno;
920
921 for (tidno = 0, tid = &an->tid[tidno];
922 tidno < WME_NUM_TID; tidno++, tid++) {
923
924 if (!tid->sched)
925 continue;
926
927 ac = tid->ac;
928 txq = ac->txq;
929
930 spin_lock_bh(&txq->axq_lock);
931
932 if (!list_empty(&tid->buf_q))
933 buffered = true;
934
935 tid->sched = false;
936 list_del(&tid->list);
937
938 if (ac->sched) {
939 ac->sched = false;
940 list_del(&ac->list);
941 }
942
943 spin_unlock_bh(&txq->axq_lock);
944 }
945
946 return buffered;
947}
948
949void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
950{
951 struct ath_atx_tid *tid;
952 struct ath_atx_ac *ac;
953 struct ath_txq *txq;
954 int tidno;
955
956 for (tidno = 0, tid = &an->tid[tidno];
957 tidno < WME_NUM_TID; tidno++, tid++) {
958
959 ac = tid->ac;
960 txq = ac->txq;
961
962 spin_lock_bh(&txq->axq_lock);
963 ac->clear_ps_filter = true;
964
965 if (!list_empty(&tid->buf_q) && !tid->paused) {
966 ath_tx_queue_tid(txq, tid);
967 ath_txq_schedule(sc, txq);
968 }
969
970 spin_unlock_bh(&txq->axq_lock);
971 }
972}
973
899void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 974void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
900{ 975{
901 struct ath_atx_tid *txtid; 976 struct ath_atx_tid *txtid;
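Note: ath_tx_aggr_sleep() above walks every TID of a station, unschedules each under its queue lock, and reports whether anything was still buffered so the caller can set the TIM bit for the sleeping station. A compact sketch of that walk, with a single mutex standing in for the per-queue spinlock and invented structure names. Build with -lpthread:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_TID 8

    struct tid_queue {
        bool scheduled;
        int pending;                 /* frames still buffered for this TID */
    };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Unschedule all TIDs of a station; return true if any scheduled TID
     * still had buffered frames. */
    static bool station_sleep(struct tid_queue tids[NUM_TID])
    {
        bool buffered = false;
        int i;

        for (i = 0; i < NUM_TID; i++) {
            pthread_mutex_lock(&queue_lock);
            if (tids[i].scheduled) {
                tids[i].scheduled = false;
                if (tids[i].pending)
                    buffered = true;
            }
            pthread_mutex_unlock(&queue_lock);
        }
        return buffered;
    }

    int main(void)
    {
        struct tid_queue tids[NUM_TID] = { { true, 0 }, { true, 3 } };

        printf("buffered=%d\n", station_sleep(tids));
        return 0;
    }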
@@ -1451,7 +1526,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1451 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1526 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1452 struct ieee80211_hdr *hdr; 1527 struct ieee80211_hdr *hdr;
1453 struct ath_frame_info *fi = get_frame_info(skb); 1528 struct ath_frame_info *fi = get_frame_info(skb);
1454 struct ath_node *an; 1529 struct ath_node *an = NULL;
1455 struct ath_atx_tid *tid; 1530 struct ath_atx_tid *tid;
1456 enum ath9k_key_type keytype; 1531 enum ath9k_key_type keytype;
1457 u16 seqno = 0; 1532 u16 seqno = 0;
@@ -1459,11 +1534,13 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1459 1534
1460 keytype = ath9k_cmn_get_hw_crypto_keytype(skb); 1535 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1461 1536
1537 if (sta)
1538 an = (struct ath_node *) sta->drv_priv;
1539
1462 hdr = (struct ieee80211_hdr *)skb->data; 1540 hdr = (struct ieee80211_hdr *)skb->data;
1463 if (sta && ieee80211_is_data_qos(hdr->frame_control) && 1541 if (an && ieee80211_is_data_qos(hdr->frame_control) &&
1464 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) { 1542 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
1465 1543
1466 an = (struct ath_node *) sta->drv_priv;
1467 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 1544 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1468 1545
1469 /* 1546 /*
@@ -1479,6 +1556,8 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1479 memset(fi, 0, sizeof(*fi)); 1556 memset(fi, 0, sizeof(*fi));
1480 if (hw_key) 1557 if (hw_key)
1481 fi->keyix = hw_key->hw_key_idx; 1558 fi->keyix = hw_key->hw_key_idx;
1559 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1560 fi->keyix = an->ps_key;
1482 else 1561 else
1483 fi->keyix = ATH9K_TXKEYIX_INVALID; 1562 fi->keyix = ATH9K_TXKEYIX_INVALID;
1484 fi->keytype = keytype; 1563 fi->keytype = keytype;
@@ -1491,7 +1570,6 @@ static int setup_tx_flags(struct sk_buff *skb)
1491 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1570 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1492 int flags = 0; 1571 int flags = 0;
1493 1572
1494 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1495 flags |= ATH9K_TXDESC_INTREQ; 1573 flags |= ATH9K_TXDESC_INTREQ;
1496 1574
1497 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1575 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
@@ -1585,8 +1663,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
1585 rix = rates[i].idx; 1663 rix = rates[i].idx;
1586 series[i].Tries = rates[i].count; 1664 series[i].Tries = rates[i].count;
1587 1665
1588 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) || 1666 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1589 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
1590 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1667 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1591 flags |= ATH9K_TXDESC_RTSENA; 1668 flags |= ATH9K_TXDESC_RTSENA;
1592 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 1669 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -1655,8 +1732,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
1655 !is_pspoll, ctsrate, 1732 !is_pspoll, ctsrate,
1656 0, series, 4, flags); 1733 0, series, 4, flags);
1657 1734
1658 if (sc->config.ath_aggr_prot && flags)
1659 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
1660} 1735}
1661 1736
1662static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw, 1737static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
@@ -1754,6 +1829,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1754 if (txctl->paprd) 1829 if (txctl->paprd)
1755 bf->bf_state.bfs_paprd_timestamp = jiffies; 1830 bf->bf_state.bfs_paprd_timestamp = jiffies;
1756 1831
1832 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1833 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1834
1757 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); 1835 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
1758 } 1836 }
1759 1837
@@ -1767,6 +1845,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1845 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1768 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1846 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1769 struct ieee80211_sta *sta = info->control.sta; 1847 struct ieee80211_sta *sta = info->control.sta;
1848 struct ieee80211_vif *vif = info->control.vif;
1770 struct ath_softc *sc = hw->priv; 1849 struct ath_softc *sc = hw->priv;
1771 struct ath_txq *txq = txctl->txq; 1850 struct ath_txq *txq = txctl->txq;
1772 struct ath_buf *bf; 1851 struct ath_buf *bf;
@@ -1804,6 +1883,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1804 memmove(skb->data, skb->data + padsize, padpos); 1883 memmove(skb->data, skb->data + padsize, padpos);
1805 } 1884 }
1806 1885
1886 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1887 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1888 !ieee80211_is_data(hdr->frame_control))
1889 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1890
1807 setup_frame_info(hw, skb, frmlen); 1891 setup_frame_info(hw, skb, frmlen);
1808 1892
1809 /* 1893 /*
@@ -1980,7 +2064,7 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1980 if (ieee80211_is_data(hdr->frame_control) && 2064 if (ieee80211_is_data(hdr->frame_control) &&
1981 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN | 2065 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1982 ATH9K_TX_DELIM_UNDERRUN)) && 2066 ATH9K_TX_DELIM_UNDERRUN)) &&
1983 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max) 2067 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
1984 tx_info->status.rates[tx_rateindex].count = 2068 tx_info->status.rates[tx_rateindex].count =
1985 hw->max_rate_tries; 2069 hw->max_rate_tries;
1986 } 2070 }
@@ -2099,28 +2183,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2099 } 2183 }
2100} 2184}
2101 2185
2102static void ath_hw_pll_work(struct work_struct *work)
2103{
2104 struct ath_softc *sc = container_of(work, struct ath_softc,
2105 hw_pll_work.work);
2106 static int count;
2107
2108 if (AR_SREV_9485(sc->sc_ah)) {
2109 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2110 count++;
2111
2112 if (count == 3) {
2113 /* Rx is hung for more than 500ms. Reset it */
2114 ath_reset(sc, true);
2115 count = 0;
2116 }
2117 } else
2118 count = 0;
2119
2120 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2121 }
2122}
2123
2124static void ath_tx_complete_poll_work(struct work_struct *work) 2186static void ath_tx_complete_poll_work(struct work_struct *work)
2125{ 2187{
2126 struct ath_softc *sc = container_of(work, struct ath_softc, 2188 struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2144,33 +2206,6 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2144 } else { 2206 } else {
2145 txq->axq_tx_inprogress = true; 2207 txq->axq_tx_inprogress = true;
2146 } 2208 }
2147 } else {
2148 /* If the queue has pending buffers, then it
2149 * should be doing tx work (and have axq_depth).
2150 * Shouldn't get to this state I think..but
2151 * we do.
2152 */
2153 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2154 (txq->pending_frames > 0 ||
2155 !list_empty(&txq->axq_acq) ||
2156 txq->stopped)) {
2157 ath_err(ath9k_hw_common(sc->sc_ah),
2158 "txq: %p axq_qnum: %u,"
2159 " mac80211_qnum: %i"
2160 " axq_link: %p"
2161 " pending frames: %i"
2162 " axq_acq empty: %i"
2163 " stopped: %i"
2164 " axq_depth: 0 Attempting to"
2165 " restart tx logic.\n",
2166 txq, txq->axq_qnum,
2167 txq->mac80211_qnum,
2168 txq->axq_link,
2169 txq->pending_frames,
2170 list_empty(&txq->axq_acq),
2171 txq->stopped);
2172 ath_txq_schedule(sc, txq);
2173 }
2174 } 2209 }
2175 spin_unlock_bh(&txq->axq_lock); 2210 spin_unlock_bh(&txq->axq_lock);
2176 } 2211 }
@@ -2342,7 +2377,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2342 } 2377 }
2343 2378
2344 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); 2379 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2345 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
2346 2380
2347 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 2381 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2348 error = ath_tx_edma_init(sc); 2382 error = ath_tx_edma_init(sc);
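
Taken together, the ath9k hunks above that add IEEE80211_TX_CTL_CLEAR_PS_FILT handling route a mac80211-level hint down to the hardware descriptor: ath_tx_start() sets the flag for frames sent on non-AP interfaces or for non-data frames, and ath_tx_start_dma() turns it into a clear-destination-mask request via ath9k_hw_set_clrdmask(). The sketch below is a minimal userspace model of that two-stage hand-off; the types and function names are invented for illustration and are not ath9k symbols.

    /* Minimal userspace model of the CLEAR_PS_FILT -> clrdmask hand-off.
     * All names here are illustrative, not taken from ath9k. */
    #include <stdbool.h>
    #include <stdio.h>

    enum iftype { IF_STATION, IF_AP, IF_AP_VLAN };

    struct frame {
        enum iftype vif_type;
        bool is_data;
        bool clear_ps_filt;  /* stack-level hint (models IEEE80211_TX_CTL_CLEAR_PS_FILT) */
    };

    struct descriptor {
        bool clrdmask;       /* models the hardware "clear destination mask" bit */
    };

    /* Stage 1: decide at tx entry whether the PS filter must be cleared. */
    static void tx_start(struct frame *f)
    {
        if ((f->vif_type != IF_AP && f->vif_type != IF_AP_VLAN) || !f->is_data)
            f->clear_ps_filt = true;
    }

    /* Stage 2: translate the software hint into a per-descriptor bit. */
    static void tx_fill_desc(const struct frame *f, struct descriptor *d)
    {
        d->clrdmask = f->clear_ps_filt;
    }

    int main(void)
    {
        struct frame f = { .vif_type = IF_STATION, .is_data = true };
        struct descriptor d = { 0 };

        tx_start(&f);
        tx_fill_desc(&f, &d);
        printf("clrdmask=%d\n", d.clrdmask);  /* 1 for a station-mode data frame */
        return 0;
    }
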
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 3d4ed5863732..bb578690935e 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -448,6 +448,8 @@ struct carl9170_ba_stats {
448 448
449struct carl9170_sta_info { 449struct carl9170_sta_info {
450 bool ht_sta; 450 bool ht_sta;
451 bool sleeping;
452 atomic_t pending_frames;
451 unsigned int ampdu_max_len; 453 unsigned int ampdu_max_len;
452 struct carl9170_sta_tid *agg[CARL9170_NUM_TID]; 454 struct carl9170_sta_tid *agg[CARL9170_NUM_TID];
453 struct carl9170_ba_stats stats[CARL9170_NUM_TID]; 455 struct carl9170_ba_stats stats[CARL9170_NUM_TID];
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 89fe60accf85..7d5c65ea94e6 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -883,7 +883,7 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
883 * then checking the error flags, later. 883 * then checking the error flags, later.
884 */ 884 */
885 885
886 if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI) 886 if (*new_flags & FIF_ALLMULTI)
887 multicast = ~0ULL; 887 multicast = ~0ULL;
888 888
889 if (multicast != ar->cur_mc_hash) 889 if (multicast != ar->cur_mc_hash)
@@ -1193,6 +1193,8 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1193 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; 1193 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1194 unsigned int i; 1194 unsigned int i;
1195 1195
1196 atomic_set(&sta_info->pending_frames, 0);
1197
1196 if (sta->ht_cap.ht_supported) { 1198 if (sta->ht_cap.ht_supported) {
1197 if (sta->ht_cap.ampdu_density > 6) { 1199 if (sta->ht_cap.ampdu_density > 6) {
1198 /* 1200 /*
@@ -1467,99 +1469,17 @@ static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1467 enum sta_notify_cmd cmd, 1469 enum sta_notify_cmd cmd,
1468 struct ieee80211_sta *sta) 1470 struct ieee80211_sta *sta)
1469{ 1471{
1470 struct ar9170 *ar = hw->priv;
1471 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; 1472 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1472 struct sk_buff *skb, *tmp;
1473 struct sk_buff_head free;
1474 int i;
1475 1473
1476 switch (cmd) { 1474 switch (cmd) {
1477 case STA_NOTIFY_SLEEP: 1475 case STA_NOTIFY_SLEEP:
1478 /* 1476 sta_info->sleeping = true;
1479 * Since the peer is no longer listening, we have to return 1477 if (atomic_read(&sta_info->pending_frames))
1480 * as many SKBs as possible back to the mac80211 stack. 1478 ieee80211_sta_block_awake(hw, sta, true);
1481 * It will deal with the retry procedure, once the peer
1482 * has become available again.
1483 *
1484 * NB: Ideally, the driver should return the all frames in
1485 * the correct, ascending order. However, I think that this
1486 * functionality should be implemented in the stack and not
1487 * here...
1488 */
1489
1490 __skb_queue_head_init(&free);
1491
1492 if (sta->ht_cap.ht_supported) {
1493 rcu_read_lock();
1494 for (i = 0; i < CARL9170_NUM_TID; i++) {
1495 struct carl9170_sta_tid *tid_info;
1496
1497 tid_info = rcu_dereference(sta_info->agg[i]);
1498
1499 if (!tid_info)
1500 continue;
1501
1502 spin_lock_bh(&ar->tx_ampdu_list_lock);
1503 if (tid_info->state >
1504 CARL9170_TID_STATE_SUSPEND)
1505 tid_info->state =
1506 CARL9170_TID_STATE_SUSPEND;
1507 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1508
1509 spin_lock_bh(&tid_info->lock);
1510 while ((skb = __skb_dequeue(&tid_info->queue)))
1511 __skb_queue_tail(&free, skb);
1512 spin_unlock_bh(&tid_info->lock);
1513 }
1514 rcu_read_unlock();
1515 }
1516
1517 for (i = 0; i < ar->hw->queues; i++) {
1518 spin_lock_bh(&ar->tx_pending[i].lock);
1519 skb_queue_walk_safe(&ar->tx_pending[i], skb, tmp) {
1520 struct _carl9170_tx_superframe *super;
1521 struct ieee80211_hdr *hdr;
1522 struct ieee80211_tx_info *info;
1523
1524 super = (void *) skb->data;
1525 hdr = (void *) super->frame_data;
1526
1527 if (compare_ether_addr(hdr->addr1, sta->addr))
1528 continue;
1529
1530 __skb_unlink(skb, &ar->tx_pending[i]);
1531
1532 info = IEEE80211_SKB_CB(skb);
1533 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1534 atomic_dec(&ar->tx_ampdu_upload);
1535
1536 carl9170_tx_status(ar, skb, false);
1537 }
1538 spin_unlock_bh(&ar->tx_pending[i].lock);
1539 }
1540
1541 while ((skb = __skb_dequeue(&free)))
1542 carl9170_tx_status(ar, skb, false);
1543
1544 break; 1479 break;
1545 1480
1546 case STA_NOTIFY_AWAKE: 1481 case STA_NOTIFY_AWAKE:
1547 if (!sta->ht_cap.ht_supported) 1482 sta_info->sleeping = false;
1548 return;
1549
1550 rcu_read_lock();
1551 for (i = 0; i < CARL9170_NUM_TID; i++) {
1552 struct carl9170_sta_tid *tid_info;
1553
1554 tid_info = rcu_dereference(sta_info->agg[i]);
1555
1556 if (!tid_info)
1557 continue;
1558
1559 if ((tid_info->state == CARL9170_TID_STATE_SUSPEND))
1560 tid_info->state = CARL9170_TID_STATE_IDLE;
1561 }
1562 rcu_read_unlock();
1563 break; 1483 break;
1564 } 1484 }
1565} 1485}
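
The rewritten carl9170_op_sta_notify() above no longer pulls every queued frame back out of the driver when a peer dozes; it just records sta_info->sleeping and, only if frames are already in flight, asks mac80211 to hold further traffic with ieee80211_sta_block_awake() until the driver has reported them. Below is a minimal userspace model of that bookkeeping; the structure and function names are invented for illustration.

    /* Userspace model of the new sleep/awake bookkeeping. Illustrative names. */
    #include <stdbool.h>
    #include <stdio.h>

    struct sta_state {
        bool sleeping;       /* models carl9170_sta_info::sleeping            */
        int  pending_frames; /* frames handed to the device, not yet reported */
        bool blocked;        /* models the ieee80211_sta_block_awake() state  */
    };

    static void notify_sleep(struct sta_state *s)
    {
        s->sleeping = true;
        /* Block mac80211 only while frames are still in flight; nothing is
         * pulled back out of the hardware queues any more. */
        if (s->pending_frames > 0)
            s->blocked = true;
    }

    static void notify_awake(struct sta_state *s)
    {
        s->sleeping = false;
    }

    int main(void)
    {
        struct sta_state s = { .pending_frames = 2 };

        notify_sleep(&s);
        printf("sleeping=%d blocked=%d\n", s.sleeping, s.blocked);
        notify_awake(&s);
        printf("sleeping=%d blocked=%d\n", s.sleeping, s.blocked);
        return 0;
    }
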
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index cb70ed7ec5cc..e94084fcf6f5 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -104,12 +104,60 @@ static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
104 spin_unlock_bh(&ar->tx_stats_lock); 104 spin_unlock_bh(&ar->tx_stats_lock);
105} 105}
106 106
107/* needs rcu_read_lock */
108static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
109 struct sk_buff *skb)
110{
111 struct _carl9170_tx_superframe *super = (void *) skb->data;
112 struct ieee80211_hdr *hdr = (void *) super->frame_data;
113 struct ieee80211_vif *vif;
114 unsigned int vif_id;
115
116 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
117 CARL9170_TX_SUPER_MISC_VIF_ID_S;
118
119 if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
120 return NULL;
121
122 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
123 if (unlikely(!vif))
124 return NULL;
125
126 /*
127 * Normally we should use wrappers like ieee80211_get_DA to get
128 * the correct peer ieee80211_sta.
129 *
130 * But there is a problem with indirect traffic (broadcasts, or
131 * data which is designated for other stations) in station mode.
132 * The frame will be directed to the AP for distribution and not
133 * to the actual destination.
134 */
135
136 return ieee80211_find_sta(vif, hdr->addr1);
137}
138
139static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
140{
141 struct ieee80211_sta *sta;
142 struct carl9170_sta_info *sta_info;
143
144 rcu_read_lock();
145 sta = __carl9170_get_tx_sta(ar, skb);
146 if (unlikely(!sta))
147 goto out_rcu;
148
149 sta_info = (struct carl9170_sta_info *) sta->drv_priv;
150 if (atomic_dec_return(&sta_info->pending_frames) == 0)
151 ieee80211_sta_block_awake(ar->hw, sta, false);
152
153out_rcu:
154 rcu_read_unlock();
155}
156
107static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb) 157static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
108{ 158{
109 struct ieee80211_tx_info *txinfo;
110 int queue; 159 int queue;
111 160
112 txinfo = IEEE80211_SKB_CB(skb);
113 queue = skb_get_queue_mapping(skb); 161 queue = skb_get_queue_mapping(skb);
114 162
115 spin_lock_bh(&ar->tx_stats_lock); 163 spin_lock_bh(&ar->tx_stats_lock);
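
The new __carl9170_get_tx_sta() helper above factors out a lookup that had been open-coded in several places (AMPDU status processing, the AMPDU janitor and, with this series, PS handling): decode the vif id from the superframe, dereference the vif under RCU, then resolve the peer with ieee80211_find_sta() on addr1. The sketch below models the same consolidation as a single bounds-checked resolver over an indexed owner table; the names and table layout are invented for illustration.

    /* Userspace model of consolidating a duplicated "id -> owner" lookup
     * into one bounds-checked helper. Illustrative names only. */
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_OWNERS 2

    struct owner { const char *name; };
    struct frame { unsigned int owner_id; };

    static struct owner *owners[MAX_OWNERS];

    /* Single place that validates the id and resolves the owner. */
    static struct owner *get_tx_owner(const struct frame *f)
    {
        if (f->owner_id >= MAX_OWNERS)  /* mirrors the WARN_ON_ONCE bound check */
            return NULL;
        return owners[f->owner_id];     /* may be NULL if the slot was torn down */
    }

    int main(void)
    {
        static struct owner ap = { "ap0" };
        struct frame f = { .owner_id = 0 };
        struct owner *o;

        owners[0] = &ap;
        o = get_tx_owner(&f);
        printf("%s\n", o ? o->name : "(none)");
        return 0;
    }
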
@@ -135,6 +183,7 @@ static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
135 } 183 }
136 184
137 spin_unlock_bh(&ar->tx_stats_lock); 185 spin_unlock_bh(&ar->tx_stats_lock);
186
138 if (atomic_dec_and_test(&ar->tx_total_queued)) 187 if (atomic_dec_and_test(&ar->tx_total_queued))
139 complete(&ar->tx_flush); 188 complete(&ar->tx_flush);
140} 189}
@@ -329,13 +378,9 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
329{ 378{
330 struct _carl9170_tx_superframe *super = (void *) skb->data; 379 struct _carl9170_tx_superframe *super = (void *) skb->data;
331 struct ieee80211_hdr *hdr = (void *) super->frame_data; 380 struct ieee80211_hdr *hdr = (void *) super->frame_data;
332 struct ieee80211_tx_info *tx_info;
333 struct carl9170_tx_info *ar_info;
334 struct carl9170_sta_info *sta_info;
335 struct ieee80211_sta *sta; 381 struct ieee80211_sta *sta;
382 struct carl9170_sta_info *sta_info;
336 struct carl9170_sta_tid *tid_info; 383 struct carl9170_sta_tid *tid_info;
337 struct ieee80211_vif *vif;
338 unsigned int vif_id;
339 u8 tid; 384 u8 tid;
340 385
341 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) || 386 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
@@ -343,30 +388,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
343 (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR)))) 388 (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
344 return; 389 return;
345 390
346 tx_info = IEEE80211_SKB_CB(skb);
347 ar_info = (void *) tx_info->rate_driver_data;
348
349 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
350 CARL9170_TX_SUPER_MISC_VIF_ID_S;
351
352 if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
353 return;
354
355 rcu_read_lock(); 391 rcu_read_lock();
356 vif = rcu_dereference(ar->vif_priv[vif_id].vif); 392 sta = __carl9170_get_tx_sta(ar, skb);
357 if (unlikely(!vif))
358 goto out_rcu;
359
360 /*
361 * Normally we should use wrappers like ieee80211_get_DA to get
362 * the correct peer ieee80211_sta.
363 *
364 * But there is a problem with indirect traffic (broadcasts, or
365 * data which is designated for other stations) in station mode.
366 * The frame will be directed to the AP for distribution and not
367 * to the actual destination.
368 */
369 sta = ieee80211_find_sta(vif, hdr->addr1);
370 if (unlikely(!sta)) 393 if (unlikely(!sta))
371 goto out_rcu; 394 goto out_rcu;
372 395
@@ -427,6 +450,7 @@ void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
427 if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) 450 if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
428 carl9170_tx_status_process_ampdu(ar, skb, txinfo); 451 carl9170_tx_status_process_ampdu(ar, skb, txinfo);
429 452
453 carl9170_tx_ps_unblock(ar, skb);
430 carl9170_tx_put_skb(skb); 454 carl9170_tx_put_skb(skb);
431} 455}
432 456
@@ -540,11 +564,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
540 struct sk_buff *skb; 564 struct sk_buff *skb;
541 struct ieee80211_tx_info *txinfo; 565 struct ieee80211_tx_info *txinfo;
542 struct carl9170_tx_info *arinfo; 566 struct carl9170_tx_info *arinfo;
543 struct _carl9170_tx_superframe *super;
544 struct ieee80211_sta *sta; 567 struct ieee80211_sta *sta;
545 struct ieee80211_vif *vif;
546 struct ieee80211_hdr *hdr;
547 unsigned int vif_id;
548 568
549 rcu_read_lock(); 569 rcu_read_lock();
550 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { 570 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
@@ -562,20 +582,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
562 msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT))) 582 msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
563 goto unlock; 583 goto unlock;
564 584
565 super = (void *) skb->data; 585 sta = __carl9170_get_tx_sta(ar, skb);
566 hdr = (void *) super->frame_data;
567
568 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
569 CARL9170_TX_SUPER_MISC_VIF_ID_S;
570
571 if (WARN_ON(vif_id >= AR9170_MAX_VIRTUAL_MAC))
572 goto unlock;
573
574 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
575 if (WARN_ON(!vif))
576 goto unlock;
577
578 sta = ieee80211_find_sta(vif, hdr->addr1);
579 if (WARN_ON(!sta)) 586 if (WARN_ON(!sta))
580 goto unlock; 587 goto unlock;
581 588
@@ -611,7 +618,6 @@ static void __carl9170_tx_process_status(struct ar9170 *ar,
611{ 618{
612 struct sk_buff *skb; 619 struct sk_buff *skb;
613 struct ieee80211_tx_info *txinfo; 620 struct ieee80211_tx_info *txinfo;
614 struct carl9170_tx_info *arinfo;
615 unsigned int r, t, q; 621 unsigned int r, t, q;
616 bool success = true; 622 bool success = true;
617 623
@@ -627,7 +633,6 @@ static void __carl9170_tx_process_status(struct ar9170 *ar,
627 } 633 }
628 634
629 txinfo = IEEE80211_SKB_CB(skb); 635 txinfo = IEEE80211_SKB_CB(skb);
630 arinfo = (void *) txinfo->rate_driver_data;
631 636
632 if (!(info & CARL9170_TX_STATUS_SUCCESS)) 637 if (!(info & CARL9170_TX_STATUS_SUCCESS))
633 success = false; 638 success = false;
@@ -1199,15 +1204,6 @@ static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
1199 arinfo = (void *) info->rate_driver_data; 1204 arinfo = (void *) info->rate_driver_data;
1200 1205
1201 arinfo->timeout = jiffies; 1206 arinfo->timeout = jiffies;
1202
1203 /*
1204 * increase ref count to "2".
1205 * Ref counting is the easiest way to solve the race between
1206 * the the urb's completion routine: carl9170_tx_callback and
1207 * wlan tx status functions: carl9170_tx_status/janitor.
1208 */
1209 carl9170_tx_get_skb(skb);
1210
1211 return skb; 1207 return skb;
1212 1208
1213err_unlock: 1209err_unlock:
@@ -1228,6 +1224,36 @@ void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
1228 __carl9170_tx_process_status(ar, super->s.cookie, q); 1224 __carl9170_tx_process_status(ar, super->s.cookie, q);
1229} 1225}
1230 1226
1227static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
1228{
1229 struct ieee80211_sta *sta;
1230 struct carl9170_sta_info *sta_info;
1231
1232 rcu_read_lock();
1233 sta = __carl9170_get_tx_sta(ar, skb);
1234 if (!sta)
1235 goto out_rcu;
1236
1237 sta_info = (void *) sta->drv_priv;
1238 if (unlikely(sta_info->sleeping)) {
1239 struct ieee80211_tx_info *tx_info;
1240
1241 rcu_read_unlock();
1242
1243 tx_info = IEEE80211_SKB_CB(skb);
1244 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
1245 atomic_dec(&ar->tx_ampdu_upload);
1246
1247 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1248 carl9170_tx_status(ar, skb, false);
1249 return true;
1250 }
1251
1252out_rcu:
1253 rcu_read_unlock();
1254 return false;
1255}
1256
1231static void carl9170_tx(struct ar9170 *ar) 1257static void carl9170_tx(struct ar9170 *ar)
1232{ 1258{
1233 struct sk_buff *skb; 1259 struct sk_buff *skb;
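
carl9170_tx_ps_drop() above is invoked from the main tx loop right after a frame is picked from the pending queue: if the destination station has since gone to sleep, the frame is not submitted to the USB layer at all but is handed back to mac80211 with IEEE80211_TX_STAT_TX_FILTERED, so the stack can buffer and retry it once the peer wakes up. A compact userspace model of that filter step, with invented names:

    /* Userspace model of filtering frames for a sleeping peer out of the
     * submit path. Illustrative names only. */
    #include <stdbool.h>
    #include <stdio.h>

    struct peer  { bool sleeping; };
    struct frame { struct peer *dst; bool filtered; };

    /* Returns true when the frame was bounced back instead of submitted. */
    static bool ps_drop(struct frame *f)
    {
        if (f->dst && f->dst->sleeping) {
            f->filtered = true;   /* models IEEE80211_TX_STAT_TX_FILTERED */
            return true;
        }
        return false;
    }

    static void tx_loop(struct frame *frames, int n)
    {
        for (int i = 0; i < n; i++) {
            if (ps_drop(&frames[i]))
                continue;                       /* handed back to the stack  */
            printf("frame %d submitted\n", i);  /* would go to the USB layer */
        }
    }

    int main(void)
    {
        struct peer awake = { false }, dozing = { true };
        struct frame frames[2] = { { &awake, false }, { &dozing, false } };

        tx_loop(frames, 2);
        printf("frame 1 filtered=%d\n", frames[1].filtered);
        return 0;
    }
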
@@ -1247,6 +1273,9 @@ static void carl9170_tx(struct ar9170 *ar)
1247 if (unlikely(!skb)) 1273 if (unlikely(!skb))
1248 break; 1274 break;
1249 1275
1276 if (unlikely(carl9170_tx_ps_drop(ar, skb)))
1277 continue;
1278
1250 atomic_inc(&ar->tx_total_pending); 1279 atomic_inc(&ar->tx_total_pending);
1251 1280
1252 q = __carl9170_get_queue(ar, i); 1281 q = __carl9170_get_queue(ar, i);
@@ -1256,6 +1285,16 @@ static void carl9170_tx(struct ar9170 *ar)
1256 */ 1285 */
1257 skb_queue_tail(&ar->tx_status[q], skb); 1286 skb_queue_tail(&ar->tx_status[q], skb);
1258 1287
1288 /*
1289 * increase ref count to "2".
1290 * Ref counting is the easiest way to solve the
1291 * race between the urb's completion routine:
1292 * carl9170_tx_callback
1293 * and wlan tx status functions:
1294 * carl9170_tx_status/janitor.
1295 */
1296 carl9170_tx_get_skb(skb);
1297
1259 carl9170_usb_tx(ar, skb); 1298 carl9170_usb_tx(ar, skb);
1260 schedule_garbagecollector = true; 1299 schedule_garbagecollector = true;
1261 } 1300 }
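
The hunk above moves the extra skb reference (and its explanatory comment) from carl9170_tx_pick_skb() into carl9170_tx(), so the second reference is taken only once a frame is actually queued into the URB-completion/status race; a frame bounced earlier by carl9170_tx_ps_drop() therefore carries a single reference and is released by the normal status path alone. The sketch below models taking the second reference at submit time; the refcount helpers are invented stand-ins.

    /* Userspace model of taking the second reference only when a buffer is
     * really handed to two independent completion paths. Illustrative only. */
    #include <stdio.h>

    struct buf { int refs; };

    static void buf_get(struct buf *b) { b->refs++; }
    static void buf_put(struct buf *b)
    {
        if (--b->refs == 0)
            printf("buffer freed\n");
    }

    static void complete_status(struct buf *b) { buf_put(b); } /* status path  */
    static void complete_usb(struct buf *b)    { buf_put(b); } /* urb callback */

    static void submit(struct buf *b)
    {
        buf_get(b);          /* ref "2": both paths below will drop one */
        complete_usb(b);
        complete_status(b);
    }

    int main(void)
    {
        struct buf dropped = { .refs = 1 };
        struct buf sent    = { .refs = 1 };

        /* A frame filtered before submit keeps its single reference and is
         * released once by the status path alone. */
        complete_status(&dropped);

        /* A submitted frame gets the extra reference first. */
        submit(&sent);
        return 0;
    }
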
@@ -1275,7 +1314,6 @@ static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1275 struct carl9170_sta_info *sta_info; 1314 struct carl9170_sta_info *sta_info;
1276 struct carl9170_sta_tid *agg; 1315 struct carl9170_sta_tid *agg;
1277 struct sk_buff *iter; 1316 struct sk_buff *iter;
1278 unsigned int max;
1279 u16 tid, seq, qseq, off; 1317 u16 tid, seq, qseq, off;
1280 bool run = false; 1318 bool run = false;
1281 1319
@@ -1285,7 +1323,6 @@ static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1285 1323
1286 rcu_read_lock(); 1324 rcu_read_lock();
1287 agg = rcu_dereference(sta_info->agg[tid]); 1325 agg = rcu_dereference(sta_info->agg[tid]);
1288 max = sta_info->ampdu_max_len;
1289 1326
1290 if (!agg) 1327 if (!agg)
1291 goto err_unlock_rcu; 1328 goto err_unlock_rcu;
@@ -1368,6 +1405,11 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1368 * all ressouces which are associated with the frame. 1405 * all ressouces which are associated with the frame.
1369 */ 1406 */
1370 1407
1408 if (sta) {
1409 struct carl9170_sta_info *stai = (void *) sta->drv_priv;
1410 atomic_inc(&stai->pending_frames);
1411 }
1412
1371 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1413 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1372 run = carl9170_tx_ampdu_queue(ar, sta, skb); 1414 run = carl9170_tx_ampdu_queue(ar, sta, skb);
1373 if (run) 1415 if (run)
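
End to end, the carl9170 changes give each station an atomic pending_frames counter: carl9170_op_tx() increments it when a frame is accepted, carl9170_tx_ps_unblock() decrements it from the status path and lifts the mac80211 block when it reaches zero, and the sleep notification only blocks the station while the counter is non-zero. The standalone C11 sketch below models that counter with <stdatomic.h>; it is a simplified illustration, not driver code.

    /* C11 model of the per-station pending-frames counter. Illustrative only. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sta {
        atomic_int pending_frames;
        bool sleeping;
        bool blocked;   /* models ieee80211_sta_block_awake(hw, sta, blocked) */
    };

    static void op_tx(struct sta *s)
    {
        atomic_fetch_add(&s->pending_frames, 1);   /* frame accepted by driver */
    }

    static void tx_status(struct sta *s)
    {
        /* Previous value 1 means the counter just hit zero: last in-flight
         * frame for this peer lifts the block. */
        if (atomic_fetch_sub(&s->pending_frames, 1) == 1)
            s->blocked = false;
    }

    static void notify_sleep(struct sta *s)
    {
        s->sleeping = true;
        if (atomic_load(&s->pending_frames))
            s->blocked = true;
    }

    int main(void)
    {
        struct sta s = { .pending_frames = 0 };

        op_tx(&s);
        op_tx(&s);
        notify_sleep(&s);
        printf("blocked=%d\n", s.blocked);   /* 1 */
        tx_status(&s);
        tx_status(&s);
        printf("blocked=%d\n", s.blocked);   /* 0 */
        return 0;
    }
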
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 37b8e115375a..a61ef3d6d89c 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -23,6 +23,14 @@
23 23
24#define REG_READ (common->ops->read) 24#define REG_READ (common->ops->read)
25#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg) 25#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
26#define ENABLE_REGWRITE_BUFFER(_ah) \
27 if (common->ops->enable_write_buffer) \
28 common->ops->enable_write_buffer((_ah));
29
30#define REGWRITE_BUFFER_FLUSH(_ah) \
31 if (common->ops->write_flush) \
32 common->ops->write_flush((_ah));
33
26 34
27#define IEEE80211_WEP_NKID 4 /* number of key ids */ 35#define IEEE80211_WEP_NKID 4 /* number of key ids */
28 36
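
The two macros added above let ath/key.c batch its bursts of key-cache register writes on hardware whose ops structure provides enable_write_buffer/write_flush callbacks, while staying a no-op where the callbacks are absent. The sketch below models the same "optional op" pattern with plain function pointers; the bus structure and register API are invented for illustration, not the ath_common layout.

    /* Userspace model of optional write-buffering ops around a register burst. */
    #include <stdio.h>

    struct bus_ops {
        void (*write)(unsigned reg, unsigned val);   /* always present */
        void (*enable_write_buffer)(void);           /* may be NULL    */
        void (*write_flush)(void);                   /* may be NULL    */
    };

    static void reg_write(unsigned reg, unsigned val)
    {
        printf("write reg 0x%03x = 0x%08x\n", reg, val);
    }

    static const struct bus_ops ops = { .write = reg_write };

    static void write_key_entry(unsigned base)
    {
        if (ops.enable_write_buffer)     /* mirrors ENABLE_REGWRITE_BUFFER() */
            ops.enable_write_buffer();

        for (unsigned i = 0; i < 5; i++)
            ops.write(base + 4 * i, 0);  /* the buffered burst               */

        if (ops.write_flush)             /* mirrors REGWRITE_BUFFER_FLUSH()  */
            ops.write_flush();
    }

    int main(void)
    {
        write_key_entry(0x800);
        return 0;
    }
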
@@ -42,6 +50,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
42 50
43 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry)); 51 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
44 52
53 ENABLE_REGWRITE_BUFFER(ah);
54
45 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); 55 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
46 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0); 56 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
47 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0); 57 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
@@ -66,6 +76,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
66 76
67 } 77 }
68 78
79 REGWRITE_BUFFER_FLUSH(ah);
80
69 return true; 81 return true;
70} 82}
71EXPORT_SYMBOL(ath_hw_keyreset); 83EXPORT_SYMBOL(ath_hw_keyreset);
@@ -104,9 +116,13 @@ static bool ath_hw_keysetmac(struct ath_common *common,
104 } else { 116 } else {
105 macLo = macHi = 0; 117 macLo = macHi = 0;
106 } 118 }
119 ENABLE_REGWRITE_BUFFER(ah);
120
107 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); 121 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
108 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag); 122 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
109 123
124 REGWRITE_BUFFER_FLUSH(ah);
125
110 return true; 126 return true;
111} 127}
112 128
@@ -223,6 +239,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
223 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff; 239 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
224 mic4 = get_unaligned_le32(k->kv_txmic + 4); 240 mic4 = get_unaligned_le32(k->kv_txmic + 4);
225 241
242 ENABLE_REGWRITE_BUFFER(ah);
243
226 /* Write RX[31:0] and TX[31:16] */ 244 /* Write RX[31:0] and TX[31:16] */
227 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); 245 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
228 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1); 246 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
@@ -236,6 +254,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
236 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), 254 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
237 AR_KEYTABLE_TYPE_CLR); 255 AR_KEYTABLE_TYPE_CLR);
238 256
257 REGWRITE_BUFFER_FLUSH(ah);
258
239 } else { 259 } else {
240 /* 260 /*
241 * TKIP uses four key cache entries (two for group 261 * TKIP uses four key cache entries (two for group
@@ -258,6 +278,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
258 mic0 = get_unaligned_le32(k->kv_mic + 0); 278 mic0 = get_unaligned_le32(k->kv_mic + 0);
259 mic2 = get_unaligned_le32(k->kv_mic + 4); 279 mic2 = get_unaligned_le32(k->kv_mic + 4);
260 280
281 ENABLE_REGWRITE_BUFFER(ah);
282
261 /* Write MIC key[31:0] */ 283 /* Write MIC key[31:0] */
262 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); 284 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
263 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); 285 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
@@ -270,8 +292,12 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
270 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); 292 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
271 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), 293 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
272 AR_KEYTABLE_TYPE_CLR); 294 AR_KEYTABLE_TYPE_CLR);
295
296 REGWRITE_BUFFER_FLUSH(ah);
273 } 297 }
274 298
299 ENABLE_REGWRITE_BUFFER(ah);
300
275 /* MAC address registers are reserved for the MIC entry */ 301 /* MAC address registers are reserved for the MIC entry */
276 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0); 302 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
277 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0); 303 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
@@ -283,7 +309,11 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
283 */ 309 */
284 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); 310 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
285 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); 311 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
312
313 REGWRITE_BUFFER_FLUSH(ah);
286 } else { 314 } else {
315 ENABLE_REGWRITE_BUFFER(ah);
316
287 /* Write key[47:0] */ 317 /* Write key[47:0] */
288 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); 318 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
289 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); 319 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
@@ -296,6 +326,8 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
296 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); 326 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
297 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); 327 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
298 328
329 REGWRITE_BUFFER_FLUSH(ah);
330
299 /* Write MAC address for the entry */ 331 /* Write MAC address for the entry */
300 (void) ath_hw_keysetmac(common, entry, mac); 332 (void) ath_hw_keysetmac(common, entry, mac);
301 } 333 }
@@ -451,6 +483,9 @@ int ath_key_config(struct ath_common *common,
451 memset(&hk, 0, sizeof(hk)); 483 memset(&hk, 0, sizeof(hk));
452 484
453 switch (key->cipher) { 485 switch (key->cipher) {
486 case 0:
487 hk.kv_type = ATH_CIPHER_CLR;
488 break;
454 case WLAN_CIPHER_SUITE_WEP40: 489 case WLAN_CIPHER_SUITE_WEP40:
455 case WLAN_CIPHER_SUITE_WEP104: 490 case WLAN_CIPHER_SUITE_WEP104:
456 hk.kv_type = ATH_CIPHER_WEP; 491 hk.kv_type = ATH_CIPHER_WEP;
@@ -466,7 +501,8 @@ int ath_key_config(struct ath_common *common,
466 } 501 }
467 502
468 hk.kv_len = key->keylen; 503 hk.kv_len = key->keylen;
469 memcpy(hk.kv_val, key->key, key->keylen); 504 if (key->keylen)
505 memcpy(hk.kv_val, key->key, key->keylen);
470 506
471 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 507 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
472 switch (vif->type) { 508 switch (vif->type) {
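
The last two key.c hunks accept a cipher value of 0 (mapped to ATH_CIPHER_CLR) and skip the memcpy when key->keylen is 0, so a key-cache slot can be programmed without any key material; that it is meant for installing a "clear" entry is an inference from the diff, not stated in it. A tiny sketch of the guarded copy, with illustrative names:

    /* Userspace model of tolerating an empty key. Illustrative names only. */
    #include <stdio.h>
    #include <string.h>

    enum cipher_type { CIPHER_CLR, CIPHER_WEP, CIPHER_TKIP, CIPHER_CCMP };

    struct hw_key {
        enum cipher_type type;
        unsigned len;
        unsigned char val[32];
    };

    static int key_config(struct hw_key *hk, unsigned cipher,
                          const unsigned char *key, unsigned keylen)
    {
        switch (cipher) {
        case 0:
            hk->type = CIPHER_CLR;      /* no cipher: clear entry         */
            break;
        default:
            hk->type = CIPHER_CCMP;     /* other suites collapsed here    */
            break;
        }

        hk->len = keylen;
        if (keylen)                     /* avoid copying from a NULL key  */
            memcpy(hk->val, key, keylen);
        return 0;
    }

    int main(void)
    {
        struct hw_key hk;

        key_config(&hk, 0, NULL, 0);    /* clear entry, no key material   */
        printf("type=%d len=%u\n", hk.type, hk.len);
        return 0;
    }
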
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 0e1b8793c864..028310f263c8 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -97,8 +97,8 @@ static const struct ieee80211_regdomain ath_world_regdom_66_69 = {
97 } 97 }
98}; 98};
99 99
100/* Can be used by 0x67, 0x6A and 0x68 */ 100/* Can be used by 0x67, 0x68, 0x6A and 0x6C */
101static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = { 101static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
102 .n_reg_rules = 4, 102 .n_reg_rules = 4,
103 .alpha2 = "99", 103 .alpha2 = "99",
104 .reg_rules = { 104 .reg_rules = {
@@ -151,7 +151,8 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
151 case 0x67: 151 case 0x67:
152 case 0x68: 152 case 0x68:
153 case 0x6A: 153 case 0x6A:
154 return &ath_world_regdom_67_68_6A; 154 case 0x6C:
155 return &ath_world_regdom_67_68_6A_6C;
155 default: 156 default:
156 WARN_ON(1); 157 WARN_ON(1);
157 return ath_default_world_regdomain(); 158 return ath_default_world_regdomain();
@@ -333,6 +334,7 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
333 case 0x63: 334 case 0x63:
334 case 0x66: 335 case 0x66:
335 case 0x67: 336 case 0x67:
337 case 0x6C:
336 ath_reg_apply_beaconing_flags(wiphy, initiator); 338 ath_reg_apply_beaconing_flags(wiphy, initiator);
337 break; 339 break;
338 case 0x68: 340 case 0x68:
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 5c2cfe694152..24b53839fc3a 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -86,6 +86,7 @@ enum EnumRd {
86 WOR9_WORLD = 0x69, 86 WOR9_WORLD = 0x69,
87 WORA_WORLD = 0x6A, 87 WORA_WORLD = 0x6A,
88 WORB_WORLD = 0x6B, 88 WORB_WORLD = 0x6B,
89 WORC_WORLD = 0x6C,
89 90
90 MKK3_MKKB = 0x80, 91 MKK3_MKKB = 0x80,
91 MKK3_MKKA2 = 0x81, 92 MKK3_MKKA2 = 0x81,
@@ -282,6 +283,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
282 {WOR9_WORLD, NO_CTL, NO_CTL}, 283 {WOR9_WORLD, NO_CTL, NO_CTL},
283 {WORA_WORLD, NO_CTL, NO_CTL}, 284 {WORA_WORLD, NO_CTL, NO_CTL},
284 {WORB_WORLD, NO_CTL, NO_CTL}, 285 {WORB_WORLD, NO_CTL, NO_CTL},
286 {WORC_WORLD, NO_CTL, NO_CTL},
285}; 287};
286 288
287static struct country_code_to_enum_rd allCountries[] = { 289static struct country_code_to_enum_rd allCountries[] = {
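
Adding the 0x6C (WORC_WORLD) world regulatory pair touches three places: the EnumRd enumeration, the regDomainPairs[] table, and the switch statements in regd.c that pick the shared world regdomain and the beaconing-flags handling. The small sketch below reproduces that table-plus-switch shape with invented values, to make the pattern of such an addition explicit; it is not the ath regulatory data.

    /* Userspace model of extending an id -> ruleset mapping in two places. */
    #include <stdio.h>

    enum world_rd { RD_66 = 0x66, RD_67 = 0x67, RD_68 = 0x68,
                    RD_6A = 0x6A, RD_6C = 0x6C };        /* newly added id */

    struct rd_pair { enum world_rd rd; int ctl; };

    static const struct rd_pair pairs[] = {
        { RD_66, 0 }, { RD_67, 0 }, { RD_68, 0 }, { RD_6A, 0 },
        { RD_6C, 0 },                      /* table entry for the new id   */
    };

    static const char *world_regdom(enum world_rd rd)
    {
        switch (rd) {
        case RD_67:
        case RD_68:
        case RD_6A:
        case RD_6C:                        /* new id shares an existing set */
            return "regdom_67_68_6A_6C";
        case RD_66:
            return "regdom_66_69";
        default:
            return "default_world";
        }
    }

    int main(void)
    {
        printf("%zu pairs, 0x6C -> %s\n",
               sizeof(pairs) / sizeof(pairs[0]), world_regdom(RD_6C));
        return 0;
    }
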
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5af40d9170a0..5a43984bdcea 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2686,6 +2686,17 @@ out:
2686 dev->mac_suspended++; 2686 dev->mac_suspended++;
2687} 2687}
2688 2688
2689/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
2690void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
2691{
2692 u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
2693 if (on)
2694 tmslow |= B43_TMSLOW_MACPHYCLKEN;
2695 else
2696 tmslow &= ~B43_TMSLOW_MACPHYCLKEN;
2697 ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
2698}
2699
2689static void b43_adjust_opmode(struct b43_wldev *dev) 2700static void b43_adjust_opmode(struct b43_wldev *dev)
2690{ 2701{
2691 struct b43_wl *wl = dev->wl; 2702 struct b43_wl *wl = dev->wl;
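
b43_mac_phy_clock_set() above promotes what was an N-PHY-local helper (removed from phy_n.c later in this diff) into common MAC code: it read-modify-writes the SSB TMSLOW register, setting or clearing B43_TMSLOW_MACPHYCLKEN, and chip init now calls it instead of OR-ing the magic 0x00100000 directly. A generic sketch of that read-modify-write pattern, with invented register accessors:

    /* Userspace model of a read-modify-write bit toggle on a control register.
     * The register storage and accessors are invented for illustration. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MACPHYCLKEN 0x00100000u  /* the named bit replacing a magic value */

    static uint32_t tmslow_reg;      /* stands in for the SSB TMSLOW register */

    static uint32_t reg_read32(void)          { return tmslow_reg; }
    static void     reg_write32(uint32_t val) { tmslow_reg = val;  }

    static void mac_phy_clock_set(bool on)
    {
        uint32_t tmslow = reg_read32();

        if (on)
            tmslow |= MACPHYCLKEN;
        else
            tmslow &= ~MACPHYCLKEN;

        reg_write32(tmslow);
    }

    int main(void)
    {
        mac_phy_clock_set(true);
        printf("tmslow=0x%08x\n", tmslow_reg);
        mac_phy_clock_set(false);
        printf("tmslow=0x%08x\n", tmslow_reg);
        return 0;
    }
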
@@ -2842,7 +2853,7 @@ static int b43_chip_init(struct b43_wldev *dev)
2842{ 2853{
2843 struct b43_phy *phy = &dev->phy; 2854 struct b43_phy *phy = &dev->phy;
2844 int err; 2855 int err;
2845 u32 value32, macctl; 2856 u32 macctl;
2846 u16 value16; 2857 u16 value16;
2847 2858
2848 /* Initialize the MAC control */ 2859 /* Initialize the MAC control */
@@ -2920,9 +2931,7 @@ static int b43_chip_init(struct b43_wldev *dev)
2920 b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00); 2931 b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
2921 b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00); 2932 b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
2922 2933
2923 value32 = ssb_read32(dev->dev, SSB_TMSLOW); 2934 b43_mac_phy_clock_set(dev, true);
2924 value32 |= 0x00100000;
2925 ssb_write32(dev->dev, SSB_TMSLOW, value32);
2926 2935
2927 b43_write16(dev, B43_MMIO_POWERUP_DELAY, 2936 b43_write16(dev, B43_MMIO_POWERUP_DELAY,
2928 dev->dev->bus->chipco.fast_pwrup_delay); 2937 dev->dev->bus->chipco.fast_pwrup_delay);
@@ -4213,33 +4222,18 @@ static void b43_bluetooth_coext_disable(struct b43_wldev *dev)
4213 4222
4214static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) 4223static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
4215{ 4224{
4216#ifdef CONFIG_SSB_DRIVER_PCICORE
4217 struct ssb_bus *bus = dev->dev->bus; 4225 struct ssb_bus *bus = dev->dev->bus;
4218 u32 tmp; 4226 u32 tmp;
4219 4227
4220 if (bus->pcicore.dev && 4228 if ((bus->chip_id == 0x4311 && bus->chip_rev == 2) ||
4221 bus->pcicore.dev->id.coreid == SSB_DEV_PCI && 4229 (bus->chip_id == 0x4312)) {
4222 bus->pcicore.dev->id.revision <= 5) {
4223 /* IMCFGLO timeouts workaround. */
4224 tmp = ssb_read32(dev->dev, SSB_IMCFGLO); 4230 tmp = ssb_read32(dev->dev, SSB_IMCFGLO);
4225 switch (bus->bustype) { 4231 tmp &= ~SSB_IMCFGLO_REQTO;
4226 case SSB_BUSTYPE_PCI: 4232 tmp &= ~SSB_IMCFGLO_SERTO;
4227 case SSB_BUSTYPE_PCMCIA: 4233 tmp |= 0x3;
4228 tmp &= ~SSB_IMCFGLO_REQTO;
4229 tmp &= ~SSB_IMCFGLO_SERTO;
4230 tmp |= 0x32;
4231 break;
4232 case SSB_BUSTYPE_SSB:
4233 tmp &= ~SSB_IMCFGLO_REQTO;
4234 tmp &= ~SSB_IMCFGLO_SERTO;
4235 tmp |= 0x53;
4236 break;
4237 default:
4238 break;
4239 }
4240 ssb_write32(dev->dev, SSB_IMCFGLO, tmp); 4234 ssb_write32(dev->dev, SSB_IMCFGLO, tmp);
4235 ssb_commit_settings(bus);
4241 } 4236 }
4242#endif /* CONFIG_SSB_DRIVER_PCICORE */
4243} 4237}
4244 4238
4245static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle) 4239static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
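
The rewritten b43_imcfglo_timeouts_workaround() above keys the workaround off the chip (4311 rev 2 or 4312) rather than the PCI core revision, drops the CONFIG_SSB_DRIVER_PCICORE guard, programs a single timeout (clear the REQTO and SERTO fields, then set 0x3) and commits it with ssb_commit_settings(). The sketch below models the "clear the field masks, then OR in the new value" step; the 0x3 value comes from the hunk, while the mask constants here are placeholders rather than the real SSB_IMCFGLO_* definitions.

    /* Userspace model of updating masked fields in a config register. */
    #include <stdint.h>
    #include <stdio.h>

    #define IMCFGLO_REQTO 0x00000070u   /* placeholder field masks        */
    #define IMCFGLO_SERTO 0x00000007u

    static uint32_t imcfglo = 0x00000075u;  /* arbitrary starting contents */

    static void timeouts_workaround(unsigned chip_id, unsigned chip_rev)
    {
        if ((chip_id == 0x4311 && chip_rev == 2) || chip_id == 0x4312) {
            uint32_t tmp = imcfglo;

            tmp &= ~IMCFGLO_REQTO;      /* clear both timeout fields ...   */
            tmp &= ~IMCFGLO_SERTO;
            tmp |= 0x3;                 /* ... and program the new timeout */
            imcfglo = tmp;              /* a real driver would commit here */
        }
    }

    int main(void)
    {
        timeouts_workaround(0x4312, 1);
        printf("imcfglo=0x%08x\n", imcfglo);
        return 0;
    }
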
@@ -4863,25 +4857,8 @@ static void b43_one_core_detach(struct ssb_device *dev)
4863static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) 4857static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
4864{ 4858{
4865 struct b43_wldev *wldev; 4859 struct b43_wldev *wldev;
4866 struct pci_dev *pdev;
4867 int err = -ENOMEM; 4860 int err = -ENOMEM;
4868 4861
4869 if (!list_empty(&wl->devlist)) {
4870 /* We are not the first core on this chip. */
4871 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
4872 /* Only special chips support more than one wireless
4873 * core, although some of the other chips have more than
4874 * one wireless core as well. Check for this and
4875 * bail out early.
4876 */
4877 if (!pdev ||
4878 ((pdev->device != 0x4321) &&
4879 (pdev->device != 0x4313) && (pdev->device != 0x431A))) {
4880 b43dbg(wl, "Ignoring unconnected 802.11 core\n");
4881 return -ENODEV;
4882 }
4883 }
4884
4885 wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); 4862 wldev = kzalloc(sizeof(*wldev), GFP_KERNEL);
4886 if (!wldev) 4863 if (!wldev)
4887 goto out; 4864 goto out;
@@ -5002,7 +4979,7 @@ out:
5002 return err; 4979 return err;
5003} 4980}
5004 4981
5005static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id) 4982static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id)
5006{ 4983{
5007 struct b43_wl *wl; 4984 struct b43_wl *wl;
5008 int err; 4985 int err;
@@ -5040,7 +5017,7 @@ static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id)
5040 return err; 5017 return err;
5041} 5018}
5042 5019
5043static void b43_remove(struct ssb_device *dev) 5020static void b43_ssb_remove(struct ssb_device *dev)
5044{ 5021{
5045 struct b43_wl *wl = ssb_get_devtypedata(dev); 5022 struct b43_wl *wl = ssb_get_devtypedata(dev);
5046 struct b43_wldev *wldev = ssb_get_drvdata(dev); 5023 struct b43_wldev *wldev = ssb_get_drvdata(dev);
@@ -5083,8 +5060,8 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason)
5083static struct ssb_driver b43_ssb_driver = { 5060static struct ssb_driver b43_ssb_driver = {
5084 .name = KBUILD_MODNAME, 5061 .name = KBUILD_MODNAME,
5085 .id_table = b43_ssb_tbl, 5062 .id_table = b43_ssb_tbl,
5086 .probe = b43_probe, 5063 .probe = b43_ssb_probe,
5087 .remove = b43_remove, 5064 .remove = b43_ssb_remove,
5088}; 5065};
5089 5066
5090static void b43_print_driverinfo(void) 5067static void b43_print_driverinfo(void)
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 40db03678d9f..a0d327f13183 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -133,6 +133,7 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags);
133 133
134void b43_mac_suspend(struct b43_wldev *dev); 134void b43_mac_suspend(struct b43_wldev *dev);
135void b43_mac_enable(struct b43_wldev *dev); 135void b43_mac_enable(struct b43_wldev *dev);
136void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
136 137
137 138
138struct b43_request_fw_context; 139struct b43_request_fw_context;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 8a00f9a95dbb..b075a3f82a43 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -2281,6 +2281,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
2281 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); 2281 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
2282 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); 2282 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
2283 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); 2283 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
2284 save_regs_phy[8] = 0;
2284 } else { 2285 } else {
2285 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); 2286 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
2286 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); 2287 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
@@ -2289,6 +2290,8 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
2289 save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER); 2290 save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
2290 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1); 2291 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
2291 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2); 2292 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
2293 save_regs_phy[7] = 0;
2294 save_regs_phy[8] = 0;
2292 } 2295 }
2293 2296
2294 b43_nphy_rssi_select(dev, 5, type); 2297 b43_nphy_rssi_select(dev, 5, type);
@@ -3537,17 +3540,6 @@ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
3537 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug); 3540 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
3538} 3541}
3539 3542
3540/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
3541static void b43_nphy_mac_phy_clock_set(struct b43_wldev *dev, bool on)
3542{
3543 u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
3544 if (on)
3545 tmslow |= B43_TMSLOW_MACPHYCLKEN;
3546 else
3547 tmslow &= ~B43_TMSLOW_MACPHYCLKEN;
3548 ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
3549}
3550
3551/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */ 3543/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
3552static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) 3544static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
3553{ 3545{
@@ -3688,7 +3680,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3688 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA); 3680 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
3689 b43_nphy_bmac_clock_fgc(dev, 0); 3681 b43_nphy_bmac_clock_fgc(dev, 0);
3690 3682
3691 b43_nphy_mac_phy_clock_set(dev, true); 3683 b43_mac_phy_clock_set(dev, true);
3692 3684
3693 b43_nphy_pa_override(dev, false); 3685 b43_nphy_pa_override(dev, false);
3694 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); 3686 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
@@ -3845,8 +3837,8 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
3845{ 3837{
3846 struct b43_phy *phy = &dev->phy; 3838 struct b43_phy *phy = &dev->phy;
3847 3839
3848 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2; 3840 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2 = NULL;
3849 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3; 3841 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3 = NULL;
3850 3842
3851 u8 tmp; 3843 u8 tmp;
3852 3844
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c7fd73e3ad76..1ab8861dd43a 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2234,7 +2234,7 @@ static int b43legacy_chip_init(struct b43legacy_wldev *dev)
2234 b43legacy_write32(dev, B43legacy_MMIO_DMA5_IRQ_MASK, 0x0000DC00); 2234 b43legacy_write32(dev, B43legacy_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
2235 2235
2236 value32 = ssb_read32(dev->dev, SSB_TMSLOW); 2236 value32 = ssb_read32(dev->dev, SSB_TMSLOW);
2237 value32 |= 0x00100000; 2237 value32 |= B43legacy_TMSLOW_MACPHYCLKEN;
2238 ssb_write32(dev->dev, SSB_TMSLOW, value32); 2238 ssb_write32(dev->dev, SSB_TMSLOW, value32);
2239 2239
2240 b43legacy_write16(dev, B43legacy_MMIO_POWERUP_DELAY, 2240 b43legacy_write16(dev, B43legacy_MMIO_POWERUP_DELAY,
@@ -3104,37 +3104,6 @@ static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
3104 memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); 3104 memset(&dev->noisecalc, 0, sizeof(dev->noisecalc));
3105} 3105}
3106 3106
3107static void b43legacy_imcfglo_timeouts_workaround(struct b43legacy_wldev *dev)
3108{
3109#ifdef CONFIG_SSB_DRIVER_PCICORE
3110 struct ssb_bus *bus = dev->dev->bus;
3111 u32 tmp;
3112
3113 if (bus->pcicore.dev &&
3114 bus->pcicore.dev->id.coreid == SSB_DEV_PCI &&
3115 bus->pcicore.dev->id.revision <= 5) {
3116 /* IMCFGLO timeouts workaround. */
3117 tmp = ssb_read32(dev->dev, SSB_IMCFGLO);
3118 switch (bus->bustype) {
3119 case SSB_BUSTYPE_PCI:
3120 case SSB_BUSTYPE_PCMCIA:
3121 tmp &= ~SSB_IMCFGLO_REQTO;
3122 tmp &= ~SSB_IMCFGLO_SERTO;
3123 tmp |= 0x32;
3124 break;
3125 case SSB_BUSTYPE_SSB:
3126 tmp &= ~SSB_IMCFGLO_REQTO;
3127 tmp &= ~SSB_IMCFGLO_SERTO;
3128 tmp |= 0x53;
3129 break;
3130 default:
3131 break;
3132 }
3133 ssb_write32(dev->dev, SSB_IMCFGLO, tmp);
3134 }
3135#endif /* CONFIG_SSB_DRIVER_PCICORE */
3136}
3137
3138static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev, 3107static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev,
3139 bool idle) { 3108 bool idle) {
3140 u16 pu_delay = 1050; 3109 u16 pu_delay = 1050;
@@ -3278,7 +3247,6 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
3278 /* Enable IRQ routing to this device. */ 3247 /* Enable IRQ routing to this device. */
3279 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); 3248 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev);
3280 3249
3281 b43legacy_imcfglo_timeouts_workaround(dev);
3282 prepare_phy_data_for_init(dev); 3250 prepare_phy_data_for_init(dev);
3283 b43legacy_phy_calibrate(dev); 3251 b43legacy_phy_calibrate(dev);
3284 err = b43legacy_chip_init(dev); 3252 err = b43legacy_chip_init(dev);
@@ -3728,26 +3696,8 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
3728 struct b43legacy_wl *wl) 3696 struct b43legacy_wl *wl)
3729{ 3697{
3730 struct b43legacy_wldev *wldev; 3698 struct b43legacy_wldev *wldev;
3731 struct pci_dev *pdev;
3732 int err = -ENOMEM; 3699 int err = -ENOMEM;
3733 3700
3734 if (!list_empty(&wl->devlist)) {
3735 /* We are not the first core on this chip. */
3736 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
3737 /* Only special chips support more than one wireless
3738 * core, although some of the other chips have more than
3739 * one wireless core as well. Check for this and
3740 * bail out early.
3741 */
3742 if (!pdev ||
3743 ((pdev->device != 0x4321) &&
3744 (pdev->device != 0x4313) &&
3745 (pdev->device != 0x431A))) {
3746 b43legacydbg(wl, "Ignoring unconnected 802.11 core\n");
3747 return -ENODEV;
3748 }
3749 }
3750
3751 wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); 3701 wldev = kzalloc(sizeof(*wldev), GFP_KERNEL);
3752 if (!wldev) 3702 if (!wldev)
3753 goto out; 3703 goto out;
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 1d9aed645723..d5084829c9e5 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -79,13 +79,8 @@ struct net_device * hostap_add_interface(struct local_info *local,
79 if (!rtnl_locked) 79 if (!rtnl_locked)
80 rtnl_lock(); 80 rtnl_lock();
81 81
82 ret = 0;
83 if (strchr(dev->name, '%'))
84 ret = dev_alloc_name(dev, dev->name);
85
86 SET_NETDEV_DEV(dev, mdev->dev.parent); 82 SET_NETDEV_DEV(dev, mdev->dev.parent);
87 if (ret >= 0) 83 ret = register_netdevice(dev);
88 ret = register_netdevice(dev);
89 84
90 if (!rtnl_locked) 85 if (!rtnl_locked)
91 rtnl_unlock(); 86 rtnl_unlock();
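
The hostap hunk above drops the explicit dev_alloc_name() call before register_netdevice(); the apparent rationale, not spelled out in the diff, is that the registration path already resolves a "%d" template in dev->name itself, making the pre-expansion redundant. The sketch below models that kind of template expansion in plain C, picking the first free index; the device list and names are invented.

    /* Userspace model of expanding an "ethX"-style name template. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_DEVS 8

    static char registered[MAX_DEVS][16];
    static int  ndevs;

    static bool name_in_use(const char *name)
    {
        for (int i = 0; i < ndevs; i++)
            if (strcmp(registered[i], name) == 0)
                return true;
        return false;
    }

    /* Expand "wlan%d" to the first free "wlanN" and register it. */
    static int register_dev(const char *template)
    {
        char name[16];

        if (strchr(template, '%')) {
            for (int i = 0; i < MAX_DEVS; i++) {
                snprintf(name, sizeof(name), template, i);
                if (!name_in_use(name))
                    goto got_name;
            }
            return -1;                       /* namespace exhausted */
        }
        snprintf(name, sizeof(name), "%s", template);

    got_name:
        snprintf(registered[ndevs], sizeof(registered[0]), "%s", name);
        return ndevs++;
    }

    int main(void)
    {
        register_dev("wlan%d");
        register_dev("wlan%d");
        printf("%s %s\n", registered[0], registered[1]);
        return 0;
    }
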
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
index 5a8a3cce27bc..7e5e85a017b5 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -955,9 +955,6 @@ int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
955 if (priv->cfg->scan_rx_antennas[band]) 955 if (priv->cfg->scan_rx_antennas[band])
956 rx_ant = priv->cfg->scan_rx_antennas[band]; 956 rx_ant = priv->cfg->scan_rx_antennas[band];
957 957
958 if (priv->cfg->scan_tx_antennas[band])
959 scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
960
961 priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv, 958 priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
962 priv->scan_tx_ant[band], 959 priv->scan_tx_ant[band],
963 scan_tx_antennas); 960 scan_tx_antennas);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
index 31ac672b64e1..24d149909ba3 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -2604,7 +2604,7 @@ static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2604 struct iwl_lq_sta *lq_sta = file->private_data; 2604 struct iwl_lq_sta *lq_sta = file->private_data;
2605 struct iwl_priv *priv; 2605 struct iwl_priv *priv;
2606 char buf[64]; 2606 char buf[64];
2607 int buf_size; 2607 size_t buf_size;
2608 u32 parsed_rate; 2608 u32 parsed_rate;
2609 struct iwl_station_priv *sta_priv = 2609 struct iwl_station_priv *sta_priv =
2610 container_of(lq_sta, struct iwl_station_priv, lq_sta); 2610 container_of(lq_sta, struct iwl_station_priv, lq_sta);
@@ -2860,7 +2860,6 @@ static struct rate_control_ops rs_4965_ops = {
2860 2860
2861int iwl4965_rate_control_register(void) 2861int iwl4965_rate_control_register(void)
2862{ 2862{
2863 pr_err("Registering 4965 rate control operations\n");
2864 return ieee80211_rate_control_register(&rs_4965_ops); 2863 return ieee80211_rate_control_register(&rs_4965_ops);
2865} 2864}
2866 2865
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 42db0fc8b921..42df8321dae8 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -211,10 +211,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
211 if (!iwl_legacy_is_channel_valid(ch)) 211 if (!iwl_legacy_is_channel_valid(ch))
212 continue; 212 continue;
213 213
214 if (iwl_legacy_is_channel_a_band(ch)) 214 sband = &priv->bands[ch->band];
215 sband = &priv->bands[IEEE80211_BAND_5GHZ];
216 else
217 sband = &priv->bands[IEEE80211_BAND_2GHZ];
218 215
219 geo_ch = &sband->channels[sband->n_channels++]; 216 geo_ch = &sband->channels[sband->n_channels++];
220 217
@@ -2117,10 +2114,9 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2117 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", 2114 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2118 channel->hw_value, changed); 2115 channel->hw_value, changed);
2119 2116
2120 if (unlikely(!priv->cfg->mod_params->disable_hw_scan && 2117 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
2121 test_bit(STATUS_SCANNING, &priv->status))) {
2122 scan_active = 1; 2118 scan_active = 1;
2123 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 2119 IWL_DEBUG_MAC80211(priv, "scan active\n");
2124 } 2120 }
2125 2121
2126 if (changed & (IEEE80211_CONF_CHANGE_SMPS | 2122 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
@@ -2440,11 +2436,13 @@ void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
2440 2436
2441 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes); 2437 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2442 2438
2443 if (!iwl_legacy_is_alive(priv))
2444 return;
2445
2446 mutex_lock(&priv->mutex); 2439 mutex_lock(&priv->mutex);
2447 2440
2441 if (!iwl_legacy_is_alive(priv)) {
2442 mutex_unlock(&priv->mutex);
2443 return;
2444 }
2445
2448 if (changes & BSS_CHANGED_QOS) { 2446 if (changes & BSS_CHANGED_QOS) {
2449 unsigned long flags; 2447 unsigned long flags;
2450 2448
@@ -2653,7 +2651,7 @@ unplugged:
2653 2651
2654none: 2652none:
2655 /* re-enable interrupts here since we don't have anything to service. */ 2653 /* re-enable interrupts here since we don't have anything to service. */
2656 /* only Re-enable if diabled by irq */ 2654 /* only Re-enable if disabled by irq */
2657 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 2655 if (test_bit(STATUS_INT_ENABLED, &priv->status))
2658 iwl_legacy_enable_interrupts(priv); 2656 iwl_legacy_enable_interrupts(priv);
2659 spin_unlock_irqrestore(&priv->lock, flags); 2657 spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
index f03b463e4378..bc66c604106c 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -287,7 +287,6 @@ struct iwl_cfg {
287 struct iwl_base_params *base_params; 287 struct iwl_base_params *base_params;
288 /* params likely to change within a device family */ 288 /* params likely to change within a device family */
289 u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; 289 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
290 u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
291 enum iwl_led_mode led_mode; 290 enum iwl_led_mode led_mode;
292}; 291};
293 292
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index f43ac1eb9014..be0106c6a2da 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -134,7 +134,7 @@ struct iwl_queue {
134 * space more than this */ 134 * space more than this */
135 int high_mark; /* high watermark, stop queue if free 135 int high_mark; /* high watermark, stop queue if free
136 * space less than this */ 136 * space less than this */
137} __packed; 137};
138 138
139/* One for each TFD */ 139/* One for each TFD */
140struct iwl_tx_info { 140struct iwl_tx_info {
@@ -290,6 +290,7 @@ enum {
290 CMD_SIZE_HUGE = (1 << 0), 290 CMD_SIZE_HUGE = (1 << 0),
291 CMD_ASYNC = (1 << 1), 291 CMD_ASYNC = (1 << 1),
292 CMD_WANT_SKB = (1 << 2), 292 CMD_WANT_SKB = (1 << 2),
293 CMD_MAPPED = (1 << 3),
293}; 294};
294 295
295#define DEF_CMD_PAYLOAD_SIZE 320 296#define DEF_CMD_PAYLOAD_SIZE 320
@@ -1076,7 +1077,6 @@ struct iwl_priv {
1076 spinlock_t hcmd_lock; /* protect hcmd */ 1077 spinlock_t hcmd_lock; /* protect hcmd */
1077 spinlock_t reg_lock; /* protect hw register access */ 1078 spinlock_t reg_lock; /* protect hw register access */
1078 struct mutex mutex; 1079 struct mutex mutex;
1079 struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
1080 1080
1081 /* basic pci-network driver stuff */ 1081 /* basic pci-network driver stuff */
1082 struct pci_dev *pci_dev; 1082 struct pci_dev *pci_dev;
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
index 9d721cbda5bb..62b4b09122cb 100644
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -145,6 +145,8 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
145 int cmd_idx; 145 int cmd_idx;
146 int ret; 146 int ret;
147 147
148 lockdep_assert_held(&priv->mutex);
149
148 BUG_ON(cmd->flags & CMD_ASYNC); 150 BUG_ON(cmd->flags & CMD_ASYNC);
149 151
150 /* A synchronous command can not have a callback set. */ 152 /* A synchronous command can not have a callback set. */
@@ -152,7 +154,6 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
152 154
153 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", 155 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
154 iwl_legacy_get_cmd_string(cmd->id)); 156 iwl_legacy_get_cmd_string(cmd->id));
155 mutex_lock(&priv->sync_cmd_mutex);
156 157
157 set_bit(STATUS_HCMD_ACTIVE, &priv->status); 158 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
158 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", 159 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
@@ -224,7 +225,6 @@ fail:
224 cmd->reply_page = 0; 225 cmd->reply_page = 0;
225 } 226 }
226out: 227out:
227 mutex_unlock(&priv->sync_cmd_mutex);
228 return ret; 228 return ret;
229} 229}
230EXPORT_SYMBOL(iwl_legacy_send_cmd_sync); 230EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
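
Across the iwlegacy hunks, the dedicated sync_cmd_mutex is removed and iwl_legacy_send_cmd_sync() instead requires that callers already hold priv->mutex, enforced with lockdep_assert_held(). The sketch below shows the equivalent "caller must hold the lock" assertion in portable C with pthreads; the ownership flag is only debug bookkeeping for the illustration, whereas kernel lockdep needs no such field in the protected structure.

    /* Userspace model of "caller must hold the lock" assertions. */
    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct device {
        pthread_mutex_t mutex;
        bool mutex_held;          /* debug bookkeeping for the assertion */
    };

    static void dev_lock(struct device *d)
    {
        pthread_mutex_lock(&d->mutex);
        d->mutex_held = true;
    }

    static void dev_unlock(struct device *d)
    {
        d->mutex_held = false;
        pthread_mutex_unlock(&d->mutex);
    }

    /* Serialized by the caller's mutex, not by a private one. */
    static int send_cmd_sync(struct device *d, int cmd)
    {
        assert(d->mutex_held);    /* models lockdep_assert_held(&priv->mutex) */
        printf("sync cmd %d\n", cmd);
        return 0;
    }

    int main(void)
    {
        struct device d = { .mutex = PTHREAD_MUTEX_INITIALIZER };

        dev_lock(&d);
        send_cmd_sync(&d, 1);
        dev_unlock(&d);
        return 0;
    }
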
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
index 02132e755831..a6effdae63f9 100644
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -149,6 +149,12 @@ static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
149 IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); 149 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
150} 150}
151 151
152static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
153{
154 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
155 iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
156}
157
152static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv) 158static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
153{ 159{
154 IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); 160 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
index a227773cb384..4fff995c6f3e 100644
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -146,33 +146,32 @@ void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
146{ 146{
147 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 147 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
148 struct iwl_queue *q = &txq->q; 148 struct iwl_queue *q = &txq->q;
149 bool huge = false;
150 int i; 149 int i;
151 150
152 if (q->n_bd == 0) 151 if (q->n_bd == 0)
153 return; 152 return;
154 153
155 while (q->read_ptr != q->write_ptr) { 154 while (q->read_ptr != q->write_ptr) {
156 /* we have no way to tell if it is a huge cmd ATM */
157 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0); 155 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
158 156
159 if (txq->meta[i].flags & CMD_SIZE_HUGE) 157 if (txq->meta[i].flags & CMD_MAPPED) {
160 huge = true;
161 else
162 pci_unmap_single(priv->pci_dev, 158 pci_unmap_single(priv->pci_dev,
163 dma_unmap_addr(&txq->meta[i], mapping), 159 dma_unmap_addr(&txq->meta[i], mapping),
164 dma_unmap_len(&txq->meta[i], len), 160 dma_unmap_len(&txq->meta[i], len),
165 PCI_DMA_BIDIRECTIONAL); 161 PCI_DMA_BIDIRECTIONAL);
162 txq->meta[i].flags = 0;
163 }
166 164
167 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd); 165 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
168 } 166 }
169 167
170 if (huge) { 168 i = q->n_window;
171 i = q->n_window; 169 if (txq->meta[i].flags & CMD_MAPPED) {
172 pci_unmap_single(priv->pci_dev, 170 pci_unmap_single(priv->pci_dev,
173 dma_unmap_addr(&txq->meta[i], mapping), 171 dma_unmap_addr(&txq->meta[i], mapping),
174 dma_unmap_len(&txq->meta[i], len), 172 dma_unmap_len(&txq->meta[i], len),
175 PCI_DMA_BIDIRECTIONAL); 173 PCI_DMA_BIDIRECTIONAL);
174 txq->meta[i].flags = 0;
176 } 175 }
177} 176}
178EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap); 177EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
@@ -467,29 +466,27 @@ int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
467 return -EIO; 466 return -EIO;
468 } 467 }
469 468
469 spin_lock_irqsave(&priv->hcmd_lock, flags);
470
470 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 471 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
471 IWL_ERR(priv, "No space in command queue\n"); 472 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
472 IWL_ERR(priv, "Restarting adapter due to queue full\n"); 473
474 IWL_ERR(priv, "Restarting adapter due to command queue full\n");
473 queue_work(priv->workqueue, &priv->restart); 475 queue_work(priv->workqueue, &priv->restart);
474 return -ENOSPC; 476 return -ENOSPC;
475 } 477 }
476 478
477 spin_lock_irqsave(&priv->hcmd_lock, flags);
478
479 /* If this is a huge cmd, mark the huge flag also on the meta.flags
480 * of the _original_ cmd. This is used for DMA mapping clean up.
481 */
482 if (cmd->flags & CMD_SIZE_HUGE) {
483 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
484 txq->meta[idx].flags = CMD_SIZE_HUGE;
485 }
486
487 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 479 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
488 out_cmd = txq->cmd[idx]; 480 out_cmd = txq->cmd[idx];
489 out_meta = &txq->meta[idx]; 481 out_meta = &txq->meta[idx];
490 482
483 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
484 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
485 return -ENOSPC;
486 }
487
491 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 488 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
492 out_meta->flags = cmd->flags; 489 out_meta->flags = cmd->flags | CMD_MAPPED;
493 if (cmd->flags & CMD_WANT_SKB) 490 if (cmd->flags & CMD_WANT_SKB)
494 out_meta->source = cmd; 491 out_meta->source = cmd;
495 if (cmd->flags & CMD_ASYNC) 492 if (cmd->flags & CMD_ASYNC)
@@ -610,6 +607,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
610 struct iwl_device_cmd *cmd; 607 struct iwl_device_cmd *cmd;
611 struct iwl_cmd_meta *meta; 608 struct iwl_cmd_meta *meta;
612 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 609 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
610 unsigned long flags;
613 611
614 /* If a Tx command is being handled and it isn't in the actual 612 /* If a Tx command is being handled and it isn't in the actual
615 * command queue then there a command routing bug has been introduced 613 * command queue then there a command routing bug has been introduced
@@ -623,14 +621,6 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
623 return; 621 return;
624 } 622 }
625 623
626 /* If this is a huge cmd, clear the huge flag on the meta.flags
627 * of the _original_ cmd. So that iwl_legacy_cmd_queue_free won't unmap
628 * the DMA buffer for the scan (huge) command.
629 */
630 if (huge) {
631 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
632 txq->meta[cmd_index].flags = 0;
633 }
634 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge); 624 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
635 cmd = txq->cmd[cmd_index]; 625 cmd = txq->cmd[cmd_index];
636 meta = &txq->meta[cmd_index]; 626 meta = &txq->meta[cmd_index];
@@ -647,6 +637,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
647 } else if (meta->callback) 637 } else if (meta->callback)
648 meta->callback(priv, cmd, pkt); 638 meta->callback(priv, cmd, pkt);
649 639
640 spin_lock_irqsave(&priv->hcmd_lock, flags);
641
650 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); 642 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
651 643
652 if (!(meta->flags & CMD_ASYNC)) { 644 if (!(meta->flags & CMD_ASYNC)) {
@@ -655,6 +647,10 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
655 iwl_legacy_get_cmd_string(cmd->hdr.cmd)); 647 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
656 wake_up_interruptible(&priv->wait_command_queue); 648 wake_up_interruptible(&priv->wait_command_queue);
657 } 649 }
650
651 /* Mark as unmapped */
658 meta->flags = 0; 652 meta->flags = 0;
653
654 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
659} 655}
660EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete); 656EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
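The hcmd hunks above drop the special-case bookkeeping for "huge" commands and instead guard slot ownership with priv->hcmd_lock plus a CMD_MAPPED flag: the enqueue path claims the slot (or returns -ENOSPC if it is still mapped), and the completion path clears the flag under the same lock. A minimal sketch of that claim/release pattern, using hypothetical names rather than the driver's real structures, might look like:

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/errno.h>

#define SLOT_MAPPED	0x01			/* stand-in for CMD_MAPPED */

struct cmd_slots {
	spinlock_t lock;			/* stand-in for priv->hcmd_lock */
	unsigned int flags[16];			/* one entry per command slot */
};

static void cmd_slots_init(struct cmd_slots *s)
{
	spin_lock_init(&s->lock);
	memset(s->flags, 0, sizeof(s->flags));
}

/* Enqueue path: claim the slot, or refuse it if completion has not run yet. */
static int cmd_slot_claim(struct cmd_slots *s, unsigned int idx)
{
	unsigned long irqflags;
	int ret = 0;

	spin_lock_irqsave(&s->lock, irqflags);
	if (s->flags[idx] & SLOT_MAPPED)
		ret = -ENOSPC;		/* still owned by an in-flight command */
	else
		s->flags[idx] |= SLOT_MAPPED;
	spin_unlock_irqrestore(&s->lock, irqflags);

	return ret;
}

/* Completion path: release the slot under the same lock. */
static void cmd_slot_release(struct cmd_slots *s, unsigned int idx)
{
	unsigned long irqflags;

	spin_lock_irqsave(&s->lock, irqflags);
	s->flags[idx] &= ~SLOT_MAPPED;
	spin_unlock_irqrestore(&s->lock, irqflags);
}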
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index cc7ebcee60e5..0ee6be6a9c5d 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -2748,11 +2748,12 @@ static void iwl3945_bg_init_alive_start(struct work_struct *data)
2748 struct iwl_priv *priv = 2748 struct iwl_priv *priv =
2749 container_of(data, struct iwl_priv, init_alive_start.work); 2749 container_of(data, struct iwl_priv, init_alive_start.work);
2750 2750
2751 mutex_lock(&priv->mutex);
2751 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2752 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2752 return; 2753 goto out;
2753 2754
2754 mutex_lock(&priv->mutex);
2755 iwl3945_init_alive_start(priv); 2755 iwl3945_init_alive_start(priv);
2756out:
2756 mutex_unlock(&priv->mutex); 2757 mutex_unlock(&priv->mutex);
2757} 2758}
2758 2759
@@ -2761,11 +2762,12 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
2761 struct iwl_priv *priv = 2762 struct iwl_priv *priv =
2762 container_of(data, struct iwl_priv, alive_start.work); 2763 container_of(data, struct iwl_priv, alive_start.work);
2763 2764
2765 mutex_lock(&priv->mutex);
2764 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2766 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2765 return; 2767 goto out;
2766 2768
2767 mutex_lock(&priv->mutex);
2768 iwl3945_alive_start(priv); 2769 iwl3945_alive_start(priv);
2770out:
2769 mutex_unlock(&priv->mutex); 2771 mutex_unlock(&priv->mutex);
2770} 2772}
2771 2773
@@ -2995,10 +2997,12 @@ static void iwl3945_bg_restart(struct work_struct *data)
2995 } else { 2997 } else {
2996 iwl3945_down(priv); 2998 iwl3945_down(priv);
2997 2999
2998 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3000 mutex_lock(&priv->mutex);
3001 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3002 mutex_unlock(&priv->mutex);
2999 return; 3003 return;
3004 }
3000 3005
3001 mutex_lock(&priv->mutex);
3002 __iwl3945_up(priv); 3006 __iwl3945_up(priv);
3003 mutex_unlock(&priv->mutex); 3007 mutex_unlock(&priv->mutex);
3004 } 3008 }
@@ -3009,11 +3013,12 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
3009 struct iwl_priv *priv = 3013 struct iwl_priv *priv =
3010 container_of(data, struct iwl_priv, rx_replenish); 3014 container_of(data, struct iwl_priv, rx_replenish);
3011 3015
3016 mutex_lock(&priv->mutex);
3012 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3017 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3013 return; 3018 goto out;
3014 3019
3015 mutex_lock(&priv->mutex);
3016 iwl3945_rx_replenish(priv); 3020 iwl3945_rx_replenish(priv);
3021out:
3017 mutex_unlock(&priv->mutex); 3022 mutex_unlock(&priv->mutex);
3018} 3023}
3019 3024
@@ -3810,7 +3815,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3810 INIT_LIST_HEAD(&priv->free_frames); 3815 INIT_LIST_HEAD(&priv->free_frames);
3811 3816
3812 mutex_init(&priv->mutex); 3817 mutex_init(&priv->mutex);
3813 mutex_init(&priv->sync_cmd_mutex);
3814 3818
3815 priv->ieee_channels = NULL; 3819 priv->ieee_channels = NULL;
3816 priv->ieee_rates = NULL; 3820 priv->ieee_rates = NULL;
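In the iwl3945 hunks above, each background work handler now takes priv->mutex before testing STATUS_EXIT_PENDING and leaves through a common unlock label, so the exit flag is always checked with the mutex held. A minimal sketch of that handler shape, with made-up context and work names rather than the driver's own, could be:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define STATUS_EXIT_PENDING	0		/* bit number, as in the driver */

struct dev_ctx {				/* stand-in for struct iwl_priv */
	struct mutex mutex;
	unsigned long status;
	struct work_struct alive_start;
};

static void dev_alive_start(struct dev_ctx *ctx)
{
	/* the real work would go here */
}

/* Work handler: take the mutex first, then test the exit flag under it. */
static void dev_bg_alive_start(struct work_struct *work)
{
	struct dev_ctx *ctx = container_of(work, struct dev_ctx, alive_start);

	mutex_lock(&ctx->mutex);
	if (test_bit(STATUS_EXIT_PENDING, &ctx->status))
		goto out;

	dev_alive_start(ctx);
out:
	mutex_unlock(&ctx->mutex);
}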
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index a62fe24ee594..af2ae22fcfd3 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -1069,9 +1069,12 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
1069 } 1069 }
1070 1070
1071 /* Re-enable all interrupts */ 1071 /* Re-enable all interrupts */
1072 /* only Re-enable if diabled by irq */ 1072 /* only Re-enable if disabled by irq */
1073 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1073 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1074 iwl_legacy_enable_interrupts(priv); 1074 iwl_legacy_enable_interrupts(priv);
1075 /* Re-enable RF_KILL if it occurred */
1076 else if (handled & CSR_INT_BIT_RF_KILL)
1077 iwl_legacy_enable_rfkill_int(priv);
1075 1078
1076#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG 1079#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1077 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { 1080 if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -2139,7 +2142,7 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
2139static void __iwl4965_down(struct iwl_priv *priv) 2142static void __iwl4965_down(struct iwl_priv *priv)
2140{ 2143{
2141 unsigned long flags; 2144 unsigned long flags;
2142 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2145 int exit_pending;
2143 2146
2144 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2147 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2145 2148
@@ -2401,11 +2404,12 @@ static void iwl4965_bg_init_alive_start(struct work_struct *data)
2401 struct iwl_priv *priv = 2404 struct iwl_priv *priv =
2402 container_of(data, struct iwl_priv, init_alive_start.work); 2405 container_of(data, struct iwl_priv, init_alive_start.work);
2403 2406
2407 mutex_lock(&priv->mutex);
2404 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2408 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2405 return; 2409 goto out;
2406 2410
2407 mutex_lock(&priv->mutex);
2408 priv->cfg->ops->lib->init_alive_start(priv); 2411 priv->cfg->ops->lib->init_alive_start(priv);
2412out:
2409 mutex_unlock(&priv->mutex); 2413 mutex_unlock(&priv->mutex);
2410} 2414}
2411 2415
@@ -2414,11 +2418,12 @@ static void iwl4965_bg_alive_start(struct work_struct *data)
2414 struct iwl_priv *priv = 2418 struct iwl_priv *priv =
2415 container_of(data, struct iwl_priv, alive_start.work); 2419 container_of(data, struct iwl_priv, alive_start.work);
2416 2420
2421 mutex_lock(&priv->mutex);
2417 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2422 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2418 return; 2423 goto out;
2419 2424
2420 mutex_lock(&priv->mutex);
2421 iwl4965_alive_start(priv); 2425 iwl4965_alive_start(priv);
2426out:
2422 mutex_unlock(&priv->mutex); 2427 mutex_unlock(&priv->mutex);
2423} 2428}
2424 2429
@@ -2468,10 +2473,12 @@ static void iwl4965_bg_restart(struct work_struct *data)
2468 } else { 2473 } else {
2469 iwl4965_down(priv); 2474 iwl4965_down(priv);
2470 2475
2471 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2476 mutex_lock(&priv->mutex);
2477 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2478 mutex_unlock(&priv->mutex);
2472 return; 2479 return;
2480 }
2473 2481
2474 mutex_lock(&priv->mutex);
2475 __iwl4965_up(priv); 2482 __iwl4965_up(priv);
2476 mutex_unlock(&priv->mutex); 2483 mutex_unlock(&priv->mutex);
2477 } 2484 }
@@ -2624,9 +2631,10 @@ void iwl4965_mac_stop(struct ieee80211_hw *hw)
2624 2631
2625 flush_workqueue(priv->workqueue); 2632 flush_workqueue(priv->workqueue);
2626 2633
2627 /* enable interrupts again in order to receive rfkill changes */ 2634 /* User space software may expect getting rfkill changes
2635 * even if interface is down */
2628 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2636 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2629 iwl_legacy_enable_interrupts(priv); 2637 iwl_legacy_enable_rfkill_int(priv);
2630 2638
2631 IWL_DEBUG_MAC80211(priv, "leave\n"); 2639 IWL_DEBUG_MAC80211(priv, "leave\n");
2632} 2640}
@@ -2847,21 +2855,22 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2847 2855
2848 IWL_DEBUG_MAC80211(priv, "enter\n"); 2856 IWL_DEBUG_MAC80211(priv, "enter\n");
2849 2857
2858 mutex_lock(&priv->mutex);
2859
2850 if (iwl_legacy_is_rfkill(priv)) 2860 if (iwl_legacy_is_rfkill(priv))
2851 goto out_exit; 2861 goto out;
2852 2862
2853 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 2863 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2854 test_bit(STATUS_SCANNING, &priv->status)) 2864 test_bit(STATUS_SCANNING, &priv->status))
2855 goto out_exit; 2865 goto out;
2856 2866
2857 if (!iwl_legacy_is_associated_ctx(ctx)) 2867 if (!iwl_legacy_is_associated_ctx(ctx))
2858 goto out_exit; 2868 goto out;
2859 2869
2860 /* channel switch in progress */ 2870 /* channel switch in progress */
2861 if (priv->switch_rxon.switch_in_progress == true) 2871 if (priv->switch_rxon.switch_in_progress == true)
2862 goto out_exit; 2872 goto out;
2863 2873
2864 mutex_lock(&priv->mutex);
2865 if (priv->cfg->ops->lib->set_channel_switch) { 2874 if (priv->cfg->ops->lib->set_channel_switch) {
2866 2875
2867 ch = channel->hw_value; 2876 ch = channel->hw_value;
@@ -2917,7 +2926,6 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2917 } 2926 }
2918out: 2927out:
2919 mutex_unlock(&priv->mutex); 2928 mutex_unlock(&priv->mutex);
2920out_exit:
2921 if (!priv->switch_rxon.switch_in_progress) 2929 if (!priv->switch_rxon.switch_in_progress)
2922 ieee80211_chswitch_done(ctx->vif, false); 2930 ieee80211_chswitch_done(ctx->vif, false);
2923 IWL_DEBUG_MAC80211(priv, "leave\n"); 2931 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3116,7 +3124,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
3116 INIT_LIST_HEAD(&priv->free_frames); 3124 INIT_LIST_HEAD(&priv->free_frames);
3117 3125
3118 mutex_init(&priv->mutex); 3126 mutex_init(&priv->mutex);
3119 mutex_init(&priv->sync_cmd_mutex);
3120 3127
3121 priv->ieee_channels = NULL; 3128 priv->ieee_channels = NULL;
3122 priv->ieee_rates = NULL; 3129 priv->ieee_rates = NULL;
@@ -3173,7 +3180,7 @@ static void iwl4965_hw_detect(struct iwl_priv *priv)
3173{ 3180{
3174 priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV); 3181 priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
3175 priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG); 3182 priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
3176 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); 3183 priv->rev_id = priv->pci_dev->revision;
3177 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); 3184 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
3178} 3185}
3179 3186
@@ -3406,14 +3413,14 @@ iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3406 * 8. Enable interrupts and read RFKILL state 3413 * 8. Enable interrupts and read RFKILL state
3407 *********************************************/ 3414 *********************************************/
3408 3415
3409 /* enable interrupts if needed: hw bug w/a */ 3416 /* enable rfkill interrupt: hw bug w/a */
3410 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); 3417 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3411 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 3418 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3412 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 3419 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3413 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); 3420 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3414 } 3421 }
3415 3422
3416 iwl_legacy_enable_interrupts(priv); 3423 iwl_legacy_enable_rfkill_int(priv);
3417 3424
3418 /* If platform's RF_KILL switch is NOT set to KILL */ 3425 /* If platform's RF_KILL switch is NOT set to KILL */
3419 if (iwl_read32(priv, CSR_GP_CNTRL) & 3426 if (iwl_read32(priv, CSR_GP_CNTRL) &
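The iwl4965 hunks above stop re-enabling all interrupts and instead re-enable only the RF-kill interrupt (iwl_legacy_enable_rfkill_int) when the interface is down or the IRQ path had disabled interrupts, so rfkill state changes keep reaching user space. A hedged sketch of what such a helper typically does, with invented register names rather than the driver's CSR layout, is:

#include <linux/io.h>
#include <linux/types.h>

/* Invented register offset and bit -- not the driver's CSR definitions. */
#define REG_INT_MASK		0x0c
#define INT_BIT_RF_KILL		(1U << 7)

struct nic {
	void __iomem *base;
};

/* Unmask only the RF-kill interrupt so rfkill state changes are still
 * delivered while every other interrupt source stays disabled. */
static void nic_enable_rfkill_int(struct nic *n)
{
	writel(INT_BIT_RF_KILL, n->base + REG_INT_MASK);
}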
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 17d555f2215a..ad3bdba6beed 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -102,6 +102,16 @@ config IWLWIFI_DEVICE_TRACING
102 occur. 102 occur.
103endmenu 103endmenu
104 104
105config IWLWIFI_DEVICE_SVTOOL
106 bool "iwlwifi device svtool support"
107 depends on IWLAGN
108 select NL80211_TESTMODE
109 help
 110 This option enables svtool support for iwlwifi devices through
 111 NL80211_TESTMODE. svtool is a software validation tool that runs in
 112 user space and interacts with the device in kernel space through
 113 generic netlink messages over the NL80211_TESTMODE channel.
114
105config IWL_P2P 115config IWL_P2P
106 bool "iwlwifi experimental P2P support" 116 bool "iwlwifi experimental P2P support"
107 depends on IWLAGN 117 depends on IWLAGN
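The new IWLWIFI_DEVICE_SVTOOL option above describes a user-space validation tool that talks to the driver over NL80211_TESTMODE via generic netlink. As a rough illustration only (the test payload format is driver-specific and the bytes below are made up), a libnl-3 based sender for such a testmode message might look like:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include <net/if.h>

int main(void)
{
	unsigned char payload[] = { 0x01, 0x02, 0x03, 0x04 };	/* made-up test data */
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	if (!msg)
		return 1;

	/* NL80211_CMD_TESTMODE on wlan0; the driver parses
	 * NL80211_ATTR_TESTDATA in its own vendor-specific format. */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));
	nla_put(msg, NL80211_ATTR_TESTDATA, sizeof(payload), payload);

	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}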
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 9d6ee836426c..822660483f9f 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,8 +1,8 @@
1# AGN 1# AGN
2obj-$(CONFIG_IWLAGN) += iwlagn.o 2obj-$(CONFIG_IWLAGN) += iwlagn.o
3iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o 3iwlagn-objs := iwl-agn.o iwl-agn-rs.o
4iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o 4iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
5iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o 5iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
6iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o 6iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
7 7
8iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 8iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
@@ -14,9 +14,9 @@ iwlagn-objs += iwl-6000.o
14iwlagn-objs += iwl-1000.o 14iwlagn-objs += iwl-1000.o
15iwlagn-objs += iwl-2000.o 15iwlagn-objs += iwl-2000.o
16 16
17iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
18iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 17iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
19iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 18iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
19iwlagn-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
20 20
21CFLAGS_iwl-devtrace.o := -I$(src) 21CFLAGS_iwl-devtrace.o := -I$(src)
22 22
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 27c5007e577c..b4c81931e136 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -45,8 +45,6 @@
45#include "iwl-agn.h" 45#include "iwl-agn.h"
46#include "iwl-helpers.h" 46#include "iwl-helpers.h"
47#include "iwl-agn-hw.h" 47#include "iwl-agn-hw.h"
48#include "iwl-agn-led.h"
49#include "iwl-agn-debugfs.h"
50 48
51/* Highest firmware API version supported */ 49/* Highest firmware API version supported */
52#define IWL1000_UCODE_API_MAX 5 50#define IWL1000_UCODE_API_MAX 5
@@ -57,12 +55,10 @@
57#define IWL100_UCODE_API_MIN 5 55#define IWL100_UCODE_API_MIN 5
58 56
59#define IWL1000_FW_PRE "iwlwifi-1000-" 57#define IWL1000_FW_PRE "iwlwifi-1000-"
60#define _IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode" 58#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode"
61#define IWL1000_MODULE_FIRMWARE(api) _IWL1000_MODULE_FIRMWARE(api)
62 59
63#define IWL100_FW_PRE "iwlwifi-100-" 60#define IWL100_FW_PRE "iwlwifi-100-"
64#define _IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode" 61#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode"
65#define IWL100_MODULE_FIRMWARE(api) _IWL100_MODULE_FIRMWARE(api)
66 62
67 63
68/* 64/*
@@ -124,10 +120,10 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
124 120
125static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) 121static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
126{ 122{
127 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 123 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
128 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 124 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
129 priv->cfg->base_params->num_of_queues = 125 priv->cfg->base_params->num_of_queues =
130 priv->cfg->mod_params->num_of_queues; 126 iwlagn_mod_params.num_of_queues;
131 127
132 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 128 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
133 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 129 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
@@ -141,7 +137,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
141 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 137 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
142 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 138 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
143 139
144 priv->hw_params.max_bsm_size = 0;
145 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 140 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
146 BIT(IEEE80211_BAND_5GHZ); 141 BIT(IEEE80211_BAND_5GHZ);
147 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 142 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
@@ -176,24 +171,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
176 171
177static struct iwl_lib_ops iwl1000_lib = { 172static struct iwl_lib_ops iwl1000_lib = {
178 .set_hw_params = iwl1000_hw_set_hw_params, 173 .set_hw_params = iwl1000_hw_set_hw_params,
179 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
180 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
181 .txq_set_sched = iwlagn_txq_set_sched, 174 .txq_set_sched = iwlagn_txq_set_sched,
182 .txq_agg_enable = iwlagn_txq_agg_enable,
183 .txq_agg_disable = iwlagn_txq_agg_disable,
184 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 175 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
185 .txq_free_tfd = iwl_hw_txq_free_tfd, 176 .txq_free_tfd = iwl_hw_txq_free_tfd,
186 .txq_init = iwl_hw_tx_queue_init, 177 .txq_init = iwl_hw_tx_queue_init,
187 .rx_handler_setup = iwlagn_rx_handler_setup, 178 .rx_handler_setup = iwlagn_rx_handler_setup,
188 .setup_deferred_work = iwlagn_setup_deferred_work, 179 .setup_deferred_work = iwlagn_setup_deferred_work,
189 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 180 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
190 .load_ucode = iwlagn_load_ucode,
191 .dump_nic_event_log = iwl_dump_nic_event_log,
192 .dump_nic_error_log = iwl_dump_nic_error_log,
193 .dump_csr = iwl_dump_csr,
194 .dump_fh = iwl_dump_fh,
195 .init_alive_start = iwlagn_init_alive_start,
196 .alive_notify = iwlagn_alive_notify,
197 .send_tx_power = iwlagn_send_tx_power, 181 .send_tx_power = iwlagn_send_tx_power,
198 .update_chain_flags = iwl_update_chain_flags, 182 .update_chain_flags = iwl_update_chain_flags,
199 .apm_ops = { 183 .apm_ops = {
@@ -208,45 +192,21 @@ static struct iwl_lib_ops iwl1000_lib = {
208 EEPROM_REG_BAND_4_CHANNELS, 192 EEPROM_REG_BAND_4_CHANNELS,
209 EEPROM_REG_BAND_5_CHANNELS, 193 EEPROM_REG_BAND_5_CHANNELS,
210 EEPROM_REG_BAND_24_HT40_CHANNELS, 194 EEPROM_REG_BAND_24_HT40_CHANNELS,
211 EEPROM_REG_BAND_52_HT40_CHANNELS 195 EEPROM_REGULATORY_BAND_NO_HT40,
212 }, 196 },
213 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
214 .release_semaphore = iwlcore_eeprom_release_semaphore,
215 .calib_version = iwlagn_eeprom_calib_version,
216 .query_addr = iwlagn_eeprom_query_addr, 197 .query_addr = iwlagn_eeprom_query_addr,
217 }, 198 },
218 .isr_ops = {
219 .isr = iwl_isr_ict,
220 .free = iwl_free_isr_ict,
221 .alloc = iwl_alloc_isr_ict,
222 .reset = iwl_reset_ict,
223 .disable = iwl_disable_ict,
224 },
225 .temp_ops = { 199 .temp_ops = {
226 .temperature = iwlagn_temperature, 200 .temperature = iwlagn_temperature,
227 }, 201 },
228 .debugfs_ops = {
229 .rx_stats_read = iwl_ucode_rx_stats_read,
230 .tx_stats_read = iwl_ucode_tx_stats_read,
231 .general_stats_read = iwl_ucode_general_stats_read,
232 .bt_stats_read = iwl_ucode_bt_stats_read,
233 .reply_tx_error = iwl_reply_tx_error_read,
234 },
235 .txfifo_flush = iwlagn_txfifo_flush, 202 .txfifo_flush = iwlagn_txfifo_flush,
236 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 203 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
237 .tt_ops = {
238 .lower_power_detection = iwl_tt_is_low_power_state,
239 .tt_power_mode = iwl_tt_current_power_mode,
240 .ct_kill_check = iwl_check_for_ct_kill,
241 }
242}; 204};
243 205
244static const struct iwl_ops iwl1000_ops = { 206static const struct iwl_ops iwl1000_ops = {
245 .lib = &iwl1000_lib, 207 .lib = &iwl1000_lib,
246 .hcmd = &iwlagn_hcmd, 208 .hcmd = &iwlagn_hcmd,
247 .utils = &iwlagn_hcmd_utils, 209 .utils = &iwlagn_hcmd_utils,
248 .led = &iwlagn_led_ops,
249 .ieee80211_ops = &iwlagn_hw_ops,
250}; 210};
251 211
252static struct iwl_base_params iwl1000_base_params = { 212static struct iwl_base_params iwl1000_base_params = {
@@ -254,8 +214,6 @@ static struct iwl_base_params iwl1000_base_params = {
254 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 214 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
255 .eeprom_size = OTP_LOW_IMAGE_SIZE, 215 .eeprom_size = OTP_LOW_IMAGE_SIZE,
256 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 216 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
257 .set_l0s = true,
258 .use_bsm = false,
259 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 217 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
260 .shadow_ram_support = false, 218 .shadow_ram_support = false,
261 .led_compensation = 51, 219 .led_compensation = 51,
@@ -265,9 +223,6 @@ static struct iwl_base_params iwl1000_base_params = {
265 .chain_noise_scale = 1000, 223 .chain_noise_scale = 1000,
266 .wd_timeout = IWL_DEF_WD_TIMEOUT, 224 .wd_timeout = IWL_DEF_WD_TIMEOUT,
267 .max_event_log_size = 128, 225 .max_event_log_size = 128,
268 .ucode_tracing = true,
269 .sensitivity_calib_by_driver = true,
270 .chain_noise_calib_by_driver = true,
271}; 226};
272static struct iwl_ht_params iwl1000_ht_params = { 227static struct iwl_ht_params iwl1000_ht_params = {
273 .ht_greenfield_support = true, 228 .ht_greenfield_support = true,
@@ -281,7 +236,6 @@ static struct iwl_ht_params iwl1000_ht_params = {
281 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 236 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
282 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 237 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
283 .ops = &iwl1000_ops, \ 238 .ops = &iwl1000_ops, \
284 .mod_params = &iwlagn_mod_params, \
285 .base_params = &iwl1000_base_params, \ 239 .base_params = &iwl1000_base_params, \
286 .led_mode = IWL_LED_BLINK 240 .led_mode = IWL_LED_BLINK
287 241
@@ -303,7 +257,6 @@ struct iwl_cfg iwl1000_bg_cfg = {
303 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 257 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
304 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 258 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
305 .ops = &iwl1000_ops, \ 259 .ops = &iwl1000_ops, \
306 .mod_params = &iwlagn_mod_params, \
307 .base_params = &iwl1000_base_params, \ 260 .base_params = &iwl1000_base_params, \
308 .led_mode = IWL_LED_RF_STATE, \ 261 .led_mode = IWL_LED_RF_STATE, \
309 .rx_with_siso_diversity = true 262 .rx_with_siso_diversity = true
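The iwl-1000 hunk above reads num_of_queues from the global iwlagn_mod_params instead of a per-config mod_params pointer, keeping the same clamp-into-range behaviour. A small sketch of that clamping, with assumed limits rather than the driver's real IWL_MIN_NUM_QUEUES/IWLAGN_NUM_QUEUES values, might be:

/* Assumed limits, standing in for IWL_MIN_NUM_QUEUES / IWLAGN_NUM_QUEUES. */
#define MIN_NUM_QUEUES	4
#define MAX_NUM_QUEUES	20

static unsigned int num_of_queues;	/* module parameter; 0 = use default */

/* Honour the module parameter only when it falls inside the supported range. */
static unsigned int resolve_num_queues(unsigned int def)
{
	if (num_of_queues >= MIN_NUM_QUEUES && num_of_queues <= MAX_NUM_QUEUES)
		return num_of_queues;
	return def;
}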
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index d7b6126408c9..89b8da7a6c8b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -46,30 +46,25 @@
46#include "iwl-helpers.h" 46#include "iwl-helpers.h"
47#include "iwl-agn-hw.h" 47#include "iwl-agn-hw.h"
48#include "iwl-6000-hw.h" 48#include "iwl-6000-hw.h"
49#include "iwl-agn-led.h"
50#include "iwl-agn-debugfs.h"
51 49
52/* Highest firmware API version supported */ 50/* Highest firmware API version supported */
53#define IWL2030_UCODE_API_MAX 5 51#define IWL2030_UCODE_API_MAX 5
54#define IWL2000_UCODE_API_MAX 5 52#define IWL2000_UCODE_API_MAX 5
55#define IWL200_UCODE_API_MAX 5 53#define IWL105_UCODE_API_MAX 5
56 54
57/* Lowest firmware API version supported */ 55/* Lowest firmware API version supported */
58#define IWL2030_UCODE_API_MIN 5 56#define IWL2030_UCODE_API_MIN 5
59#define IWL2000_UCODE_API_MIN 5 57#define IWL2000_UCODE_API_MIN 5
60#define IWL200_UCODE_API_MIN 5 58#define IWL105_UCODE_API_MIN 5
61 59
62#define IWL2030_FW_PRE "iwlwifi-2030-" 60#define IWL2030_FW_PRE "iwlwifi-2030-"
63#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode" 61#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
64#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
65 62
66#define IWL2000_FW_PRE "iwlwifi-2000-" 63#define IWL2000_FW_PRE "iwlwifi-2000-"
67#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode" 64#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
68#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
69 65
70#define IWL200_FW_PRE "iwlwifi-200-" 66#define IWL105_FW_PRE "iwlwifi-105-"
71#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode" 67#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE #api ".ucode"
72#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
73 68
74static void iwl2000_set_ct_threshold(struct iwl_priv *priv) 69static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
75{ 70{
@@ -101,6 +96,8 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
101 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 96 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
102 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); 97 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
103 98
99 if (priv->cfg->disable_otp_refresh)
100 iwl_write_prph(priv, APMG_ANALOG_SVR_REG, 0x80000010);
104} 101}
105 102
106static struct iwl_sensitivity_ranges iwl2000_sensitivity = { 103static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
@@ -130,10 +127,10 @@ static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
130 127
131static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) 128static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
132{ 129{
133 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 130 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
134 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 131 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
135 priv->cfg->base_params->num_of_queues = 132 priv->cfg->base_params->num_of_queues =
136 priv->cfg->mod_params->num_of_queues; 133 iwlagn_mod_params.num_of_queues;
137 134
138 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 135 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
139 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 136 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
@@ -147,7 +144,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
147 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; 144 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
148 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; 145 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
149 146
150 priv->hw_params.max_bsm_size = 0;
151 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 147 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
152 BIT(IEEE80211_BAND_5GHZ); 148 BIT(IEEE80211_BAND_5GHZ);
153 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 149 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
@@ -256,11 +252,7 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
256 252
257static struct iwl_lib_ops iwl2000_lib = { 253static struct iwl_lib_ops iwl2000_lib = {
258 .set_hw_params = iwl2000_hw_set_hw_params, 254 .set_hw_params = iwl2000_hw_set_hw_params,
259 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
260 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
261 .txq_set_sched = iwlagn_txq_set_sched, 255 .txq_set_sched = iwlagn_txq_set_sched,
262 .txq_agg_enable = iwlagn_txq_agg_enable,
263 .txq_agg_disable = iwlagn_txq_agg_disable,
264 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 256 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
265 .txq_free_tfd = iwl_hw_txq_free_tfd, 257 .txq_free_tfd = iwl_hw_txq_free_tfd,
266 .txq_init = iwl_hw_tx_queue_init, 258 .txq_init = iwl_hw_tx_queue_init,
@@ -268,13 +260,6 @@ static struct iwl_lib_ops iwl2000_lib = {
268 .setup_deferred_work = iwlagn_bt_setup_deferred_work, 260 .setup_deferred_work = iwlagn_bt_setup_deferred_work,
269 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, 261 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
270 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 262 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
271 .load_ucode = iwlagn_load_ucode,
272 .dump_nic_event_log = iwl_dump_nic_event_log,
273 .dump_nic_error_log = iwl_dump_nic_error_log,
274 .dump_csr = iwl_dump_csr,
275 .dump_fh = iwl_dump_fh,
276 .init_alive_start = iwlagn_init_alive_start,
277 .alive_notify = iwlagn_alive_notify,
278 .send_tx_power = iwlagn_send_tx_power, 263 .send_tx_power = iwlagn_send_tx_power,
279 .update_chain_flags = iwl_update_chain_flags, 264 .update_chain_flags = iwl_update_chain_flags,
280 .set_channel_switch = iwl2030_hw_channel_switch, 265 .set_channel_switch = iwl2030_hw_channel_switch,
@@ -290,70 +275,40 @@ static struct iwl_lib_ops iwl2000_lib = {
290 EEPROM_REG_BAND_4_CHANNELS, 275 EEPROM_REG_BAND_4_CHANNELS,
291 EEPROM_REG_BAND_5_CHANNELS, 276 EEPROM_REG_BAND_5_CHANNELS,
292 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 277 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
293 EEPROM_REG_BAND_52_HT40_CHANNELS 278 EEPROM_REGULATORY_BAND_NO_HT40,
294 }, 279 },
295 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
296 .release_semaphore = iwlcore_eeprom_release_semaphore,
297 .calib_version = iwlagn_eeprom_calib_version,
298 .query_addr = iwlagn_eeprom_query_addr, 280 .query_addr = iwlagn_eeprom_query_addr,
299 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 281 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
300 }, 282 },
301 .isr_ops = {
302 .isr = iwl_isr_ict,
303 .free = iwl_free_isr_ict,
304 .alloc = iwl_alloc_isr_ict,
305 .reset = iwl_reset_ict,
306 .disable = iwl_disable_ict,
307 },
308 .temp_ops = { 283 .temp_ops = {
309 .temperature = iwlagn_temperature, 284 .temperature = iwlagn_temperature,
310 }, 285 },
311 .debugfs_ops = {
312 .rx_stats_read = iwl_ucode_rx_stats_read,
313 .tx_stats_read = iwl_ucode_tx_stats_read,
314 .general_stats_read = iwl_ucode_general_stats_read,
315 .bt_stats_read = iwl_ucode_bt_stats_read,
316 .reply_tx_error = iwl_reply_tx_error_read,
317 },
318 .txfifo_flush = iwlagn_txfifo_flush, 286 .txfifo_flush = iwlagn_txfifo_flush,
319 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 287 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
320 .tt_ops = {
321 .lower_power_detection = iwl_tt_is_low_power_state,
322 .tt_power_mode = iwl_tt_current_power_mode,
323 .ct_kill_check = iwl_check_for_ct_kill,
324 }
325}; 288};
326 289
327static const struct iwl_ops iwl2000_ops = { 290static const struct iwl_ops iwl2000_ops = {
328 .lib = &iwl2000_lib, 291 .lib = &iwl2000_lib,
329 .hcmd = &iwlagn_hcmd, 292 .hcmd = &iwlagn_hcmd,
330 .utils = &iwlagn_hcmd_utils, 293 .utils = &iwlagn_hcmd_utils,
331 .led = &iwlagn_led_ops,
332 .ieee80211_ops = &iwlagn_hw_ops,
333}; 294};
334 295
335static const struct iwl_ops iwl2030_ops = { 296static const struct iwl_ops iwl2030_ops = {
336 .lib = &iwl2000_lib, 297 .lib = &iwl2000_lib,
337 .hcmd = &iwlagn_bt_hcmd, 298 .hcmd = &iwlagn_bt_hcmd,
338 .utils = &iwlagn_hcmd_utils, 299 .utils = &iwlagn_hcmd_utils,
339 .led = &iwlagn_led_ops,
340 .ieee80211_ops = &iwlagn_hw_ops,
341}; 300};
342 301
343static const struct iwl_ops iwl200_ops = { 302static const struct iwl_ops iwl105_ops = {
344 .lib = &iwl2000_lib, 303 .lib = &iwl2000_lib,
345 .hcmd = &iwlagn_hcmd, 304 .hcmd = &iwlagn_hcmd,
346 .utils = &iwlagn_hcmd_utils, 305 .utils = &iwlagn_hcmd_utils,
347 .led = &iwlagn_led_ops,
348 .ieee80211_ops = &iwlagn_hw_ops,
349}; 306};
350 307
351static const struct iwl_ops iwl230_ops = { 308static const struct iwl_ops iwl135_ops = {
352 .lib = &iwl2000_lib, 309 .lib = &iwl2000_lib,
353 .hcmd = &iwlagn_bt_hcmd, 310 .hcmd = &iwlagn_bt_hcmd,
354 .utils = &iwlagn_hcmd_utils, 311 .utils = &iwlagn_hcmd_utils,
355 .led = &iwlagn_led_ops,
356 .ieee80211_ops = &iwlagn_hw_ops,
357}; 312};
358 313
359static struct iwl_base_params iwl2000_base_params = { 314static struct iwl_base_params iwl2000_base_params = {
@@ -361,8 +316,6 @@ static struct iwl_base_params iwl2000_base_params = {
361 .num_of_queues = IWLAGN_NUM_QUEUES, 316 .num_of_queues = IWLAGN_NUM_QUEUES,
362 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 317 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
363 .pll_cfg_val = 0, 318 .pll_cfg_val = 0,
364 .set_l0s = true,
365 .use_bsm = false,
366 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 319 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
367 .shadow_ram_support = true, 320 .shadow_ram_support = true,
368 .led_compensation = 51, 321 .led_compensation = 51,
@@ -373,9 +326,6 @@ static struct iwl_base_params iwl2000_base_params = {
373 .chain_noise_scale = 1000, 326 .chain_noise_scale = 1000,
374 .wd_timeout = IWL_DEF_WD_TIMEOUT, 327 .wd_timeout = IWL_DEF_WD_TIMEOUT,
375 .max_event_log_size = 512, 328 .max_event_log_size = 512,
376 .ucode_tracing = true,
377 .sensitivity_calib_by_driver = true,
378 .chain_noise_calib_by_driver = true,
379 .shadow_reg_enable = true, 329 .shadow_reg_enable = true,
380}; 330};
381 331
@@ -385,8 +335,6 @@ static struct iwl_base_params iwl2030_base_params = {
385 .num_of_queues = IWLAGN_NUM_QUEUES, 335 .num_of_queues = IWLAGN_NUM_QUEUES,
386 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 336 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
387 .pll_cfg_val = 0, 337 .pll_cfg_val = 0,
388 .set_l0s = true,
389 .use_bsm = false,
390 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 338 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
391 .shadow_ram_support = true, 339 .shadow_ram_support = true,
392 .led_compensation = 57, 340 .led_compensation = 57,
@@ -397,9 +345,6 @@ static struct iwl_base_params iwl2030_base_params = {
397 .chain_noise_scale = 1000, 345 .chain_noise_scale = 1000,
398 .wd_timeout = IWL_LONG_WD_TIMEOUT, 346 .wd_timeout = IWL_LONG_WD_TIMEOUT,
399 .max_event_log_size = 512, 347 .max_event_log_size = 512,
400 .ucode_tracing = true,
401 .sensitivity_calib_by_driver = true,
402 .chain_noise_calib_by_driver = true,
403 .shadow_reg_enable = true, 348 .shadow_reg_enable = true,
404}; 349};
405 350
@@ -409,7 +354,6 @@ static struct iwl_ht_params iwl2000_ht_params = {
409}; 354};
410 355
411static struct iwl_bt_params iwl2030_bt_params = { 356static struct iwl_bt_params iwl2030_bt_params = {
412 .bt_statistics = true,
413 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 357 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
414 .advanced_bt_coexist = true, 358 .advanced_bt_coexist = true,
415 .agg_time_limit = BT_AGG_THRESHOLD_DEF, 359 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
@@ -426,12 +370,12 @@ static struct iwl_bt_params iwl2030_bt_params = {
426 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 370 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
427 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 371 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
428 .ops = &iwl2000_ops, \ 372 .ops = &iwl2000_ops, \
429 .mod_params = &iwlagn_mod_params, \
430 .base_params = &iwl2000_base_params, \ 373 .base_params = &iwl2000_base_params, \
431 .need_dc_calib = true, \ 374 .need_dc_calib = true, \
432 .need_temp_offset_calib = true, \ 375 .need_temp_offset_calib = true, \
433 .led_mode = IWL_LED_RF_STATE, \ 376 .led_mode = IWL_LED_RF_STATE, \
434 .iq_invert = true \ 377 .iq_invert = true, \
378 .disable_otp_refresh = true \
435 379
436struct iwl_cfg iwl2000_2bgn_cfg = { 380struct iwl_cfg iwl2000_2bgn_cfg = {
437 .name = "2000 Series 2x2 BGN", 381 .name = "2000 Series 2x2 BGN",
@@ -451,7 +395,6 @@ struct iwl_cfg iwl2000_2bg_cfg = {
451 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 395 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
452 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 396 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
453 .ops = &iwl2030_ops, \ 397 .ops = &iwl2030_ops, \
454 .mod_params = &iwlagn_mod_params, \
455 .base_params = &iwl2030_base_params, \ 398 .base_params = &iwl2030_base_params, \
456 .bt_params = &iwl2030_bt_params, \ 399 .bt_params = &iwl2030_bt_params, \
457 .need_dc_calib = true, \ 400 .need_dc_calib = true, \
@@ -471,45 +414,13 @@ struct iwl_cfg iwl2030_2bg_cfg = {
471 IWL_DEVICE_2030, 414 IWL_DEVICE_2030,
472}; 415};
473 416
474#define IWL_DEVICE_6035 \ 417#define IWL_DEVICE_105 \
475 .fw_name_pre = IWL2030_FW_PRE, \ 418 .fw_name_pre = IWL105_FW_PRE, \
476 .ucode_api_max = IWL2030_UCODE_API_MAX, \ 419 .ucode_api_max = IWL105_UCODE_API_MAX, \
477 .ucode_api_min = IWL2030_UCODE_API_MIN, \ 420 .ucode_api_min = IWL105_UCODE_API_MIN, \
478 .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \
479 .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION, \
480 .ops = &iwl2030_ops, \
481 .mod_params = &iwlagn_mod_params, \
482 .base_params = &iwl2030_base_params, \
483 .bt_params = &iwl2030_bt_params, \
484 .need_dc_calib = true, \
485 .need_temp_offset_calib = true, \
486 .led_mode = IWL_LED_RF_STATE, \
487 .adv_pm = true \
488
489struct iwl_cfg iwl6035_2agn_cfg = {
490 .name = "2000 Series 2x2 AGN/BT",
491 IWL_DEVICE_6035,
492 .ht_params = &iwl2000_ht_params,
493};
494
495struct iwl_cfg iwl6035_2abg_cfg = {
496 .name = "2000 Series 2x2 ABG/BT",
497 IWL_DEVICE_6035,
498};
499
500struct iwl_cfg iwl6035_2bg_cfg = {
501 .name = "2000 Series 2x2 BG/BT",
502 IWL_DEVICE_6035,
503};
504
505#define IWL_DEVICE_200 \
506 .fw_name_pre = IWL200_FW_PRE, \
507 .ucode_api_max = IWL200_UCODE_API_MAX, \
508 .ucode_api_min = IWL200_UCODE_API_MIN, \
509 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 421 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
510 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 422 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
511 .ops = &iwl200_ops, \ 423 .ops = &iwl105_ops, \
512 .mod_params = &iwlagn_mod_params, \
513 .base_params = &iwl2000_base_params, \ 424 .base_params = &iwl2000_base_params, \
514 .need_dc_calib = true, \ 425 .need_dc_calib = true, \
515 .need_temp_offset_calib = true, \ 426 .need_temp_offset_calib = true, \
@@ -517,25 +428,24 @@ struct iwl_cfg iwl6035_2bg_cfg = {
517 .adv_pm = true, \ 428 .adv_pm = true, \
518 .rx_with_siso_diversity = true \ 429 .rx_with_siso_diversity = true \
519 430
520struct iwl_cfg iwl200_bg_cfg = { 431struct iwl_cfg iwl105_bg_cfg = {
521 .name = "200 Series 1x1 BG", 432 .name = "105 Series 1x1 BG",
522 IWL_DEVICE_200, 433 IWL_DEVICE_105,
523}; 434};
524 435
525struct iwl_cfg iwl200_bgn_cfg = { 436struct iwl_cfg iwl105_bgn_cfg = {
526 .name = "200 Series 1x1 BGN", 437 .name = "105 Series 1x1 BGN",
527 IWL_DEVICE_200, 438 IWL_DEVICE_105,
528 .ht_params = &iwl2000_ht_params, 439 .ht_params = &iwl2000_ht_params,
529}; 440};
530 441
531#define IWL_DEVICE_230 \ 442#define IWL_DEVICE_135 \
532 .fw_name_pre = IWL200_FW_PRE, \ 443 .fw_name_pre = IWL105_FW_PRE, \
533 .ucode_api_max = IWL200_UCODE_API_MAX, \ 444 .ucode_api_max = IWL105_UCODE_API_MAX, \
534 .ucode_api_min = IWL200_UCODE_API_MIN, \ 445 .ucode_api_min = IWL105_UCODE_API_MIN, \
535 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 446 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
536 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 447 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
537 .ops = &iwl230_ops, \ 448 .ops = &iwl135_ops, \
538 .mod_params = &iwlagn_mod_params, \
539 .base_params = &iwl2030_base_params, \ 449 .base_params = &iwl2030_base_params, \
540 .bt_params = &iwl2030_bt_params, \ 450 .bt_params = &iwl2030_bt_params, \
541 .need_dc_calib = true, \ 451 .need_dc_calib = true, \
@@ -544,17 +454,17 @@ struct iwl_cfg iwl200_bgn_cfg = {
544 .adv_pm = true, \ 454 .adv_pm = true, \
545 .rx_with_siso_diversity = true \ 455 .rx_with_siso_diversity = true \
546 456
547struct iwl_cfg iwl230_bg_cfg = { 457struct iwl_cfg iwl135_bg_cfg = {
548 .name = "200 Series 1x1 BG/BT", 458 .name = "105 Series 1x1 BG/BT",
549 IWL_DEVICE_230, 459 IWL_DEVICE_135,
550}; 460};
551 461
552struct iwl_cfg iwl230_bgn_cfg = { 462struct iwl_cfg iwl135_bgn_cfg = {
553 .name = "200 Series 1x1 BGN/BT", 463 .name = "105 Series 1x1 BGN/BT",
554 IWL_DEVICE_230, 464 IWL_DEVICE_135,
555 .ht_params = &iwl2000_ht_params, 465 .ht_params = &iwl2000_ht_params,
556}; 466};
557 467
558MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); 468MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
559MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); 469MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
560MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX)); 470MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 3975e45e7500..05ad47628b63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 22e045b5bcee..98f81df166e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -45,10 +45,8 @@
45#include "iwl-sta.h" 45#include "iwl-sta.h"
46#include "iwl-helpers.h" 46#include "iwl-helpers.h"
47#include "iwl-agn.h" 47#include "iwl-agn.h"
48#include "iwl-agn-led.h"
49#include "iwl-agn-hw.h" 48#include "iwl-agn-hw.h"
50#include "iwl-5000-hw.h" 49#include "iwl-5000-hw.h"
51#include "iwl-agn-debugfs.h"
52 50
53/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
54#define IWL5000_UCODE_API_MAX 5 52#define IWL5000_UCODE_API_MAX 5
@@ -59,12 +57,10 @@
59#define IWL5150_UCODE_API_MIN 1 57#define IWL5150_UCODE_API_MIN 1
60 58
61#define IWL5000_FW_PRE "iwlwifi-5000-" 59#define IWL5000_FW_PRE "iwlwifi-5000-"
62#define _IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode" 60#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode"
63#define IWL5000_MODULE_FIRMWARE(api) _IWL5000_MODULE_FIRMWARE(api)
64 61
65#define IWL5150_FW_PRE "iwlwifi-5150-" 62#define IWL5150_FW_PRE "iwlwifi-5150-"
66#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode" 63#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
67#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)
68 64
69/* NIC configuration for 5000 series */ 65/* NIC configuration for 5000 series */
70static void iwl5000_nic_config(struct iwl_priv *priv) 66static void iwl5000_nic_config(struct iwl_priv *priv)
@@ -168,10 +164,10 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
168 164
169static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) 165static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
170{ 166{
171 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 167 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
172 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 168 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
173 priv->cfg->base_params->num_of_queues = 169 priv->cfg->base_params->num_of_queues =
174 priv->cfg->mod_params->num_of_queues; 170 iwlagn_mod_params.num_of_queues;
175 171
176 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 172 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
177 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 173 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
@@ -185,7 +181,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
185 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 181 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
186 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 182 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
187 183
188 priv->hw_params.max_bsm_size = 0;
189 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 184 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
190 BIT(IEEE80211_BAND_5GHZ); 185 BIT(IEEE80211_BAND_5GHZ);
191 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 186 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
@@ -214,10 +209,10 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
214 209
215static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) 210static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
216{ 211{
217 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 212 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
218 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 213 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
219 priv->cfg->base_params->num_of_queues = 214 priv->cfg->base_params->num_of_queues =
220 priv->cfg->mod_params->num_of_queues; 215 iwlagn_mod_params.num_of_queues;
221 216
222 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 217 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
223 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 218 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
@@ -231,7 +226,6 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
231 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 226 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
232 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 227 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
233 228
234 priv->hw_params.max_bsm_size = 0;
235 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 229 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
236 BIT(IEEE80211_BAND_5GHZ); 230 BIT(IEEE80211_BAND_5GHZ);
237 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 231 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
@@ -263,7 +257,7 @@ static void iwl5150_temperature(struct iwl_priv *priv)
263 u32 vt = 0; 257 u32 vt = 0;
264 s32 offset = iwl_temp_calib_to_offset(priv); 258 s32 offset = iwl_temp_calib_to_offset(priv);
265 259
266 vt = le32_to_cpu(priv->_agn.statistics.general.common.temperature); 260 vt = le32_to_cpu(priv->statistics.common.temperature);
267 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; 261 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
268 /* now vt hold the temperature in Kelvin */ 262 /* now vt hold the temperature in Kelvin */
269 priv->temperature = KELVIN_TO_CELSIUS(vt); 263 priv->temperature = KELVIN_TO_CELSIUS(vt);
@@ -345,24 +339,13 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
345 339
346static struct iwl_lib_ops iwl5000_lib = { 340static struct iwl_lib_ops iwl5000_lib = {
347 .set_hw_params = iwl5000_hw_set_hw_params, 341 .set_hw_params = iwl5000_hw_set_hw_params,
348 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
349 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
350 .txq_set_sched = iwlagn_txq_set_sched, 342 .txq_set_sched = iwlagn_txq_set_sched,
351 .txq_agg_enable = iwlagn_txq_agg_enable,
352 .txq_agg_disable = iwlagn_txq_agg_disable,
353 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 343 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
354 .txq_free_tfd = iwl_hw_txq_free_tfd, 344 .txq_free_tfd = iwl_hw_txq_free_tfd,
355 .txq_init = iwl_hw_tx_queue_init, 345 .txq_init = iwl_hw_tx_queue_init,
356 .rx_handler_setup = iwlagn_rx_handler_setup, 346 .rx_handler_setup = iwlagn_rx_handler_setup,
357 .setup_deferred_work = iwlagn_setup_deferred_work, 347 .setup_deferred_work = iwlagn_setup_deferred_work,
358 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 348 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
359 .dump_nic_event_log = iwl_dump_nic_event_log,
360 .dump_nic_error_log = iwl_dump_nic_error_log,
361 .dump_csr = iwl_dump_csr,
362 .dump_fh = iwl_dump_fh,
363 .load_ucode = iwlagn_load_ucode,
364 .init_alive_start = iwlagn_init_alive_start,
365 .alive_notify = iwlagn_alive_notify,
366 .send_tx_power = iwlagn_send_tx_power, 349 .send_tx_power = iwlagn_send_tx_power,
367 .update_chain_flags = iwl_update_chain_flags, 350 .update_chain_flags = iwl_update_chain_flags,
368 .set_channel_switch = iwl5000_hw_channel_switch, 351 .set_channel_switch = iwl5000_hw_channel_switch,
@@ -380,56 +363,24 @@ static struct iwl_lib_ops iwl5000_lib = {
380 EEPROM_REG_BAND_24_HT40_CHANNELS, 363 EEPROM_REG_BAND_24_HT40_CHANNELS,
381 EEPROM_REG_BAND_52_HT40_CHANNELS 364 EEPROM_REG_BAND_52_HT40_CHANNELS
382 }, 365 },
383 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
384 .release_semaphore = iwlcore_eeprom_release_semaphore,
385 .calib_version = iwlagn_eeprom_calib_version,
386 .query_addr = iwlagn_eeprom_query_addr, 366 .query_addr = iwlagn_eeprom_query_addr,
387 }, 367 },
388 .isr_ops = {
389 .isr = iwl_isr_ict,
390 .free = iwl_free_isr_ict,
391 .alloc = iwl_alloc_isr_ict,
392 .reset = iwl_reset_ict,
393 .disable = iwl_disable_ict,
394 },
395 .temp_ops = { 368 .temp_ops = {
396 .temperature = iwlagn_temperature, 369 .temperature = iwlagn_temperature,
397 }, 370 },
398 .debugfs_ops = {
399 .rx_stats_read = iwl_ucode_rx_stats_read,
400 .tx_stats_read = iwl_ucode_tx_stats_read,
401 .general_stats_read = iwl_ucode_general_stats_read,
402 .bt_stats_read = iwl_ucode_bt_stats_read,
403 .reply_tx_error = iwl_reply_tx_error_read,
404 },
405 .txfifo_flush = iwlagn_txfifo_flush, 371 .txfifo_flush = iwlagn_txfifo_flush,
406 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 372 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
407 .tt_ops = {
408 .lower_power_detection = iwl_tt_is_low_power_state,
409 .tt_power_mode = iwl_tt_current_power_mode,
410 .ct_kill_check = iwl_check_for_ct_kill,
411 }
412}; 373};
413 374
414static struct iwl_lib_ops iwl5150_lib = { 375static struct iwl_lib_ops iwl5150_lib = {
415 .set_hw_params = iwl5150_hw_set_hw_params, 376 .set_hw_params = iwl5150_hw_set_hw_params,
416 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
417 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
418 .txq_set_sched = iwlagn_txq_set_sched, 377 .txq_set_sched = iwlagn_txq_set_sched,
419 .txq_agg_enable = iwlagn_txq_agg_enable,
420 .txq_agg_disable = iwlagn_txq_agg_disable,
421 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 378 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
422 .txq_free_tfd = iwl_hw_txq_free_tfd, 379 .txq_free_tfd = iwl_hw_txq_free_tfd,
423 .txq_init = iwl_hw_tx_queue_init, 380 .txq_init = iwl_hw_tx_queue_init,
424 .rx_handler_setup = iwlagn_rx_handler_setup, 381 .rx_handler_setup = iwlagn_rx_handler_setup,
425 .setup_deferred_work = iwlagn_setup_deferred_work, 382 .setup_deferred_work = iwlagn_setup_deferred_work,
426 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 383 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
427 .dump_nic_event_log = iwl_dump_nic_event_log,
428 .dump_nic_error_log = iwl_dump_nic_error_log,
429 .dump_csr = iwl_dump_csr,
430 .load_ucode = iwlagn_load_ucode,
431 .init_alive_start = iwlagn_init_alive_start,
432 .alive_notify = iwlagn_alive_notify,
433 .send_tx_power = iwlagn_send_tx_power, 384 .send_tx_power = iwlagn_send_tx_power,
434 .update_chain_flags = iwl_update_chain_flags, 385 .update_chain_flags = iwl_update_chain_flags,
435 .set_channel_switch = iwl5000_hw_channel_switch, 386 .set_channel_switch = iwl5000_hw_channel_switch,
@@ -447,51 +398,25 @@ static struct iwl_lib_ops iwl5150_lib = {
447 EEPROM_REG_BAND_24_HT40_CHANNELS, 398 EEPROM_REG_BAND_24_HT40_CHANNELS,
448 EEPROM_REG_BAND_52_HT40_CHANNELS 399 EEPROM_REG_BAND_52_HT40_CHANNELS
449 }, 400 },
450 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
451 .release_semaphore = iwlcore_eeprom_release_semaphore,
452 .calib_version = iwlagn_eeprom_calib_version,
453 .query_addr = iwlagn_eeprom_query_addr, 401 .query_addr = iwlagn_eeprom_query_addr,
454 }, 402 },
455 .isr_ops = {
456 .isr = iwl_isr_ict,
457 .free = iwl_free_isr_ict,
458 .alloc = iwl_alloc_isr_ict,
459 .reset = iwl_reset_ict,
460 .disable = iwl_disable_ict,
461 },
462 .temp_ops = { 403 .temp_ops = {
463 .temperature = iwl5150_temperature, 404 .temperature = iwl5150_temperature,
464 }, 405 },
465 .debugfs_ops = {
466 .rx_stats_read = iwl_ucode_rx_stats_read,
467 .tx_stats_read = iwl_ucode_tx_stats_read,
468 .general_stats_read = iwl_ucode_general_stats_read,
469 .bt_stats_read = iwl_ucode_bt_stats_read,
470 .reply_tx_error = iwl_reply_tx_error_read,
471 },
472 .txfifo_flush = iwlagn_txfifo_flush, 406 .txfifo_flush = iwlagn_txfifo_flush,
473 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 407 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
474 .tt_ops = {
475 .lower_power_detection = iwl_tt_is_low_power_state,
476 .tt_power_mode = iwl_tt_current_power_mode,
477 .ct_kill_check = iwl_check_for_ct_kill,
478 }
479}; 408};
480 409
481static const struct iwl_ops iwl5000_ops = { 410static const struct iwl_ops iwl5000_ops = {
482 .lib = &iwl5000_lib, 411 .lib = &iwl5000_lib,
483 .hcmd = &iwlagn_hcmd, 412 .hcmd = &iwlagn_hcmd,
484 .utils = &iwlagn_hcmd_utils, 413 .utils = &iwlagn_hcmd_utils,
485 .led = &iwlagn_led_ops,
486 .ieee80211_ops = &iwlagn_hw_ops,
487}; 414};
488 415
489static const struct iwl_ops iwl5150_ops = { 416static const struct iwl_ops iwl5150_ops = {
490 .lib = &iwl5150_lib, 417 .lib = &iwl5150_lib,
491 .hcmd = &iwlagn_hcmd, 418 .hcmd = &iwlagn_hcmd,
492 .utils = &iwlagn_hcmd_utils, 419 .utils = &iwlagn_hcmd_utils,
493 .led = &iwlagn_led_ops,
494 .ieee80211_ops = &iwlagn_hw_ops,
495}; 420};
496 421
497static struct iwl_base_params iwl5000_base_params = { 422static struct iwl_base_params iwl5000_base_params = {
@@ -499,17 +424,12 @@ static struct iwl_base_params iwl5000_base_params = {
499 .num_of_queues = IWLAGN_NUM_QUEUES, 424 .num_of_queues = IWLAGN_NUM_QUEUES,
500 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 425 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
501 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 426 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
502 .set_l0s = true,
503 .use_bsm = false,
504 .led_compensation = 51, 427 .led_compensation = 51,
505 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 428 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
506 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 429 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
507 .chain_noise_scale = 1000, 430 .chain_noise_scale = 1000,
508 .wd_timeout = IWL_LONG_WD_TIMEOUT, 431 .wd_timeout = IWL_LONG_WD_TIMEOUT,
509 .max_event_log_size = 512, 432 .max_event_log_size = 512,
510 .ucode_tracing = true,
511 .sensitivity_calib_by_driver = true,
512 .chain_noise_calib_by_driver = true,
513}; 433};
514static struct iwl_ht_params iwl5000_ht_params = { 434static struct iwl_ht_params iwl5000_ht_params = {
515 .ht_greenfield_support = true, 435 .ht_greenfield_support = true,
@@ -523,7 +443,6 @@ static struct iwl_ht_params iwl5000_ht_params = {
523 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ 443 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
524 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 444 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
525 .ops = &iwl5000_ops, \ 445 .ops = &iwl5000_ops, \
526 .mod_params = &iwlagn_mod_params, \
527 .base_params = &iwl5000_base_params, \ 446 .base_params = &iwl5000_base_params, \
528 .led_mode = IWL_LED_BLINK 447 .led_mode = IWL_LED_BLINK
529 448
@@ -567,7 +486,6 @@ struct iwl_cfg iwl5350_agn_cfg = {
567 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 486 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
568 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 487 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
569 .ops = &iwl5000_ops, 488 .ops = &iwl5000_ops,
570 .mod_params = &iwlagn_mod_params,
571 .base_params = &iwl5000_base_params, 489 .base_params = &iwl5000_base_params,
572 .ht_params = &iwl5000_ht_params, 490 .ht_params = &iwl5000_ht_params,
573 .led_mode = IWL_LED_BLINK, 491 .led_mode = IWL_LED_BLINK,
@@ -581,7 +499,6 @@ struct iwl_cfg iwl5350_agn_cfg = {
581 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ 499 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
582 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 500 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
583 .ops = &iwl5150_ops, \ 501 .ops = &iwl5150_ops, \
584 .mod_params = &iwlagn_mod_params, \
585 .base_params = &iwl5000_base_params, \ 502 .base_params = &iwl5000_base_params, \
586 .need_dc_calib = true, \ 503 .need_dc_calib = true, \
587 .led_mode = IWL_LED_BLINK, \ 504 .led_mode = IWL_LED_BLINK, \
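
The hunks above thin out the per-device iwl_lib_ops tables: hooks that the 5000/5150 entries wired to the same helpers for every AGN device (ucode loading, event/error-log and CSR dumps, ISR, debugfs and thermal-throttle callbacks) are dropped, presumably so the core can call those shared functions directly, while only hooks that genuinely differ per device remain. A minimal C sketch of that pattern follows; the struct and function names in it are made up for illustration and are not the iwlwifi API.

/* Toy model of the pattern: drop hooks that were identical for every
 * device and call the shared implementation directly; keep only the
 * hooks that really vary.  All names here are made up. */
#include <stdio.h>

struct device;

/* Before: the per-device ops table repeated the same pointers. */
struct lib_ops_old {
	void (*load_ucode)(struct device *dev);    /* same for all devices */
	void (*send_tx_power)(struct device *dev); /* genuinely per-device */
};

/* After: only the hook that differs between devices remains. */
struct lib_ops_new {
	void (*send_tx_power)(struct device *dev);
};

static void shared_load_ucode(struct device *dev)
{
	(void)dev;
	printf("loading ucode via the shared helper\n");
}

static void dev_send_tx_power(struct device *dev)
{
	(void)dev;
	printf("device-specific tx-power command\n");
}

static const struct lib_ops_new ops = {
	.send_tx_power = dev_send_tx_power,
};

int main(void)
{
	shared_load_ucode(NULL);  /* core calls the shared code directly */
	ops.send_tx_power(NULL);  /* the table keeps only the real variation */
	return 0;
}

The 6000-series tables in the next file get the same treatment.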
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index 47891e16a758..b27986e57c92 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index a745b01c0ec1..a7921f9a03c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -46,8 +46,6 @@
46#include "iwl-helpers.h" 46#include "iwl-helpers.h"
47#include "iwl-agn-hw.h" 47#include "iwl-agn-hw.h"
48#include "iwl-6000-hw.h" 48#include "iwl-6000-hw.h"
49#include "iwl-agn-led.h"
50#include "iwl-agn-debugfs.h"
51 49
52/* Highest firmware API version supported */ 50/* Highest firmware API version supported */
53#define IWL6000_UCODE_API_MAX 4 51#define IWL6000_UCODE_API_MAX 4
@@ -60,20 +58,16 @@
60#define IWL6000G2_UCODE_API_MIN 4 58#define IWL6000G2_UCODE_API_MIN 4
61 59
62#define IWL6000_FW_PRE "iwlwifi-6000-" 60#define IWL6000_FW_PRE "iwlwifi-6000-"
 63#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
 64#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api)
 61#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
65 62
66#define IWL6050_FW_PRE "iwlwifi-6050-" 63#define IWL6050_FW_PRE "iwlwifi-6050-"
 67#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
 68#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
 64#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
69 65
70#define IWL6005_FW_PRE "iwlwifi-6000g2a-" 66#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
 71#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
 72#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
 67#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
73 68
74#define IWL6030_FW_PRE "iwlwifi-6000g2b-" 69#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
 75#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
 76#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
 70#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
77 71
78static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 72static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
79{ 73{
@@ -85,7 +79,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
85static void iwl6050_additional_nic_config(struct iwl_priv *priv) 79static void iwl6050_additional_nic_config(struct iwl_priv *priv)
86{ 80{
87 /* Indicate calibration version to uCode. */ 81 /* Indicate calibration version to uCode. */
 88 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
 82 if (iwlagn_eeprom_calib_version(priv) >= 6)
89 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 83 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
90 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 84 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
91} 85}
@@ -93,7 +87,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
93static void iwl6150_additional_nic_config(struct iwl_priv *priv) 87static void iwl6150_additional_nic_config(struct iwl_priv *priv)
94{ 88{
95 /* Indicate calibration version to uCode. */ 89 /* Indicate calibration version to uCode. */
 96 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
 90 if (iwlagn_eeprom_calib_version(priv) >= 6)
97 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 91 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
98 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 92 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
99 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 93 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
@@ -159,10 +153,10 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
159 153
160static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) 154static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
161{ 155{
 162 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
 163 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
 164 priv->cfg->base_params->num_of_queues =
 165 priv->cfg->mod_params->num_of_queues;
 156 if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
 157 iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
 158 priv->cfg->base_params->num_of_queues =
 159 iwlagn_mod_params.num_of_queues;
166 160
167 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 161 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
168 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 162 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
@@ -176,7 +170,6 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
176 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; 170 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
177 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; 171 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
178 172
179 priv->hw_params.max_bsm_size = 0;
180 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 173 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
181 BIT(IEEE80211_BAND_5GHZ); 174 BIT(IEEE80211_BAND_5GHZ);
182 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 175 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
@@ -285,24 +278,13 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
285 278
286static struct iwl_lib_ops iwl6000_lib = { 279static struct iwl_lib_ops iwl6000_lib = {
287 .set_hw_params = iwl6000_hw_set_hw_params, 280 .set_hw_params = iwl6000_hw_set_hw_params,
288 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
289 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
290 .txq_set_sched = iwlagn_txq_set_sched, 281 .txq_set_sched = iwlagn_txq_set_sched,
291 .txq_agg_enable = iwlagn_txq_agg_enable,
292 .txq_agg_disable = iwlagn_txq_agg_disable,
293 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 282 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
294 .txq_free_tfd = iwl_hw_txq_free_tfd, 283 .txq_free_tfd = iwl_hw_txq_free_tfd,
295 .txq_init = iwl_hw_tx_queue_init, 284 .txq_init = iwl_hw_tx_queue_init,
296 .rx_handler_setup = iwlagn_rx_handler_setup, 285 .rx_handler_setup = iwlagn_rx_handler_setup,
297 .setup_deferred_work = iwlagn_setup_deferred_work, 286 .setup_deferred_work = iwlagn_setup_deferred_work,
298 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 287 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
299 .load_ucode = iwlagn_load_ucode,
300 .dump_nic_event_log = iwl_dump_nic_event_log,
301 .dump_nic_error_log = iwl_dump_nic_error_log,
302 .dump_csr = iwl_dump_csr,
303 .dump_fh = iwl_dump_fh,
304 .init_alive_start = iwlagn_init_alive_start,
305 .alive_notify = iwlagn_alive_notify,
306 .send_tx_power = iwlagn_send_tx_power, 288 .send_tx_power = iwlagn_send_tx_power,
307 .update_chain_flags = iwl_update_chain_flags, 289 .update_chain_flags = iwl_update_chain_flags,
308 .set_channel_switch = iwl6000_hw_channel_switch, 290 .set_channel_switch = iwl6000_hw_channel_switch,
@@ -320,45 +302,19 @@ static struct iwl_lib_ops iwl6000_lib = {
320 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 302 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
321 EEPROM_REG_BAND_52_HT40_CHANNELS 303 EEPROM_REG_BAND_52_HT40_CHANNELS
322 }, 304 },
323 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
324 .release_semaphore = iwlcore_eeprom_release_semaphore,
325 .calib_version = iwlagn_eeprom_calib_version,
326 .query_addr = iwlagn_eeprom_query_addr, 305 .query_addr = iwlagn_eeprom_query_addr,
327 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 306 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
328 }, 307 },
329 .isr_ops = {
330 .isr = iwl_isr_ict,
331 .free = iwl_free_isr_ict,
332 .alloc = iwl_alloc_isr_ict,
333 .reset = iwl_reset_ict,
334 .disable = iwl_disable_ict,
335 },
336 .temp_ops = { 308 .temp_ops = {
337 .temperature = iwlagn_temperature, 309 .temperature = iwlagn_temperature,
338 }, 310 },
339 .debugfs_ops = {
340 .rx_stats_read = iwl_ucode_rx_stats_read,
341 .tx_stats_read = iwl_ucode_tx_stats_read,
342 .general_stats_read = iwl_ucode_general_stats_read,
343 .bt_stats_read = iwl_ucode_bt_stats_read,
344 .reply_tx_error = iwl_reply_tx_error_read,
345 },
346 .txfifo_flush = iwlagn_txfifo_flush, 311 .txfifo_flush = iwlagn_txfifo_flush,
347 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 312 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
348 .tt_ops = {
349 .lower_power_detection = iwl_tt_is_low_power_state,
350 .tt_power_mode = iwl_tt_current_power_mode,
351 .ct_kill_check = iwl_check_for_ct_kill,
352 }
353}; 313};
354 314
355static struct iwl_lib_ops iwl6030_lib = { 315static struct iwl_lib_ops iwl6030_lib = {
356 .set_hw_params = iwl6000_hw_set_hw_params, 316 .set_hw_params = iwl6000_hw_set_hw_params,
357 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
358 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
359 .txq_set_sched = iwlagn_txq_set_sched, 317 .txq_set_sched = iwlagn_txq_set_sched,
360 .txq_agg_enable = iwlagn_txq_agg_enable,
361 .txq_agg_disable = iwlagn_txq_agg_disable,
362 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 318 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
363 .txq_free_tfd = iwl_hw_txq_free_tfd, 319 .txq_free_tfd = iwl_hw_txq_free_tfd,
364 .txq_init = iwl_hw_tx_queue_init, 320 .txq_init = iwl_hw_tx_queue_init,
@@ -366,13 +322,6 @@ static struct iwl_lib_ops iwl6030_lib = {
366 .setup_deferred_work = iwlagn_bt_setup_deferred_work, 322 .setup_deferred_work = iwlagn_bt_setup_deferred_work,
367 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, 323 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
368 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 324 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
369 .load_ucode = iwlagn_load_ucode,
370 .dump_nic_event_log = iwl_dump_nic_event_log,
371 .dump_nic_error_log = iwl_dump_nic_error_log,
372 .dump_csr = iwl_dump_csr,
373 .dump_fh = iwl_dump_fh,
374 .init_alive_start = iwlagn_init_alive_start,
375 .alive_notify = iwlagn_alive_notify,
376 .send_tx_power = iwlagn_send_tx_power, 325 .send_tx_power = iwlagn_send_tx_power,
377 .update_chain_flags = iwl_update_chain_flags, 326 .update_chain_flags = iwl_update_chain_flags,
378 .set_channel_switch = iwl6000_hw_channel_switch, 327 .set_channel_switch = iwl6000_hw_channel_switch,
@@ -390,36 +339,14 @@ static struct iwl_lib_ops iwl6030_lib = {
390 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 339 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
391 EEPROM_REG_BAND_52_HT40_CHANNELS 340 EEPROM_REG_BAND_52_HT40_CHANNELS
392 }, 341 },
393 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
394 .release_semaphore = iwlcore_eeprom_release_semaphore,
395 .calib_version = iwlagn_eeprom_calib_version,
396 .query_addr = iwlagn_eeprom_query_addr, 342 .query_addr = iwlagn_eeprom_query_addr,
397 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 343 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
398 }, 344 },
399 .isr_ops = {
400 .isr = iwl_isr_ict,
401 .free = iwl_free_isr_ict,
402 .alloc = iwl_alloc_isr_ict,
403 .reset = iwl_reset_ict,
404 .disable = iwl_disable_ict,
405 },
406 .temp_ops = { 345 .temp_ops = {
407 .temperature = iwlagn_temperature, 346 .temperature = iwlagn_temperature,
408 }, 347 },
409 .debugfs_ops = {
410 .rx_stats_read = iwl_ucode_rx_stats_read,
411 .tx_stats_read = iwl_ucode_tx_stats_read,
412 .general_stats_read = iwl_ucode_general_stats_read,
413 .bt_stats_read = iwl_ucode_bt_stats_read,
414 .reply_tx_error = iwl_reply_tx_error_read,
415 },
416 .txfifo_flush = iwlagn_txfifo_flush, 348 .txfifo_flush = iwlagn_txfifo_flush,
417 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 349 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
418 .tt_ops = {
419 .lower_power_detection = iwl_tt_is_low_power_state,
420 .tt_power_mode = iwl_tt_current_power_mode,
421 .ct_kill_check = iwl_check_for_ct_kill,
422 }
423}; 350};
424 351
425static struct iwl_nic_ops iwl6050_nic_ops = { 352static struct iwl_nic_ops iwl6050_nic_ops = {
@@ -434,34 +361,26 @@ static const struct iwl_ops iwl6000_ops = {
434 .lib = &iwl6000_lib, 361 .lib = &iwl6000_lib,
435 .hcmd = &iwlagn_hcmd, 362 .hcmd = &iwlagn_hcmd,
436 .utils = &iwlagn_hcmd_utils, 363 .utils = &iwlagn_hcmd_utils,
437 .led = &iwlagn_led_ops,
438 .ieee80211_ops = &iwlagn_hw_ops,
439}; 364};
440 365
441static const struct iwl_ops iwl6050_ops = { 366static const struct iwl_ops iwl6050_ops = {
442 .lib = &iwl6000_lib, 367 .lib = &iwl6000_lib,
443 .hcmd = &iwlagn_hcmd, 368 .hcmd = &iwlagn_hcmd,
444 .utils = &iwlagn_hcmd_utils, 369 .utils = &iwlagn_hcmd_utils,
445 .led = &iwlagn_led_ops,
446 .nic = &iwl6050_nic_ops, 370 .nic = &iwl6050_nic_ops,
447 .ieee80211_ops = &iwlagn_hw_ops,
448}; 371};
449 372
450static const struct iwl_ops iwl6150_ops = { 373static const struct iwl_ops iwl6150_ops = {
451 .lib = &iwl6000_lib, 374 .lib = &iwl6000_lib,
452 .hcmd = &iwlagn_hcmd, 375 .hcmd = &iwlagn_hcmd,
453 .utils = &iwlagn_hcmd_utils, 376 .utils = &iwlagn_hcmd_utils,
454 .led = &iwlagn_led_ops,
455 .nic = &iwl6150_nic_ops, 377 .nic = &iwl6150_nic_ops,
456 .ieee80211_ops = &iwlagn_hw_ops,
457}; 378};
458 379
459static const struct iwl_ops iwl6030_ops = { 380static const struct iwl_ops iwl6030_ops = {
460 .lib = &iwl6030_lib, 381 .lib = &iwl6030_lib,
461 .hcmd = &iwlagn_bt_hcmd, 382 .hcmd = &iwlagn_bt_hcmd,
462 .utils = &iwlagn_hcmd_utils, 383 .utils = &iwlagn_hcmd_utils,
463 .led = &iwlagn_led_ops,
464 .ieee80211_ops = &iwlagn_hw_ops,
465}; 384};
466 385
467static struct iwl_base_params iwl6000_base_params = { 386static struct iwl_base_params iwl6000_base_params = {
@@ -469,8 +388,6 @@ static struct iwl_base_params iwl6000_base_params = {
469 .num_of_queues = IWLAGN_NUM_QUEUES, 388 .num_of_queues = IWLAGN_NUM_QUEUES,
470 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 389 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
471 .pll_cfg_val = 0, 390 .pll_cfg_val = 0,
472 .set_l0s = true,
473 .use_bsm = false,
474 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 391 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
475 .shadow_ram_support = true, 392 .shadow_ram_support = true,
476 .led_compensation = 51, 393 .led_compensation = 51,
@@ -481,9 +398,6 @@ static struct iwl_base_params iwl6000_base_params = {
481 .chain_noise_scale = 1000, 398 .chain_noise_scale = 1000,
482 .wd_timeout = IWL_DEF_WD_TIMEOUT, 399 .wd_timeout = IWL_DEF_WD_TIMEOUT,
483 .max_event_log_size = 512, 400 .max_event_log_size = 512,
484 .ucode_tracing = true,
485 .sensitivity_calib_by_driver = true,
486 .chain_noise_calib_by_driver = true,
487 .shadow_reg_enable = true, 401 .shadow_reg_enable = true,
488}; 402};
489 403
@@ -492,8 +406,6 @@ static struct iwl_base_params iwl6050_base_params = {
492 .num_of_queues = IWLAGN_NUM_QUEUES, 406 .num_of_queues = IWLAGN_NUM_QUEUES,
493 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 407 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
494 .pll_cfg_val = 0, 408 .pll_cfg_val = 0,
495 .set_l0s = true,
496 .use_bsm = false,
497 .max_ll_items = OTP_MAX_LL_ITEMS_6x50, 409 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
498 .shadow_ram_support = true, 410 .shadow_ram_support = true,
499 .led_compensation = 51, 411 .led_compensation = 51,
@@ -504,9 +416,6 @@ static struct iwl_base_params iwl6050_base_params = {
504 .chain_noise_scale = 1500, 416 .chain_noise_scale = 1500,
505 .wd_timeout = IWL_DEF_WD_TIMEOUT, 417 .wd_timeout = IWL_DEF_WD_TIMEOUT,
506 .max_event_log_size = 1024, 418 .max_event_log_size = 1024,
507 .ucode_tracing = true,
508 .sensitivity_calib_by_driver = true,
509 .chain_noise_calib_by_driver = true,
510 .shadow_reg_enable = true, 419 .shadow_reg_enable = true,
511}; 420};
512static struct iwl_base_params iwl6000_g2_base_params = { 421static struct iwl_base_params iwl6000_g2_base_params = {
@@ -514,8 +423,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
514 .num_of_queues = IWLAGN_NUM_QUEUES, 423 .num_of_queues = IWLAGN_NUM_QUEUES,
515 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 424 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
516 .pll_cfg_val = 0, 425 .pll_cfg_val = 0,
517 .set_l0s = true,
518 .use_bsm = false,
519 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 426 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
520 .shadow_ram_support = true, 427 .shadow_ram_support = true,
521 .led_compensation = 57, 428 .led_compensation = 57,
@@ -526,9 +433,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
526 .chain_noise_scale = 1000, 433 .chain_noise_scale = 1000,
527 .wd_timeout = IWL_LONG_WD_TIMEOUT, 434 .wd_timeout = IWL_LONG_WD_TIMEOUT,
528 .max_event_log_size = 512, 435 .max_event_log_size = 512,
529 .ucode_tracing = true,
530 .sensitivity_calib_by_driver = true,
531 .chain_noise_calib_by_driver = true,
532 .shadow_reg_enable = true, 436 .shadow_reg_enable = true,
533}; 437};
534 438
@@ -538,7 +442,6 @@ static struct iwl_ht_params iwl6000_ht_params = {
538}; 442};
539 443
540static struct iwl_bt_params iwl6000_bt_params = { 444static struct iwl_bt_params iwl6000_bt_params = {
541 .bt_statistics = true,
542 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 445 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
543 .advanced_bt_coexist = true, 446 .advanced_bt_coexist = true,
544 .agg_time_limit = BT_AGG_THRESHOLD_DEF, 447 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
@@ -554,7 +457,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
554 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ 457 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
555 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 458 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
556 .ops = &iwl6000_ops, \ 459 .ops = &iwl6000_ops, \
557 .mod_params = &iwlagn_mod_params, \
558 .base_params = &iwl6000_g2_base_params, \ 460 .base_params = &iwl6000_g2_base_params, \
559 .need_dc_calib = true, \ 461 .need_dc_calib = true, \
560 .need_temp_offset_calib = true, \ 462 .need_temp_offset_calib = true, \
@@ -583,7 +485,6 @@ struct iwl_cfg iwl6005_2bg_cfg = {
583 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ 485 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
584 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 486 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
585 .ops = &iwl6030_ops, \ 487 .ops = &iwl6030_ops, \
586 .mod_params = &iwlagn_mod_params, \
587 .base_params = &iwl6000_g2_base_params, \ 488 .base_params = &iwl6000_g2_base_params, \
588 .bt_params = &iwl6000_bt_params, \ 489 .bt_params = &iwl6000_bt_params, \
589 .need_dc_calib = true, \ 490 .need_dc_calib = true, \
@@ -613,6 +514,22 @@ struct iwl_cfg iwl6030_2bg_cfg = {
613 IWL_DEVICE_6030, 514 IWL_DEVICE_6030,
614}; 515};
615 516
517struct iwl_cfg iwl6035_2agn_cfg = {
518 .name = "6035 Series 2x2 AGN/BT",
519 IWL_DEVICE_6030,
520 .ht_params = &iwl6000_ht_params,
521};
522
523struct iwl_cfg iwl6035_2abg_cfg = {
524 .name = "6035 Series 2x2 ABG/BT",
525 IWL_DEVICE_6030,
526};
527
528struct iwl_cfg iwl6035_2bg_cfg = {
529 .name = "6035 Series 2x2 BG/BT",
530 IWL_DEVICE_6030,
531};
532
616struct iwl_cfg iwl1030_bgn_cfg = { 533struct iwl_cfg iwl1030_bgn_cfg = {
617 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN", 534 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
618 IWL_DEVICE_6030, 535 IWL_DEVICE_6030,
@@ -649,7 +566,6 @@ struct iwl_cfg iwl130_bg_cfg = {
649 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ 566 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
650 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 567 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
651 .ops = &iwl6000_ops, \ 568 .ops = &iwl6000_ops, \
652 .mod_params = &iwlagn_mod_params, \
653 .base_params = &iwl6000_base_params, \ 569 .base_params = &iwl6000_base_params, \
654 .pa_type = IWL_PA_INTERNAL, \ 570 .pa_type = IWL_PA_INTERNAL, \
655 .led_mode = IWL_LED_BLINK 571 .led_mode = IWL_LED_BLINK
@@ -679,7 +595,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
679 .ops = &iwl6050_ops, \ 595 .ops = &iwl6050_ops, \
680 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 596 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
681 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 597 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
682 .mod_params = &iwlagn_mod_params, \
683 .base_params = &iwl6050_base_params, \ 598 .base_params = &iwl6050_base_params, \
684 .need_dc_calib = true, \ 599 .need_dc_calib = true, \
685 .led_mode = IWL_LED_BLINK, \ 600 .led_mode = IWL_LED_BLINK, \
@@ -704,7 +619,6 @@ struct iwl_cfg iwl6150_bgn_cfg = {
704 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, 619 .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
705 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, 620 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
706 .ops = &iwl6150_ops, 621 .ops = &iwl6150_ops,
707 .mod_params = &iwlagn_mod_params,
708 .base_params = &iwl6050_base_params, 622 .base_params = &iwl6050_base_params,
709 .ht_params = &iwl6000_ht_params, 623 .ht_params = &iwl6000_ht_params,
710 .need_dc_calib = true, 624 .need_dc_calib = true,
@@ -720,7 +634,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
720 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 634 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
721 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 635 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
722 .ops = &iwl6000_ops, 636 .ops = &iwl6000_ops,
723 .mod_params = &iwlagn_mod_params,
724 .base_params = &iwl6000_base_params, 637 .base_params = &iwl6000_base_params,
725 .ht_params = &iwl6000_ht_params, 638 .ht_params = &iwl6000_ht_params,
726 .need_dc_calib = true, 639 .need_dc_calib = true,
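
Alongside the ops-table slimming, the 6000-series hunks drop the per-config .mod_params pointer: iwl6000_hw_set_hw_params() now reads the single global iwlagn_mod_params instead of going through priv->cfg->mod_params. A minimal, self-contained C sketch of that access change follows; the struct layouts, constant values and the clamp_queues() helper are simplified stand-ins, not the real iwlwifi definitions.

/* Simplified stand-ins for the iwlwifi structures; the real definitions
 * (and the real constant values) live in the driver headers. */
#include <stdio.h>

#define IWL_MIN_NUM_QUEUES 10   /* placeholder value */
#define IWLAGN_NUM_QUEUES  20   /* placeholder value */

struct iwl_mod_params  { int num_of_queues; };
struct iwl_base_params { int num_of_queues; };
struct iwl_cfg         { struct iwl_base_params *base_params; };
struct iwl_priv        { struct iwl_cfg *cfg; };

/* One module-wide parameter block, as the new code assumes. */
static struct iwl_mod_params iwlagn_mod_params = { .num_of_queues = 16 };

/* Mirrors the reworked iwl6000_hw_set_hw_params() logic: clamp the
 * module parameter straight into the per-family base_params. */
static void clamp_queues(struct iwl_priv *priv)
{
	if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
	    iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
		priv->cfg->base_params->num_of_queues =
			iwlagn_mod_params.num_of_queues;
}

int main(void)
{
	struct iwl_base_params base = { .num_of_queues = IWLAGN_NUM_QUEUES };
	struct iwl_cfg cfg = { .base_params = &base };
	struct iwl_priv priv = { .cfg = &cfg };

	clamp_queues(&priv);
	printf("num_of_queues = %d\n", base.num_of_queues);
	return 0;
}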
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 9006293e740c..0f6bb9b2e642 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -605,7 +605,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
605 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); 605 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
606} 606}
607 607
608void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
608void iwl_sensitivity_calibration(struct iwl_priv *priv)
609{ 609{
610 u32 rx_enable_time; 610 u32 rx_enable_time;
611 u32 fa_cck; 611 u32 fa_cck;
@@ -631,16 +631,9 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
631 } 631 }
632 632
633 spin_lock_irqsave(&priv->lock, flags); 633 spin_lock_irqsave(&priv->lock, flags);
 634 if (iwl_bt_statistics(priv)) {
 635 rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
 636 rx.general.common);
 637 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
 638 cck = &(((struct iwl_bt_notif_statistics *)resp)->rx.cck);
 639 } else {
 640 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
 641 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
 642 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
 643 }
 634 rx_info = &priv->statistics.rx_non_phy;
 635 ofdm = &priv->statistics.rx_ofdm;
 636 cck = &priv->statistics.rx_cck;
644 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 637 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
645 IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); 638 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
646 spin_unlock_irqrestore(&priv->lock, flags); 639 spin_unlock_irqrestore(&priv->lock, flags);
@@ -851,7 +844,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
851 * 1) Which antennas are connected. 844 * 1) Which antennas are connected.
852 * 2) Differential rx gain settings to balance the 3 receivers. 845 * 2) Differential rx gain settings to balance the 3 receivers.
853 */ 846 */
854void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
847void iwl_chain_noise_calibration(struct iwl_priv *priv)
855{ 848{
856 struct iwl_chain_noise_data *data = NULL; 849 struct iwl_chain_noise_data *data = NULL;
857 850
@@ -896,13 +889,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
896 } 889 }
897 890
898 spin_lock_irqsave(&priv->lock, flags); 891 spin_lock_irqsave(&priv->lock, flags);
 899 if (iwl_bt_statistics(priv)) {
 900 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
 901 rx.general.common);
 902 } else {
 903 rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
 904 rx.general);
 905 }
 892
 893 rx_info = &priv->statistics.rx_non_phy;
 894
906 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 895 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
907 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); 896 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
908 spin_unlock_irqrestore(&priv->lock, flags); 897 spin_unlock_irqrestore(&priv->lock, flags);
@@ -911,19 +900,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
911 900
912 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); 901 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
913 rxon_chnum = le16_to_cpu(ctx->staging.channel); 902 rxon_chnum = le16_to_cpu(ctx->staging.channel);
 914 if (iwl_bt_statistics(priv)) {
 915 stat_band24 = !!(((struct iwl_bt_notif_statistics *)
 916 stat_resp)->flag &
 917 STATISTICS_REPLY_FLG_BAND_24G_MSK);
 918 stat_chnum = le32_to_cpu(((struct iwl_bt_notif_statistics *)
 919 stat_resp)->flag) >> 16;
 920 } else {
 921 stat_band24 = !!(((struct iwl_notif_statistics *)
 922 stat_resp)->flag &
 923 STATISTICS_REPLY_FLG_BAND_24G_MSK);
 924 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
 925 stat_resp)->flag) >> 16;
 926 }
 903 stat_band24 =
 904 !!(priv->statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
 905 stat_chnum = le32_to_cpu(priv->statistics.flag) >> 16;
927 906
928 /* Make sure we accumulate data for just the associated channel 907 /* Make sure we accumulate data for just the associated channel
929 * (even if scanning). */ 908 * (even if scanning). */
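
The calibration hunks above change where the statistics come from: instead of casting the raw notification payload (and branching on iwl_bt_statistics() for the BT-flavoured layout) on every call, iwl_sensitivity_calibration() and iwl_chain_noise_calibration() now read a driver-maintained copy in priv->statistics, so their void *resp / void *stat_resp arguments go away. A rough, self-contained C sketch of the new access pattern follows; the types are cut-down placeholders, not the real iwl-commands.h layouts, and the flag field is treated as already CPU-endian.

/* Cut-down placeholders for the fields the calibration code reads after
 * this change; the real layouts live in the driver headers. */
#include <stdint.h>
#include <stdio.h>

#define INTERFERENCE_DATA_AVAILABLE        1
#define STATISTICS_REPLY_FLG_BAND_24G_MSK  0x2

struct statistics_rx_non_phy {
	uint32_t interference_data_flag;
};

struct iwl_statistics {
	uint32_t flag;   /* treated as CPU-endian in this sketch */
	struct statistics_rx_non_phy rx_non_phy;
};

struct iwl_priv {
	struct iwl_statistics statistics;
};

/* Mirrors the entry checks of the reworked iwl_chain_noise_calibration():
 * no notification pointer, everything comes from priv->statistics. */
static int chain_noise_data_usable(struct iwl_priv *priv)
{
	struct statistics_rx_non_phy *rx_info = &priv->statistics.rx_non_phy;
	int stat_band24, stat_chnum;

	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE)
		return 0;   /* interference data unavailable */

	/* Band and channel of the last statistics notification. */
	stat_band24 = !!(priv->statistics.flag &
			 STATISTICS_REPLY_FLG_BAND_24G_MSK);
	stat_chnum = priv->statistics.flag >> 16;
	printf("band24=%d chnum=%d\n", stat_band24, stat_chnum);
	return 1;
}

int main(void)
{
	struct iwl_priv priv = {
		.statistics = {
			.flag = (6u << 16) | STATISTICS_REPLY_FLG_BAND_24G_MSK,
			.rx_non_phy = { INTERFERENCE_DATA_AVAILABLE },
		},
	};

	return !chain_noise_data_usable(&priv);
}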
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index e37ae7261630..4ef4dd934254 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -66,8 +66,8 @@
66#include "iwl-core.h" 66#include "iwl-core.h"
67#include "iwl-commands.h" 67#include "iwl-commands.h"
68 68
 69void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
 70void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp);
 69void iwl_chain_noise_calibration(struct iwl_priv *priv);
 70void iwl_sensitivity_calibration(struct iwl_priv *priv);
71 71
72void iwl_init_sensitivity(struct iwl_priv *priv); 72void iwl_init_sensitivity(struct iwl_priv *priv);
73void iwl_reset_run_time_calib(struct iwl_priv *priv); 73void iwl_reset_run_time_calib(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
deleted file mode 100644
index b500aaae53ec..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ /dev/null
@@ -1,1073 +0,0 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-agn.h"
29#include "iwl-agn-debugfs.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_hex = " %-30s 0x%02X\n";
33static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
34static const char *fmt_header =
35 "%-32s current cumulative delta max\n";
36
37static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
38{
39 int p = 0;
40 u32 flag;
41
42 if (iwl_bt_statistics(priv))
43 flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
44 else
45 flag = le32_to_cpu(priv->_agn.statistics.flag);
46
47 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
48 if (flag & UCODE_STATISTICS_CLEAR_MSK)
49 p += scnprintf(buf + p, bufsz - p,
50 "\tStatistics have been cleared\n");
51 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
52 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
53 ? "2.4 GHz" : "5.2 GHz");
54 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
55 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
56 ? "enabled" : "disabled");
57
58 return p;
59}
60
61ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
63 {
64 struct iwl_priv *priv = file->private_data;
65 int pos = 0;
66 char *buf;
67 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
68 sizeof(struct statistics_rx_non_phy) * 40 +
69 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
70 ssize_t ret;
71 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
72 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
73 struct statistics_rx_non_phy *general, *accum_general;
74 struct statistics_rx_non_phy *delta_general, *max_general;
75 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
76
77 if (!iwl_is_alive(priv))
78 return -EAGAIN;
79
80 buf = kzalloc(bufsz, GFP_KERNEL);
81 if (!buf) {
82 IWL_ERR(priv, "Can not allocate Buffer\n");
83 return -ENOMEM;
84 }
85
86 /*
87 * the statistic information display here is based on
88 * the last statistics notification from uCode
89 * might not reflect the current uCode activity
90 */
91 if (iwl_bt_statistics(priv)) {
92 ofdm = &priv->_agn.statistics_bt.rx.ofdm;
93 cck = &priv->_agn.statistics_bt.rx.cck;
94 general = &priv->_agn.statistics_bt.rx.general.common;
95 ht = &priv->_agn.statistics_bt.rx.ofdm_ht;
96 accum_ofdm = &priv->_agn.accum_statistics_bt.rx.ofdm;
97 accum_cck = &priv->_agn.accum_statistics_bt.rx.cck;
98 accum_general =
99 &priv->_agn.accum_statistics_bt.rx.general.common;
100 accum_ht = &priv->_agn.accum_statistics_bt.rx.ofdm_ht;
101 delta_ofdm = &priv->_agn.delta_statistics_bt.rx.ofdm;
102 delta_cck = &priv->_agn.delta_statistics_bt.rx.cck;
103 delta_general =
104 &priv->_agn.delta_statistics_bt.rx.general.common;
105 delta_ht = &priv->_agn.delta_statistics_bt.rx.ofdm_ht;
106 max_ofdm = &priv->_agn.max_delta_bt.rx.ofdm;
107 max_cck = &priv->_agn.max_delta_bt.rx.cck;
108 max_general = &priv->_agn.max_delta_bt.rx.general.common;
109 max_ht = &priv->_agn.max_delta_bt.rx.ofdm_ht;
110 } else {
111 ofdm = &priv->_agn.statistics.rx.ofdm;
112 cck = &priv->_agn.statistics.rx.cck;
113 general = &priv->_agn.statistics.rx.general;
114 ht = &priv->_agn.statistics.rx.ofdm_ht;
115 accum_ofdm = &priv->_agn.accum_statistics.rx.ofdm;
116 accum_cck = &priv->_agn.accum_statistics.rx.cck;
117 accum_general = &priv->_agn.accum_statistics.rx.general;
118 accum_ht = &priv->_agn.accum_statistics.rx.ofdm_ht;
119 delta_ofdm = &priv->_agn.delta_statistics.rx.ofdm;
120 delta_cck = &priv->_agn.delta_statistics.rx.cck;
121 delta_general = &priv->_agn.delta_statistics.rx.general;
122 delta_ht = &priv->_agn.delta_statistics.rx.ofdm_ht;
123 max_ofdm = &priv->_agn.max_delta.rx.ofdm;
124 max_cck = &priv->_agn.max_delta.rx.cck;
125 max_general = &priv->_agn.max_delta.rx.general;
126 max_ht = &priv->_agn.max_delta.rx.ofdm_ht;
127 }
128
129 pos += iwl_statistics_flag(priv, buf, bufsz);
130 pos += scnprintf(buf + pos, bufsz - pos,
131 fmt_header, "Statistics_Rx - OFDM:");
132 pos += scnprintf(buf + pos, bufsz - pos,
133 fmt_table, "ina_cnt:",
134 le32_to_cpu(ofdm->ina_cnt),
135 accum_ofdm->ina_cnt,
136 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos,
138 fmt_table, "fina_cnt:",
139 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
140 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 fmt_table, "plcp_err:",
143 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
144 delta_ofdm->plcp_err, max_ofdm->plcp_err);
145 pos += scnprintf(buf + pos, bufsz - pos,
146 fmt_table, "crc32_err:",
147 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
148 delta_ofdm->crc32_err, max_ofdm->crc32_err);
149 pos += scnprintf(buf + pos, bufsz - pos,
150 fmt_table, "overrun_err:",
151 le32_to_cpu(ofdm->overrun_err),
152 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
153 max_ofdm->overrun_err);
154 pos += scnprintf(buf + pos, bufsz - pos,
155 fmt_table, "early_overrun_err:",
156 le32_to_cpu(ofdm->early_overrun_err),
157 accum_ofdm->early_overrun_err,
158 delta_ofdm->early_overrun_err,
159 max_ofdm->early_overrun_err);
160 pos += scnprintf(buf + pos, bufsz - pos,
161 fmt_table, "crc32_good:",
162 le32_to_cpu(ofdm->crc32_good),
163 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
164 max_ofdm->crc32_good);
165 pos += scnprintf(buf + pos, bufsz - pos,
166 fmt_table, "false_alarm_cnt:",
167 le32_to_cpu(ofdm->false_alarm_cnt),
168 accum_ofdm->false_alarm_cnt,
169 delta_ofdm->false_alarm_cnt,
170 max_ofdm->false_alarm_cnt);
171 pos += scnprintf(buf + pos, bufsz - pos,
172 fmt_table, "fina_sync_err_cnt:",
173 le32_to_cpu(ofdm->fina_sync_err_cnt),
174 accum_ofdm->fina_sync_err_cnt,
175 delta_ofdm->fina_sync_err_cnt,
176 max_ofdm->fina_sync_err_cnt);
177 pos += scnprintf(buf + pos, bufsz - pos,
178 fmt_table, "sfd_timeout:",
179 le32_to_cpu(ofdm->sfd_timeout),
180 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
181 max_ofdm->sfd_timeout);
182 pos += scnprintf(buf + pos, bufsz - pos,
183 fmt_table, "fina_timeout:",
184 le32_to_cpu(ofdm->fina_timeout),
185 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
186 max_ofdm->fina_timeout);
187 pos += scnprintf(buf + pos, bufsz - pos,
188 fmt_table, "unresponded_rts:",
189 le32_to_cpu(ofdm->unresponded_rts),
190 accum_ofdm->unresponded_rts,
191 delta_ofdm->unresponded_rts,
192 max_ofdm->unresponded_rts);
193 pos += scnprintf(buf + pos, bufsz - pos,
194 fmt_table, "rxe_frame_lmt_ovrun:",
195 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
196 accum_ofdm->rxe_frame_limit_overrun,
197 delta_ofdm->rxe_frame_limit_overrun,
198 max_ofdm->rxe_frame_limit_overrun);
199 pos += scnprintf(buf + pos, bufsz - pos,
200 fmt_table, "sent_ack_cnt:",
201 le32_to_cpu(ofdm->sent_ack_cnt),
202 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
203 max_ofdm->sent_ack_cnt);
204 pos += scnprintf(buf + pos, bufsz - pos,
205 fmt_table, "sent_cts_cnt:",
206 le32_to_cpu(ofdm->sent_cts_cnt),
207 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
208 max_ofdm->sent_cts_cnt);
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_table, "sent_ba_rsp_cnt:",
211 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
212 accum_ofdm->sent_ba_rsp_cnt,
213 delta_ofdm->sent_ba_rsp_cnt,
214 max_ofdm->sent_ba_rsp_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "dsp_self_kill:",
217 le32_to_cpu(ofdm->dsp_self_kill),
218 accum_ofdm->dsp_self_kill,
219 delta_ofdm->dsp_self_kill,
220 max_ofdm->dsp_self_kill);
221 pos += scnprintf(buf + pos, bufsz - pos,
222 fmt_table, "mh_format_err:",
223 le32_to_cpu(ofdm->mh_format_err),
224 accum_ofdm->mh_format_err,
225 delta_ofdm->mh_format_err,
226 max_ofdm->mh_format_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "re_acq_main_rssi_sum:",
229 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
230 accum_ofdm->re_acq_main_rssi_sum,
231 delta_ofdm->re_acq_main_rssi_sum,
232 max_ofdm->re_acq_main_rssi_sum);
233
234 pos += scnprintf(buf + pos, bufsz - pos,
235 fmt_header, "Statistics_Rx - CCK:");
236 pos += scnprintf(buf + pos, bufsz - pos,
237 fmt_table, "ina_cnt:",
238 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
239 delta_cck->ina_cnt, max_cck->ina_cnt);
240 pos += scnprintf(buf + pos, bufsz - pos,
241 fmt_table, "fina_cnt:",
242 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
243 delta_cck->fina_cnt, max_cck->fina_cnt);
244 pos += scnprintf(buf + pos, bufsz - pos,
245 fmt_table, "plcp_err:",
246 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
247 delta_cck->plcp_err, max_cck->plcp_err);
248 pos += scnprintf(buf + pos, bufsz - pos,
249 fmt_table, "crc32_err:",
250 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
251 delta_cck->crc32_err, max_cck->crc32_err);
252 pos += scnprintf(buf + pos, bufsz - pos,
253 fmt_table, "overrun_err:",
254 le32_to_cpu(cck->overrun_err),
255 accum_cck->overrun_err, delta_cck->overrun_err,
256 max_cck->overrun_err);
257 pos += scnprintf(buf + pos, bufsz - pos,
258 fmt_table, "early_overrun_err:",
259 le32_to_cpu(cck->early_overrun_err),
260 accum_cck->early_overrun_err,
261 delta_cck->early_overrun_err,
262 max_cck->early_overrun_err);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "crc32_good:",
265 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
266 delta_cck->crc32_good, max_cck->crc32_good);
267 pos += scnprintf(buf + pos, bufsz - pos,
268 fmt_table, "false_alarm_cnt:",
269 le32_to_cpu(cck->false_alarm_cnt),
270 accum_cck->false_alarm_cnt,
271 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
272 pos += scnprintf(buf + pos, bufsz - pos,
273 fmt_table, "fina_sync_err_cnt:",
274 le32_to_cpu(cck->fina_sync_err_cnt),
275 accum_cck->fina_sync_err_cnt,
276 delta_cck->fina_sync_err_cnt,
277 max_cck->fina_sync_err_cnt);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 fmt_table, "sfd_timeout:",
280 le32_to_cpu(cck->sfd_timeout),
281 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
282 max_cck->sfd_timeout);
283 pos += scnprintf(buf + pos, bufsz - pos,
284 fmt_table, "fina_timeout:",
285 le32_to_cpu(cck->fina_timeout),
286 accum_cck->fina_timeout, delta_cck->fina_timeout,
287 max_cck->fina_timeout);
288 pos += scnprintf(buf + pos, bufsz - pos,
289 fmt_table, "unresponded_rts:",
290 le32_to_cpu(cck->unresponded_rts),
291 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
292 max_cck->unresponded_rts);
293 pos += scnprintf(buf + pos, bufsz - pos,
294 fmt_table, "rxe_frame_lmt_ovrun:",
295 le32_to_cpu(cck->rxe_frame_limit_overrun),
296 accum_cck->rxe_frame_limit_overrun,
297 delta_cck->rxe_frame_limit_overrun,
298 max_cck->rxe_frame_limit_overrun);
299 pos += scnprintf(buf + pos, bufsz - pos,
300 fmt_table, "sent_ack_cnt:",
301 le32_to_cpu(cck->sent_ack_cnt),
302 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
303 max_cck->sent_ack_cnt);
304 pos += scnprintf(buf + pos, bufsz - pos,
305 fmt_table, "sent_cts_cnt:",
306 le32_to_cpu(cck->sent_cts_cnt),
307 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
308 max_cck->sent_cts_cnt);
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "sent_ba_rsp_cnt:",
311 le32_to_cpu(cck->sent_ba_rsp_cnt),
312 accum_cck->sent_ba_rsp_cnt,
313 delta_cck->sent_ba_rsp_cnt,
314 max_cck->sent_ba_rsp_cnt);
315 pos += scnprintf(buf + pos, bufsz - pos,
316 fmt_table, "dsp_self_kill:",
317 le32_to_cpu(cck->dsp_self_kill),
318 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
319 max_cck->dsp_self_kill);
320 pos += scnprintf(buf + pos, bufsz - pos,
321 fmt_table, "mh_format_err:",
322 le32_to_cpu(cck->mh_format_err),
323 accum_cck->mh_format_err, delta_cck->mh_format_err,
324 max_cck->mh_format_err);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "re_acq_main_rssi_sum:",
327 le32_to_cpu(cck->re_acq_main_rssi_sum),
328 accum_cck->re_acq_main_rssi_sum,
329 delta_cck->re_acq_main_rssi_sum,
330 max_cck->re_acq_main_rssi_sum);
331
332 pos += scnprintf(buf + pos, bufsz - pos,
333 fmt_header, "Statistics_Rx - GENERAL:");
334 pos += scnprintf(buf + pos, bufsz - pos,
335 fmt_table, "bogus_cts:",
336 le32_to_cpu(general->bogus_cts),
337 accum_general->bogus_cts, delta_general->bogus_cts,
338 max_general->bogus_cts);
339 pos += scnprintf(buf + pos, bufsz - pos,
340 fmt_table, "bogus_ack:",
341 le32_to_cpu(general->bogus_ack),
342 accum_general->bogus_ack, delta_general->bogus_ack,
343 max_general->bogus_ack);
344 pos += scnprintf(buf + pos, bufsz - pos,
345 fmt_table, "non_bssid_frames:",
346 le32_to_cpu(general->non_bssid_frames),
347 accum_general->non_bssid_frames,
348 delta_general->non_bssid_frames,
349 max_general->non_bssid_frames);
350 pos += scnprintf(buf + pos, bufsz - pos,
351 fmt_table, "filtered_frames:",
352 le32_to_cpu(general->filtered_frames),
353 accum_general->filtered_frames,
354 delta_general->filtered_frames,
355 max_general->filtered_frames);
356 pos += scnprintf(buf + pos, bufsz - pos,
357 fmt_table, "non_channel_beacons:",
358 le32_to_cpu(general->non_channel_beacons),
359 accum_general->non_channel_beacons,
360 delta_general->non_channel_beacons,
361 max_general->non_channel_beacons);
362 pos += scnprintf(buf + pos, bufsz - pos,
363 fmt_table, "channel_beacons:",
364 le32_to_cpu(general->channel_beacons),
365 accum_general->channel_beacons,
366 delta_general->channel_beacons,
367 max_general->channel_beacons);
368 pos += scnprintf(buf + pos, bufsz - pos,
369 fmt_table, "num_missed_bcon:",
370 le32_to_cpu(general->num_missed_bcon),
371 accum_general->num_missed_bcon,
372 delta_general->num_missed_bcon,
373 max_general->num_missed_bcon);
374 pos += scnprintf(buf + pos, bufsz - pos,
375 fmt_table, "adc_rx_saturation_time:",
376 le32_to_cpu(general->adc_rx_saturation_time),
377 accum_general->adc_rx_saturation_time,
378 delta_general->adc_rx_saturation_time,
379 max_general->adc_rx_saturation_time);
380 pos += scnprintf(buf + pos, bufsz - pos,
381 fmt_table, "ina_detect_search_tm:",
382 le32_to_cpu(general->ina_detection_search_time),
383 accum_general->ina_detection_search_time,
384 delta_general->ina_detection_search_time,
385 max_general->ina_detection_search_time);
386 pos += scnprintf(buf + pos, bufsz - pos,
387 fmt_table, "beacon_silence_rssi_a:",
388 le32_to_cpu(general->beacon_silence_rssi_a),
389 accum_general->beacon_silence_rssi_a,
390 delta_general->beacon_silence_rssi_a,
391 max_general->beacon_silence_rssi_a);
392 pos += scnprintf(buf + pos, bufsz - pos,
393 fmt_table, "beacon_silence_rssi_b:",
394 le32_to_cpu(general->beacon_silence_rssi_b),
395 accum_general->beacon_silence_rssi_b,
396 delta_general->beacon_silence_rssi_b,
397 max_general->beacon_silence_rssi_b);
398 pos += scnprintf(buf + pos, bufsz - pos,
399 fmt_table, "beacon_silence_rssi_c:",
400 le32_to_cpu(general->beacon_silence_rssi_c),
401 accum_general->beacon_silence_rssi_c,
402 delta_general->beacon_silence_rssi_c,
403 max_general->beacon_silence_rssi_c);
404 pos += scnprintf(buf + pos, bufsz - pos,
405 fmt_table, "interference_data_flag:",
406 le32_to_cpu(general->interference_data_flag),
407 accum_general->interference_data_flag,
408 delta_general->interference_data_flag,
409 max_general->interference_data_flag);
410 pos += scnprintf(buf + pos, bufsz - pos,
411 fmt_table, "channel_load:",
412 le32_to_cpu(general->channel_load),
413 accum_general->channel_load,
414 delta_general->channel_load,
415 max_general->channel_load);
416 pos += scnprintf(buf + pos, bufsz - pos,
417 fmt_table, "dsp_false_alarms:",
418 le32_to_cpu(general->dsp_false_alarms),
419 accum_general->dsp_false_alarms,
420 delta_general->dsp_false_alarms,
421 max_general->dsp_false_alarms);
422 pos += scnprintf(buf + pos, bufsz - pos,
423 fmt_table, "beacon_rssi_a:",
424 le32_to_cpu(general->beacon_rssi_a),
425 accum_general->beacon_rssi_a,
426 delta_general->beacon_rssi_a,
427 max_general->beacon_rssi_a);
428 pos += scnprintf(buf + pos, bufsz - pos,
429 fmt_table, "beacon_rssi_b:",
430 le32_to_cpu(general->beacon_rssi_b),
431 accum_general->beacon_rssi_b,
432 delta_general->beacon_rssi_b,
433 max_general->beacon_rssi_b);
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_table, "beacon_rssi_c:",
436 le32_to_cpu(general->beacon_rssi_c),
437 accum_general->beacon_rssi_c,
438 delta_general->beacon_rssi_c,
439 max_general->beacon_rssi_c);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "beacon_energy_a:",
442 le32_to_cpu(general->beacon_energy_a),
443 accum_general->beacon_energy_a,
444 delta_general->beacon_energy_a,
445 max_general->beacon_energy_a);
446 pos += scnprintf(buf + pos, bufsz - pos,
447 fmt_table, "beacon_energy_b:",
448 le32_to_cpu(general->beacon_energy_b),
449 accum_general->beacon_energy_b,
450 delta_general->beacon_energy_b,
451 max_general->beacon_energy_b);
452 pos += scnprintf(buf + pos, bufsz - pos,
453 fmt_table, "beacon_energy_c:",
454 le32_to_cpu(general->beacon_energy_c),
455 accum_general->beacon_energy_c,
456 delta_general->beacon_energy_c,
457 max_general->beacon_energy_c);
458
459 pos += scnprintf(buf + pos, bufsz - pos,
460 fmt_header, "Statistics_Rx - OFDM_HT:");
461 pos += scnprintf(buf + pos, bufsz - pos,
462 fmt_table, "plcp_err:",
463 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
464 delta_ht->plcp_err, max_ht->plcp_err);
465 pos += scnprintf(buf + pos, bufsz - pos,
466 fmt_table, "overrun_err:",
467 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
468 delta_ht->overrun_err, max_ht->overrun_err);
469 pos += scnprintf(buf + pos, bufsz - pos,
470 fmt_table, "early_overrun_err:",
471 le32_to_cpu(ht->early_overrun_err),
472 accum_ht->early_overrun_err,
473 delta_ht->early_overrun_err,
474 max_ht->early_overrun_err);
475 pos += scnprintf(buf + pos, bufsz - pos,
476 fmt_table, "crc32_good:",
477 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
478 delta_ht->crc32_good, max_ht->crc32_good);
479 pos += scnprintf(buf + pos, bufsz - pos,
480 fmt_table, "crc32_err:",
481 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
482 delta_ht->crc32_err, max_ht->crc32_err);
483 pos += scnprintf(buf + pos, bufsz - pos,
484 fmt_table, "mh_format_err:",
485 le32_to_cpu(ht->mh_format_err),
486 accum_ht->mh_format_err,
487 delta_ht->mh_format_err, max_ht->mh_format_err);
488 pos += scnprintf(buf + pos, bufsz - pos,
489 fmt_table, "agg_crc32_good:",
490 le32_to_cpu(ht->agg_crc32_good),
491 accum_ht->agg_crc32_good,
492 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
493 pos += scnprintf(buf + pos, bufsz - pos,
494 fmt_table, "agg_mpdu_cnt:",
495 le32_to_cpu(ht->agg_mpdu_cnt),
496 accum_ht->agg_mpdu_cnt,
497 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
498 pos += scnprintf(buf + pos, bufsz - pos,
499 fmt_table, "agg_cnt:",
500 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
501 delta_ht->agg_cnt, max_ht->agg_cnt);
502 pos += scnprintf(buf + pos, bufsz - pos,
503 fmt_table, "unsupport_mcs:",
504 le32_to_cpu(ht->unsupport_mcs),
505 accum_ht->unsupport_mcs,
506 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
507
508 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
509 kfree(buf);
510 return ret;
511}
512
513ssize_t iwl_ucode_tx_stats_read(struct file *file,
514 char __user *user_buf,
515 size_t count, loff_t *ppos)
516{
517 struct iwl_priv *priv = file->private_data;
518 int pos = 0;
519 char *buf;
520 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
521 ssize_t ret;
522 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
523
524 if (!iwl_is_alive(priv))
525 return -EAGAIN;
526
527 buf = kzalloc(bufsz, GFP_KERNEL);
528 if (!buf) {
529 IWL_ERR(priv, "Can not allocate Buffer\n");
530 return -ENOMEM;
531 }
532
533 /* the statistic information display here is based on
534 * the last statistics notification from uCode
535 * might not reflect the current uCode activity
536 */
537 if (iwl_bt_statistics(priv)) {
538 tx = &priv->_agn.statistics_bt.tx;
539 accum_tx = &priv->_agn.accum_statistics_bt.tx;
540 delta_tx = &priv->_agn.delta_statistics_bt.tx;
541 max_tx = &priv->_agn.max_delta_bt.tx;
542 } else {
543 tx = &priv->_agn.statistics.tx;
544 accum_tx = &priv->_agn.accum_statistics.tx;
545 delta_tx = &priv->_agn.delta_statistics.tx;
546 max_tx = &priv->_agn.max_delta.tx;
547 }
548
549 pos += iwl_statistics_flag(priv, buf, bufsz);
550 pos += scnprintf(buf + pos, bufsz - pos,
551 fmt_header, "Statistics_Tx:");
552 pos += scnprintf(buf + pos, bufsz - pos,
553 fmt_table, "preamble:",
554 le32_to_cpu(tx->preamble_cnt),
555 accum_tx->preamble_cnt,
556 delta_tx->preamble_cnt, max_tx->preamble_cnt);
557 pos += scnprintf(buf + pos, bufsz - pos,
558 fmt_table, "rx_detected_cnt:",
559 le32_to_cpu(tx->rx_detected_cnt),
560 accum_tx->rx_detected_cnt,
561 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "bt_prio_defer_cnt:",
564 le32_to_cpu(tx->bt_prio_defer_cnt),
565 accum_tx->bt_prio_defer_cnt,
566 delta_tx->bt_prio_defer_cnt,
567 max_tx->bt_prio_defer_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "bt_prio_kill_cnt:",
570 le32_to_cpu(tx->bt_prio_kill_cnt),
571 accum_tx->bt_prio_kill_cnt,
572 delta_tx->bt_prio_kill_cnt,
573 max_tx->bt_prio_kill_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "few_bytes_cnt:",
576 le32_to_cpu(tx->few_bytes_cnt),
577 accum_tx->few_bytes_cnt,
578 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
579 pos += scnprintf(buf + pos, bufsz - pos,
580 fmt_table, "cts_timeout:",
581 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
582 delta_tx->cts_timeout, max_tx->cts_timeout);
583 pos += scnprintf(buf + pos, bufsz - pos,
584 fmt_table, "ack_timeout:",
585 le32_to_cpu(tx->ack_timeout),
586 accum_tx->ack_timeout,
587 delta_tx->ack_timeout, max_tx->ack_timeout);
588 pos += scnprintf(buf + pos, bufsz - pos,
589 fmt_table, "expected_ack_cnt:",
590 le32_to_cpu(tx->expected_ack_cnt),
591 accum_tx->expected_ack_cnt,
592 delta_tx->expected_ack_cnt,
593 max_tx->expected_ack_cnt);
594 pos += scnprintf(buf + pos, bufsz - pos,
595 fmt_table, "actual_ack_cnt:",
596 le32_to_cpu(tx->actual_ack_cnt),
597 accum_tx->actual_ack_cnt,
598 delta_tx->actual_ack_cnt,
599 max_tx->actual_ack_cnt);
600 pos += scnprintf(buf + pos, bufsz - pos,
601 fmt_table, "dump_msdu_cnt:",
602 le32_to_cpu(tx->dump_msdu_cnt),
603 accum_tx->dump_msdu_cnt,
604 delta_tx->dump_msdu_cnt,
605 max_tx->dump_msdu_cnt);
606 pos += scnprintf(buf + pos, bufsz - pos,
607 fmt_table, "abort_nxt_frame_mismatch:",
608 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
609 accum_tx->burst_abort_next_frame_mismatch_cnt,
610 delta_tx->burst_abort_next_frame_mismatch_cnt,
611 max_tx->burst_abort_next_frame_mismatch_cnt);
612 pos += scnprintf(buf + pos, bufsz - pos,
613 fmt_table, "abort_missing_nxt_frame:",
614 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
615 accum_tx->burst_abort_missing_next_frame_cnt,
616 delta_tx->burst_abort_missing_next_frame_cnt,
617 max_tx->burst_abort_missing_next_frame_cnt);
618 pos += scnprintf(buf + pos, bufsz - pos,
619 fmt_table, "cts_timeout_collision:",
620 le32_to_cpu(tx->cts_timeout_collision),
621 accum_tx->cts_timeout_collision,
622 delta_tx->cts_timeout_collision,
623 max_tx->cts_timeout_collision);
624 pos += scnprintf(buf + pos, bufsz - pos,
625 fmt_table, "ack_ba_timeout_collision:",
626 le32_to_cpu(tx->ack_or_ba_timeout_collision),
627 accum_tx->ack_or_ba_timeout_collision,
628 delta_tx->ack_or_ba_timeout_collision,
629 max_tx->ack_or_ba_timeout_collision);
630 pos += scnprintf(buf + pos, bufsz - pos,
631 fmt_table, "agg ba_timeout:",
632 le32_to_cpu(tx->agg.ba_timeout),
633 accum_tx->agg.ba_timeout,
634 delta_tx->agg.ba_timeout,
635 max_tx->agg.ba_timeout);
636 pos += scnprintf(buf + pos, bufsz - pos,
637 fmt_table, "agg ba_resched_frames:",
638 le32_to_cpu(tx->agg.ba_reschedule_frames),
639 accum_tx->agg.ba_reschedule_frames,
640 delta_tx->agg.ba_reschedule_frames,
641 max_tx->agg.ba_reschedule_frames);
642 pos += scnprintf(buf + pos, bufsz - pos,
643 fmt_table, "agg scd_query_agg_frame:",
644 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
645 accum_tx->agg.scd_query_agg_frame_cnt,
646 delta_tx->agg.scd_query_agg_frame_cnt,
647 max_tx->agg.scd_query_agg_frame_cnt);
648 pos += scnprintf(buf + pos, bufsz - pos,
649 fmt_table, "agg scd_query_no_agg:",
650 le32_to_cpu(tx->agg.scd_query_no_agg),
651 accum_tx->agg.scd_query_no_agg,
652 delta_tx->agg.scd_query_no_agg,
653 max_tx->agg.scd_query_no_agg);
654 pos += scnprintf(buf + pos, bufsz - pos,
655 fmt_table, "agg scd_query_agg:",
656 le32_to_cpu(tx->agg.scd_query_agg),
657 accum_tx->agg.scd_query_agg,
658 delta_tx->agg.scd_query_agg,
659 max_tx->agg.scd_query_agg);
660 pos += scnprintf(buf + pos, bufsz - pos,
661 fmt_table, "agg scd_query_mismatch:",
662 le32_to_cpu(tx->agg.scd_query_mismatch),
663 accum_tx->agg.scd_query_mismatch,
664 delta_tx->agg.scd_query_mismatch,
665 max_tx->agg.scd_query_mismatch);
666 pos += scnprintf(buf + pos, bufsz - pos,
667 fmt_table, "agg frame_not_ready:",
668 le32_to_cpu(tx->agg.frame_not_ready),
669 accum_tx->agg.frame_not_ready,
670 delta_tx->agg.frame_not_ready,
671 max_tx->agg.frame_not_ready);
672 pos += scnprintf(buf + pos, bufsz - pos,
673 fmt_table, "agg underrun:",
674 le32_to_cpu(tx->agg.underrun),
675 accum_tx->agg.underrun,
676 delta_tx->agg.underrun, max_tx->agg.underrun);
677 pos += scnprintf(buf + pos, bufsz - pos,
678 fmt_table, "agg bt_prio_kill:",
679 le32_to_cpu(tx->agg.bt_prio_kill),
680 accum_tx->agg.bt_prio_kill,
681 delta_tx->agg.bt_prio_kill,
682 max_tx->agg.bt_prio_kill);
683 pos += scnprintf(buf + pos, bufsz - pos,
684 fmt_table, "agg rx_ba_rsp_cnt:",
685 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
686 accum_tx->agg.rx_ba_rsp_cnt,
687 delta_tx->agg.rx_ba_rsp_cnt,
688 max_tx->agg.rx_ba_rsp_cnt);
689
690 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
691 pos += scnprintf(buf + pos, bufsz - pos,
692 "tx power: (1/2 dB step)\n");
693 if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
694 pos += scnprintf(buf + pos, bufsz - pos,
695 fmt_hex, "antenna A:",
696 tx->tx_power.ant_a);
697 if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
698 pos += scnprintf(buf + pos, bufsz - pos,
699 fmt_hex, "antenna B:",
700 tx->tx_power.ant_b);
701 if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
702 pos += scnprintf(buf + pos, bufsz - pos,
703 fmt_hex, "antenna C:",
704 tx->tx_power.ant_c);
705 }
706 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
707 kfree(buf);
708 return ret;
709}
710
711ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
712 size_t count, loff_t *ppos)
713{
714 struct iwl_priv *priv = file->private_data;
715 int pos = 0;
716 char *buf;
717 int bufsz = sizeof(struct statistics_general) * 10 + 300;
718 ssize_t ret;
719 struct statistics_general_common *general, *accum_general;
720 struct statistics_general_common *delta_general, *max_general;
721 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
722 struct statistics_div *div, *accum_div, *delta_div, *max_div;
723
724 if (!iwl_is_alive(priv))
725 return -EAGAIN;
726
727 buf = kzalloc(bufsz, GFP_KERNEL);
728 if (!buf) {
729 IWL_ERR(priv, "Cannot allocate buffer\n");
730 return -ENOMEM;
731 }
732
733 /* The statistics displayed here are based on the last
734 * statistics notification from the uCode and might not
735 * reflect the current uCode activity
736 */
737 if (iwl_bt_statistics(priv)) {
738 general = &priv->_agn.statistics_bt.general.common;
739 dbg = &priv->_agn.statistics_bt.general.common.dbg;
740 div = &priv->_agn.statistics_bt.general.common.div;
741 accum_general = &priv->_agn.accum_statistics_bt.general.common;
742 accum_dbg = &priv->_agn.accum_statistics_bt.general.common.dbg;
743 accum_div = &priv->_agn.accum_statistics_bt.general.common.div;
744 delta_general = &priv->_agn.delta_statistics_bt.general.common;
745 max_general = &priv->_agn.max_delta_bt.general.common;
746 delta_dbg = &priv->_agn.delta_statistics_bt.general.common.dbg;
747 max_dbg = &priv->_agn.max_delta_bt.general.common.dbg;
748 delta_div = &priv->_agn.delta_statistics_bt.general.common.div;
749 max_div = &priv->_agn.max_delta_bt.general.common.div;
750 } else {
751 general = &priv->_agn.statistics.general.common;
752 dbg = &priv->_agn.statistics.general.common.dbg;
753 div = &priv->_agn.statistics.general.common.div;
754 accum_general = &priv->_agn.accum_statistics.general.common;
755 accum_dbg = &priv->_agn.accum_statistics.general.common.dbg;
756 accum_div = &priv->_agn.accum_statistics.general.common.div;
757 delta_general = &priv->_agn.delta_statistics.general.common;
758 max_general = &priv->_agn.max_delta.general.common;
759 delta_dbg = &priv->_agn.delta_statistics.general.common.dbg;
760 max_dbg = &priv->_agn.max_delta.general.common.dbg;
761 delta_div = &priv->_agn.delta_statistics.general.common.div;
762 max_div = &priv->_agn.max_delta.general.common.div;
763 }
764
765 pos += iwl_statistics_flag(priv, buf, bufsz);
766 pos += scnprintf(buf + pos, bufsz - pos,
767 fmt_header, "Statistics_General:");
768 pos += scnprintf(buf + pos, bufsz - pos,
769 fmt_value, "temperature:",
770 le32_to_cpu(general->temperature));
771 pos += scnprintf(buf + pos, bufsz - pos,
772 fmt_value, "temperature_m:",
773 le32_to_cpu(general->temperature_m));
774 pos += scnprintf(buf + pos, bufsz - pos,
775 fmt_value, "ttl_timestamp:",
776 le32_to_cpu(general->ttl_timestamp));
777 pos += scnprintf(buf + pos, bufsz - pos,
778 fmt_table, "burst_check:",
779 le32_to_cpu(dbg->burst_check),
780 accum_dbg->burst_check,
781 delta_dbg->burst_check, max_dbg->burst_check);
782 pos += scnprintf(buf + pos, bufsz - pos,
783 fmt_table, "burst_count:",
784 le32_to_cpu(dbg->burst_count),
785 accum_dbg->burst_count,
786 delta_dbg->burst_count, max_dbg->burst_count);
787 pos += scnprintf(buf + pos, bufsz - pos,
788 fmt_table, "wait_for_silence_timeout_count:",
789 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
790 accum_dbg->wait_for_silence_timeout_cnt,
791 delta_dbg->wait_for_silence_timeout_cnt,
792 max_dbg->wait_for_silence_timeout_cnt);
793 pos += scnprintf(buf + pos, bufsz - pos,
794 fmt_table, "sleep_time:",
795 le32_to_cpu(general->sleep_time),
796 accum_general->sleep_time,
797 delta_general->sleep_time, max_general->sleep_time);
798 pos += scnprintf(buf + pos, bufsz - pos,
799 fmt_table, "slots_out:",
800 le32_to_cpu(general->slots_out),
801 accum_general->slots_out,
802 delta_general->slots_out, max_general->slots_out);
803 pos += scnprintf(buf + pos, bufsz - pos,
804 fmt_table, "slots_idle:",
805 le32_to_cpu(general->slots_idle),
806 accum_general->slots_idle,
807 delta_general->slots_idle, max_general->slots_idle);
808 pos += scnprintf(buf + pos, bufsz - pos,
809 fmt_table, "tx_on_a:",
810 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
811 delta_div->tx_on_a, max_div->tx_on_a);
812 pos += scnprintf(buf + pos, bufsz - pos,
813 fmt_table, "tx_on_b:",
814 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
815 delta_div->tx_on_b, max_div->tx_on_b);
816 pos += scnprintf(buf + pos, bufsz - pos,
817 fmt_table, "exec_time:",
818 le32_to_cpu(div->exec_time), accum_div->exec_time,
819 delta_div->exec_time, max_div->exec_time);
820 pos += scnprintf(buf + pos, bufsz - pos,
821 fmt_table, "probe_time:",
822 le32_to_cpu(div->probe_time), accum_div->probe_time,
823 delta_div->probe_time, max_div->probe_time);
824 pos += scnprintf(buf + pos, bufsz - pos,
825 fmt_table, "rx_enable_counter:",
826 le32_to_cpu(general->rx_enable_counter),
827 accum_general->rx_enable_counter,
828 delta_general->rx_enable_counter,
829 max_general->rx_enable_counter);
830 pos += scnprintf(buf + pos, bufsz - pos,
831 fmt_table, "num_of_sos_states:",
832 le32_to_cpu(general->num_of_sos_states),
833 accum_general->num_of_sos_states,
834 delta_general->num_of_sos_states,
835 max_general->num_of_sos_states);
836 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
837 kfree(buf);
838 return ret;
839}
840
841ssize_t iwl_ucode_bt_stats_read(struct file *file,
842 char __user *user_buf,
843 size_t count, loff_t *ppos)
844{
845 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
846 int pos = 0;
847 char *buf;
848 int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200;
849 ssize_t ret;
850 struct statistics_bt_activity *bt, *accum_bt;
851
852 if (!iwl_is_alive(priv))
853 return -EAGAIN;
854
855 if (!priv->bt_enable_flag)
856 return -EINVAL;
857
858 /* make request to uCode to retrieve statistics information */
859 mutex_lock(&priv->mutex);
860 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
861 mutex_unlock(&priv->mutex);
862
863 if (ret) {
864 IWL_ERR(priv,
865 "Error sending statistics request: %zd\n", ret);
866 return -EAGAIN;
867 }
868 buf = kzalloc(bufsz, GFP_KERNEL);
869 if (!buf) {
870 IWL_ERR(priv, "Cannot allocate buffer\n");
871 return -ENOMEM;
872 }
873
874 /*
875 * The statistics displayed here are based on the last
876 * statistics notification from the uCode and might not
877 * reflect the current uCode activity
878 */
879 bt = &priv->_agn.statistics_bt.general.activity;
880 accum_bt = &priv->_agn.accum_statistics_bt.general.activity;
881
882 pos += iwl_statistics_flag(priv, buf, bufsz);
883 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n");
884 pos += scnprintf(buf + pos, bufsz - pos,
885 "\t\t\tcurrent\t\t\taccumulative\n");
886 pos += scnprintf(buf + pos, bufsz - pos,
887 "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
888 le32_to_cpu(bt->hi_priority_tx_req_cnt),
889 accum_bt->hi_priority_tx_req_cnt);
890 pos += scnprintf(buf + pos, bufsz - pos,
891 "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
892 le32_to_cpu(bt->hi_priority_tx_denied_cnt),
893 accum_bt->hi_priority_tx_denied_cnt);
894 pos += scnprintf(buf + pos, bufsz - pos,
895 "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
896 le32_to_cpu(bt->lo_priority_tx_req_cnt),
897 accum_bt->lo_priority_tx_req_cnt);
898 pos += scnprintf(buf + pos, bufsz - pos,
899 "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
900 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
901 accum_bt->lo_priority_tx_denied_cnt);
902 pos += scnprintf(buf + pos, bufsz - pos,
903 "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
904 le32_to_cpu(bt->hi_priority_rx_req_cnt),
905 accum_bt->hi_priority_rx_req_cnt);
906 pos += scnprintf(buf + pos, bufsz - pos,
907 "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
908 le32_to_cpu(bt->hi_priority_rx_denied_cnt),
909 accum_bt->hi_priority_rx_denied_cnt);
910 pos += scnprintf(buf + pos, bufsz - pos,
911 "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
912 le32_to_cpu(bt->lo_priority_rx_req_cnt),
913 accum_bt->lo_priority_rx_req_cnt);
914 pos += scnprintf(buf + pos, bufsz - pos,
915 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
916 le32_to_cpu(bt->lo_priority_rx_denied_cnt),
917 accum_bt->lo_priority_rx_denied_cnt);
918
919 pos += scnprintf(buf + pos, bufsz - pos,
920 "(rx)num_bt_kills:\t\t%u\t\t\t%u\n",
921 le32_to_cpu(priv->_agn.statistics_bt.rx.
922 general.num_bt_kills),
923 priv->_agn.accum_statistics_bt.rx.
924 general.num_bt_kills);
925
926 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
927 kfree(buf);
928 return ret;
929}
930
931ssize_t iwl_reply_tx_error_read(struct file *file,
932 char __user *user_buf,
933 size_t count, loff_t *ppos)
934{
935 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
936 int pos = 0;
937 char *buf;
938 int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
939 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
940 ssize_t ret;
941
942 if (!iwl_is_alive(priv))
943 return -EAGAIN;
944
945 buf = kzalloc(bufsz, GFP_KERNEL);
946 if (!buf) {
947 IWL_ERR(priv, "Cannot allocate buffer\n");
948 return -ENOMEM;
949 }
950
951 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
952 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
953 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
954 priv->_agn.reply_tx_stats.pp_delay);
955 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
956 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
957 priv->_agn.reply_tx_stats.pp_few_bytes);
958 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
959 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
960 priv->_agn.reply_tx_stats.pp_bt_prio);
961 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
962 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
963 priv->_agn.reply_tx_stats.pp_quiet_period);
964 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
965 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
966 priv->_agn.reply_tx_stats.pp_calc_ttak);
967 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
968 iwl_get_tx_fail_reason(
969 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
970 priv->_agn.reply_tx_stats.int_crossed_retry);
971 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
972 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
973 priv->_agn.reply_tx_stats.short_limit);
974 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
975 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
976 priv->_agn.reply_tx_stats.long_limit);
977 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
978 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
979 priv->_agn.reply_tx_stats.fifo_underrun);
980 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
981 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
982 priv->_agn.reply_tx_stats.drain_flow);
983 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
984 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
985 priv->_agn.reply_tx_stats.rfkill_flush);
986 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
987 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
988 priv->_agn.reply_tx_stats.life_expire);
989 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
990 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
991 priv->_agn.reply_tx_stats.dest_ps);
992 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
993 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
994 priv->_agn.reply_tx_stats.host_abort);
995 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
996 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
997 priv->_agn.reply_tx_stats.bt_retry);
998 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
999 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
1000 priv->_agn.reply_tx_stats.sta_invalid);
1001 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1002 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
1003 priv->_agn.reply_tx_stats.frag_drop);
1004 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1005 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
1006 priv->_agn.reply_tx_stats.tid_disable);
1007 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1008 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
1009 priv->_agn.reply_tx_stats.fifo_flush);
1010 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1011 iwl_get_tx_fail_reason(
1012 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
1013 priv->_agn.reply_tx_stats.insuff_cf_poll);
1014 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1015 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
1016 priv->_agn.reply_tx_stats.fail_hw_drop);
1017 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1018 iwl_get_tx_fail_reason(
1019 TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
1020 priv->_agn.reply_tx_stats.sta_color_mismatch);
1021 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1022 priv->_agn.reply_tx_stats.unknown);
1023
1024 pos += scnprintf(buf + pos, bufsz - pos,
1025 "\nStatistics_Agg_TX_Error:\n");
1026
1027 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1028 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
1029 priv->_agn.reply_agg_tx_stats.underrun);
1030 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1031 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
1032 priv->_agn.reply_agg_tx_stats.bt_prio);
1033 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1034 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
1035 priv->_agn.reply_agg_tx_stats.few_bytes);
1036 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1037 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
1038 priv->_agn.reply_agg_tx_stats.abort);
1039 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1040 iwl_get_agg_tx_fail_reason(
1041 AGG_TX_STATE_LAST_SENT_TTL_MSK),
1042 priv->_agn.reply_agg_tx_stats.last_sent_ttl);
1043 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1044 iwl_get_agg_tx_fail_reason(
1045 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
1046 priv->_agn.reply_agg_tx_stats.last_sent_try);
1047 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1048 iwl_get_agg_tx_fail_reason(
1049 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
1050 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill);
1051 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1052 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
1053 priv->_agn.reply_agg_tx_stats.scd_query);
1054 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1055 iwl_get_agg_tx_fail_reason(
1056 AGG_TX_STATE_TEST_BAD_CRC32_MSK),
1057 priv->_agn.reply_agg_tx_stats.bad_crc32);
1058 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1059 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
1060 priv->_agn.reply_agg_tx_stats.response);
1061 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1062 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
1063 priv->_agn.reply_agg_tx_stats.dump_tx);
1064 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1065 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
1066 priv->_agn.reply_agg_tx_stats.delay_tx);
1067 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1068 priv->_agn.reply_agg_tx_stats.unknown);
1069
1070 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1071 kfree(buf);
1072 return ret;
1073}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
deleted file mode 100644
index f2573b5486cd..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_DEBUGFS
34ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
39 size_t count, loff_t *ppos);
40ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
41 size_t count, loff_t *ppos);
42ssize_t iwl_reply_tx_error_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos);
44#else
45static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
46 size_t count, loff_t *ppos)
47{
48 return 0;
49}
50static ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
51 size_t count, loff_t *ppos)
52{
53 return 0;
54}
55static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60static ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
61 size_t count, loff_t *ppos)
62{
63 return 0;
64}
65static ssize_t iwl_reply_tx_error_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 return 0;
69}
70#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index 27b5a3eec9dc..2ef9448b1c20 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -81,52 +81,13 @@
81 * 81 *
82******************************************************************************/ 82******************************************************************************/
83 83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
90int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92 u16 count;
93 int ret;
94
95 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96 /* Request semaphore */
97 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100 /* See if we got it */
101 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104 EEPROM_SEM_TIMEOUT);
105 if (ret >= 0) {
106 IWL_DEBUG_IO(priv,
107 "Acquired semaphore after %d tries.\n",
108 count+1);
109 return ret;
110 }
111 }
112
113 return ret;
114}
115
116void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
122
123int iwl_eeprom_check_version(struct iwl_priv *priv) 84int iwl_eeprom_check_version(struct iwl_priv *priv)
124{ 85{
125 u16 eeprom_ver; 86 u16 eeprom_ver;
126 u16 calib_ver; 87 u16 calib_ver;
127 88
128 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); 89 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = priv->cfg->ops->lib->eeprom_ops.calib_version(priv); 90 calib_ver = iwlagn_eeprom_calib_version(priv);
130 91
131 if (eeprom_ver < priv->cfg->eeprom_ver || 92 if (eeprom_ver < priv->cfg->eeprom_ver ||
132 calib_ver < priv->cfg->eeprom_calib_ver) 93 calib_ver < priv->cfg->eeprom_calib_ver)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 41543ad4cb84..b12c72d63ccb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -37,54 +37,6 @@
37#include "iwl-io.h" 37#include "iwl-io.h"
38#include "iwl-agn.h" 38#include "iwl-agn.h"
39 39
40int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
41 struct iwl_rxon_context *ctx)
42{
43 int ret = 0;
44 struct iwl5000_rxon_assoc_cmd rxon_assoc;
45 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
46 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
47
48 if ((rxon1->flags == rxon2->flags) &&
49 (rxon1->filter_flags == rxon2->filter_flags) &&
50 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
51 (rxon1->ofdm_ht_single_stream_basic_rates ==
52 rxon2->ofdm_ht_single_stream_basic_rates) &&
53 (rxon1->ofdm_ht_dual_stream_basic_rates ==
54 rxon2->ofdm_ht_dual_stream_basic_rates) &&
55 (rxon1->ofdm_ht_triple_stream_basic_rates ==
56 rxon2->ofdm_ht_triple_stream_basic_rates) &&
57 (rxon1->acquisition_data == rxon2->acquisition_data) &&
58 (rxon1->rx_chain == rxon2->rx_chain) &&
59 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
60 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
61 return 0;
62 }
63
64 rxon_assoc.flags = ctx->staging.flags;
65 rxon_assoc.filter_flags = ctx->staging.filter_flags;
66 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
67 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
68 rxon_assoc.reserved1 = 0;
69 rxon_assoc.reserved2 = 0;
70 rxon_assoc.reserved3 = 0;
71 rxon_assoc.ofdm_ht_single_stream_basic_rates =
72 ctx->staging.ofdm_ht_single_stream_basic_rates;
73 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
74 ctx->staging.ofdm_ht_dual_stream_basic_rates;
75 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
76 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
77 ctx->staging.ofdm_ht_triple_stream_basic_rates;
78 rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
79
80 ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
81 sizeof(rxon_assoc), &rxon_assoc, NULL);
82 if (ret)
83 return ret;
84
85 return ret;
86}
87
88int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) 40int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
89{ 41{
90 struct iwl_tx_ant_config_cmd tx_ant_cmd = { 42 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
@@ -102,12 +54,6 @@ int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
102 } 54 }
103} 55}
104 56
105/* Currently this is the superset of everything */
106static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len)
107{
108 return len;
109}
110
111static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) 57static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
112{ 58{
113 u16 size = (u16)sizeof(struct iwl_addsta_cmd); 59 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
@@ -364,7 +310,6 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
364} 310}
365 311
366struct iwl_hcmd_ops iwlagn_hcmd = { 312struct iwl_hcmd_ops iwlagn_hcmd = {
367 .rxon_assoc = iwlagn_send_rxon_assoc,
368 .commit_rxon = iwlagn_commit_rxon, 313 .commit_rxon = iwlagn_commit_rxon,
369 .set_rxon_chain = iwlagn_set_rxon_chain, 314 .set_rxon_chain = iwlagn_set_rxon_chain,
370 .set_tx_ant = iwlagn_send_tx_ant_config, 315 .set_tx_ant = iwlagn_send_tx_ant_config,
@@ -373,7 +318,6 @@ struct iwl_hcmd_ops iwlagn_hcmd = {
373}; 318};
374 319
375struct iwl_hcmd_ops iwlagn_bt_hcmd = { 320struct iwl_hcmd_ops iwlagn_bt_hcmd = {
376 .rxon_assoc = iwlagn_send_rxon_assoc,
377 .commit_rxon = iwlagn_commit_rxon, 321 .commit_rxon = iwlagn_commit_rxon,
378 .set_rxon_chain = iwlagn_set_rxon_chain, 322 .set_rxon_chain = iwlagn_set_rxon_chain,
379 .set_tx_ant = iwlagn_send_tx_ant_config, 323 .set_tx_ant = iwlagn_send_tx_ant_config,
@@ -382,7 +326,6 @@ struct iwl_hcmd_ops iwlagn_bt_hcmd = {
382}; 326};
383 327
384struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = { 328struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
385 .get_hcmd_size = iwlagn_get_hcmd_size,
386 .build_addsta_hcmd = iwlagn_build_addsta_hcmd, 329 .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
387 .gain_computation = iwlagn_gain_computation, 330 .gain_computation = iwlagn_gain_computation,
388 .chain_noise_reset = iwlagn_chain_noise_reset, 331 .chain_noise_reset = iwlagn_chain_noise_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index a52b82c8e7a6..7bd19f4e66de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index ed0148d714de..0d5fda44c3a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -59,8 +59,6 @@ void iwl_free_isr_ict(struct iwl_priv *priv)
59int iwl_alloc_isr_ict(struct iwl_priv *priv) 59int iwl_alloc_isr_ict(struct iwl_priv *priv)
60{ 60{
61 61
62 if (priv->cfg->base_params->use_isr_legacy)
63 return 0;
64 /* allocate shared data table */ 62
65 priv->_agn.ict_tbl_vir = 63 priv->_agn.ict_tbl_vir =
66 dma_alloc_coherent(&priv->pci_dev->dev, 64 dma_alloc_coherent(&priv->pci_dev->dev,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
deleted file mode 100644
index c1190d965614..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-agn-led.h"
45
46/* Send led command */
47static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
48{
49 struct iwl_host_cmd cmd = {
50 .id = REPLY_LEDS_CMD,
51 .len = sizeof(struct iwl_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56 u32 reg;
57
58 reg = iwl_read32(priv, CSR_LED_REG);
59 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
60 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
61
62 return iwl_send_cmd(priv, &cmd);
63}
64
65/* Set led register off */
66void iwlagn_led_enable(struct iwl_priv *priv)
67{
68 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
69}
70
71const struct iwl_led_ops iwlagn_led_ops = {
72 .cmd = iwl_send_led_cmd,
73};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 08ccb9496f76..8e79653aed9a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -172,6 +172,7 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
172 172
173static void iwlagn_set_tx_status(struct iwl_priv *priv, 173static void iwlagn_set_tx_status(struct iwl_priv *priv,
174 struct ieee80211_tx_info *info, 174 struct ieee80211_tx_info *info,
175 struct iwl_rxon_context *ctx,
175 struct iwlagn_tx_resp *tx_resp, 176 struct iwlagn_tx_resp *tx_resp,
176 int txq_id, bool is_agg) 177 int txq_id, bool is_agg)
177{ 178{
@@ -186,6 +187,13 @@ static void iwlagn_set_tx_status(struct iwl_priv *priv,
186 if (!iwl_is_tx_success(status)) 187 if (!iwl_is_tx_success(status))
187 iwlagn_count_tx_err_status(priv, status); 188 iwlagn_count_tx_err_status(priv, status);
188 189
190 if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
191 iwl_is_associated_ctx(ctx) && ctx->vif &&
192 ctx->vif->type == NL80211_IFTYPE_STATION) {
193 ctx->last_tx_rejected = true;
194 iwl_stop_queue(priv, &priv->txq[txq_id]);
195 }
196
189 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags " 197 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
190 "0x%x retries %d\n", 198 "0x%x retries %d\n",
191 txq_id, 199 txq_id,
@@ -242,15 +250,16 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
242 250
243 /* # frames attempted by Tx command */ 251 /* # frames attempted by Tx command */
244 if (agg->frame_count == 1) { 252 if (agg->frame_count == 1) {
253 struct iwl_tx_info *txb;
254
245 /* Only one frame was attempted; no block-ack will arrive */ 255 /* Only one frame was attempted; no block-ack will arrive */
246 idx = start_idx; 256 idx = start_idx;
247 257
248 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 258 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
249 agg->frame_count, agg->start_idx, idx); 259 agg->frame_count, agg->start_idx, idx);
250 iwlagn_set_tx_status(priv, 260 txb = &priv->txq[txq_id].txb[idx];
251 IEEE80211_SKB_CB( 261 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
252 priv->txq[txq_id].txb[idx].skb), 262 txb->ctx, tx_resp, txq_id, true);
253 tx_resp, txq_id, true);
254 agg->wait_for_ba = 0; 263 agg->wait_for_ba = 0;
255 } else { 264 } else {
256 /* Two or more frames were attempted; expect block-ack */ 265 /* Two or more frames were attempted; expect block-ack */
@@ -391,7 +400,8 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
391 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 400 struct iwl_tx_queue *txq = &priv->txq[txq_id];
392 struct ieee80211_tx_info *info; 401 struct ieee80211_tx_info *info;
393 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 402 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
394 u32 status = le16_to_cpu(tx_resp->status.status); 403 struct iwl_tx_info *txb;
404 u32 status = le16_to_cpu(tx_resp->status.status);
395 int tid; 405 int tid;
396 int sta_id; 406 int sta_id;
397 int freed; 407 int freed;
@@ -406,7 +416,8 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
406 } 416 }
407 417
408 txq->time_stamp = jiffies; 418 txq->time_stamp = jiffies;
409 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 419 txb = &txq->txb[txq->q.read_ptr];
420 info = IEEE80211_SKB_CB(txb->skb);
410 memset(&info->status, 0, sizeof(info->status)); 421 memset(&info->status, 0, sizeof(info->status));
411 422
412 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> 423 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
@@ -450,12 +461,14 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
450 iwl_wake_queue(priv, txq); 461 iwl_wake_queue(priv, txq);
451 } 462 }
452 } else { 463 } else {
453 iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false); 464 iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
465 txq_id, false);
454 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 466 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
455 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 467 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
456 468
457 if (priv->mac80211_registered && 469 if (priv->mac80211_registered &&
458 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 470 iwl_queue_space(&txq->q) > txq->q.low_mark &&
471 status != TX_STATUS_FAIL_PASSIVE_NO_RX)
459 iwl_wake_queue(priv, txq); 472 iwl_wake_queue(priv, txq);
460 } 473 }
461 474
@@ -470,8 +483,6 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
470 /* init calibration handlers */ 483 /* init calibration handlers */
471 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = 484 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
472 iwlagn_rx_calib_result; 485 iwlagn_rx_calib_result;
473 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
474 iwlagn_rx_calib_complete;
475 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 486 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
476 487
477 /* set up notification wait support */ 488 /* set up notification wait support */
@@ -482,8 +493,10 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
482 493
483void iwlagn_setup_deferred_work(struct iwl_priv *priv) 494void iwlagn_setup_deferred_work(struct iwl_priv *priv)
484{ 495{
485 /* in agn, the tx power calibration is done in uCode */ 496 /*
486 priv->disable_tx_power_cal = 1; 497 * nothing needs to be done here anymore
498 * still kept for future use if needed
499 */
487} 500}
488 501
489int iwlagn_hw_valid_rtc_data_addr(u32 addr) 502int iwlagn_hw_valid_rtc_data_addr(u32 addr)
@@ -534,9 +547,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
534void iwlagn_temperature(struct iwl_priv *priv) 547void iwlagn_temperature(struct iwl_priv *priv)
535{ 548{
536 /* store temperature from correct statistics (in Celsius) */ 549 /* store temperature from correct statistics (in Celsius) */
537 priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ? 550 priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
538 priv->_agn.statistics_bt.general.common.temperature :
539 priv->_agn.statistics.general.common.temperature);
540 iwl_tt_handler(priv); 551 iwl_tt_handler(priv);
541} 552}
542 553
@@ -652,10 +663,9 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
652 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 663 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
653 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ 664 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
654 665
655 if (!priv->cfg->base_params->use_isr_legacy) 666 rb_timeout = RX_RB_TIMEOUT;
656 rb_timeout = RX_RB_TIMEOUT;
657 667
658 if (priv->cfg->mod_params->amsdu_size_8K) 668 if (iwlagn_mod_params.amsdu_size_8K)
659 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 669 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
660 else 670 else
661 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 671 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
@@ -913,7 +923,6 @@ void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
913 923
914 list_add_tail(&rxb->list, &rxq->rx_free); 924 list_add_tail(&rxb->list, &rxq->rx_free);
915 rxq->free_count++; 925 rxq->free_count++;
916 priv->alloc_rxb_page++;
917 926
918 spin_unlock_irqrestore(&rxq->lock, flags); 927 spin_unlock_irqrestore(&rxq->lock, flags);
919 } 928 }
@@ -1285,9 +1294,17 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1285 * mean we never reach it, but at the same time work around 1294 * mean we never reach it, but at the same time work around
1286 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER 1295 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1287 * here instead of IWL_GOOD_CRC_TH_DISABLED. 1296 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1297 *
1298 * This was fixed in later versions along with some other
1299 * scan changes, and the threshold behaves as a flag in those
1300 * versions.
1288 */ 1301 */
1289 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : 1302 if (priv->new_scan_threshold_behaviour)
1290 IWL_GOOD_CRC_TH_NEVER; 1303 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1304 IWL_GOOD_CRC_TH_DISABLED;
1305 else
1306 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1307 IWL_GOOD_CRC_TH_NEVER;
1291 1308
1292 band = priv->scan_band; 1309 band = priv->scan_band;
1293 1310
@@ -2245,34 +2262,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2245/* notification wait support */ 2262/* notification wait support */
2246void iwlagn_init_notification_wait(struct iwl_priv *priv, 2263void iwlagn_init_notification_wait(struct iwl_priv *priv,
2247 struct iwl_notification_wait *wait_entry, 2264 struct iwl_notification_wait *wait_entry,
2265 u8 cmd,
2248 void (*fn)(struct iwl_priv *priv, 2266 void (*fn)(struct iwl_priv *priv,
2249 struct iwl_rx_packet *pkt), 2267 struct iwl_rx_packet *pkt,
2250 u8 cmd) 2268 void *data),
2269 void *fn_data)
2251{ 2270{
2252 wait_entry->fn = fn; 2271 wait_entry->fn = fn;
2272 wait_entry->fn_data = fn_data;
2253 wait_entry->cmd = cmd; 2273 wait_entry->cmd = cmd;
2254 wait_entry->triggered = false; 2274 wait_entry->triggered = false;
2275 wait_entry->aborted = false;
2255 2276
2256 spin_lock_bh(&priv->_agn.notif_wait_lock); 2277 spin_lock_bh(&priv->_agn.notif_wait_lock);
2257 list_add(&wait_entry->list, &priv->_agn.notif_waits); 2278 list_add(&wait_entry->list, &priv->_agn.notif_waits);
2258 spin_unlock_bh(&priv->_agn.notif_wait_lock); 2279 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2259} 2280}
2260 2281
2261signed long iwlagn_wait_notification(struct iwl_priv *priv, 2282int iwlagn_wait_notification(struct iwl_priv *priv,
2262 struct iwl_notification_wait *wait_entry, 2283 struct iwl_notification_wait *wait_entry,
2263 unsigned long timeout) 2284 unsigned long timeout)
2264{ 2285{
2265 int ret; 2286 int ret;
2266 2287
2267 ret = wait_event_timeout(priv->_agn.notif_waitq, 2288 ret = wait_event_timeout(priv->_agn.notif_waitq,
2268 wait_entry->triggered, 2289 wait_entry->triggered || wait_entry->aborted,
2269 timeout); 2290 timeout);
2270 2291
2271 spin_lock_bh(&priv->_agn.notif_wait_lock); 2292 spin_lock_bh(&priv->_agn.notif_wait_lock);
2272 list_del(&wait_entry->list); 2293 list_del(&wait_entry->list);
2273 spin_unlock_bh(&priv->_agn.notif_wait_lock); 2294 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2274 2295
2275 return ret; 2296 if (wait_entry->aborted)
2297 return -EIO;
2298
2299 /* return value is always >= 0 */
2300 if (ret <= 0)
2301 return -ETIMEDOUT;
2302 return 0;
2276} 2303}
2277 2304
2278void iwlagn_remove_notification(struct iwl_priv *priv, 2305void iwlagn_remove_notification(struct iwl_priv *priv,
@@ -2282,3 +2309,87 @@ void iwlagn_remove_notification(struct iwl_priv *priv,
2282 list_del(&wait_entry->list); 2309 list_del(&wait_entry->list);
2283 spin_unlock_bh(&priv->_agn.notif_wait_lock); 2310 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2284} 2311}
2312
2313int iwlagn_start_device(struct iwl_priv *priv)
2314{
2315 int ret;
2316
2317 if (iwl_prepare_card_hw(priv)) {
2318 IWL_WARN(priv, "Exit HW not ready\n");
2319 return -EIO;
2320 }
2321
2322 /* If platform's RF_KILL switch is NOT set to KILL */
2323 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2324 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2325 else
2326 set_bit(STATUS_RF_KILL_HW, &priv->status);
2327
2328 if (iwl_is_rfkill(priv)) {
2329 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
2330 iwl_enable_interrupts(priv);
2331 return -ERFKILL;
2332 }
2333
2334 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2335
2336 ret = iwlagn_hw_nic_init(priv);
2337 if (ret) {
2338 IWL_ERR(priv, "Unable to init nic\n");
2339 return ret;
2340 }
2341
2342 /* make sure rfkill handshake bits are cleared */
2343 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2344 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2345 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2346
2347 /* clear (again), then enable host interrupts */
2348 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2349 iwl_enable_interrupts(priv);
2350
2351 /* really make sure rfkill handshake bits are cleared */
2352 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2353 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2354
2355 return 0;
2356}
2357
2358void iwlagn_stop_device(struct iwl_priv *priv)
2359{
2360 unsigned long flags;
2361
2362 /* stop and reset the on-board processor */
2363 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2364
2365 /* tell the device to stop sending interrupts */
2366 spin_lock_irqsave(&priv->lock, flags);
2367 iwl_disable_interrupts(priv);
2368 spin_unlock_irqrestore(&priv->lock, flags);
2369 iwl_synchronize_irq(priv);
2370
2371 /* device going down, Stop using ICT table */
2372 iwl_disable_ict(priv);
2373
2374 /*
2375 * If a HW restart happens during firmware loading,
2376 * then the firmware loading might call this function
2377 * and later it might be called again due to the
2378 * restart. So don't process again if the device is
2379 * already dead.
2380 */
2381 if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
2382 iwlagn_txq_ctx_stop(priv);
2383 iwlagn_rxq_stop(priv);
2384
2385 /* Power-down device's busmaster DMA clocks */
2386 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2387 udelay(5);
2388 }
2389
2390 /* Make sure (redundant) we've released our request to stay awake */
2391 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2392
2393 /* Stop the device, and put it in low power state */
2394 iwl_apm_stop(priv);
2395}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index d03b4734c892..91f26556ac23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -115,13 +115,18 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
115 /* FIXME:RS: ^^ should be INV (legacy) */ 115 /* FIXME:RS: ^^ should be INV (legacy) */
116}; 116};
117 117
118static inline u8 rs_extract_rate(u32 rate_n_flags)
119{
120 return (u8)(rate_n_flags & RATE_MCS_RATE_MSK);
121}
122
118static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) 123static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
119{ 124{
120 int idx = 0; 125 int idx = 0;
121 126
122 /* HT rate format */ 127 /* HT rate format */
123 if (rate_n_flags & RATE_MCS_HT_MSK) { 128 if (rate_n_flags & RATE_MCS_HT_MSK) {
124 idx = (rate_n_flags & 0xff); 129 idx = rs_extract_rate(rate_n_flags);
125 130
126 if (idx >= IWL_RATE_MIMO3_6M_PLCP) 131 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
127 idx = idx - IWL_RATE_MIMO3_6M_PLCP; 132 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
@@ -138,7 +143,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
138 /* legacy rate format, search for match in table */ 143 /* legacy rate format, search for match in table */
139 } else { 144 } else {
140 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) 145 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
141 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) 146 if (iwl_rates[idx].plcp ==
147 rs_extract_rate(rate_n_flags))
142 return idx; 148 return idx;
143 } 149 }
144 150
@@ -239,11 +245,6 @@ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
239 245
240#define MCS_INDEX_PER_STREAM (8) 246#define MCS_INDEX_PER_STREAM (8)
241 247
242static inline u8 rs_extract_rate(u32 rate_n_flags)
243{
244 return (u8)(rate_n_flags & 0xFF);
245}
246
247static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) 248static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
248{ 249{
249 window->data = 0; 250 window->data = 0;
@@ -2770,16 +2771,13 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2770static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, 2771static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2771 gfp_t gfp) 2772 gfp_t gfp)
2772{ 2773{
2773 struct iwl_lq_sta *lq_sta;
2774 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv; 2774 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2775 struct iwl_priv *priv; 2775 struct iwl_priv *priv;
2776 2776
2777 priv = (struct iwl_priv *)priv_rate; 2777 priv = (struct iwl_priv *)priv_rate;
2778 IWL_DEBUG_RATE(priv, "create station rate scale window\n"); 2778 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2779 2779
2780 lq_sta = &sta_priv->lq_sta; 2780 return &sta_priv->lq_sta;
2781
2782 return lq_sta;
2783} 2781}
2784 2782
2785/* 2783/*
@@ -2912,7 +2910,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2912 ant_toggle_cnt = 1; 2910 ant_toggle_cnt = 1;
2913 repeat_rate = IWL_NUMBER_TRY; 2911 repeat_rate = IWL_NUMBER_TRY;
2914 } else { 2912 } else {
2915 repeat_rate = IWL_HT_NUMBER_TRY; 2913 repeat_rate = min(IWL_HT_NUMBER_TRY,
2914 LINK_QUAL_AGG_DISABLE_START_DEF - 1);
2916 } 2915 }
2917 2916
2918 lq_cmd->general_params.mimo_delimiter = 2917 lq_cmd->general_params.mimo_delimiter =
@@ -3087,7 +3086,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3087 struct iwl_lq_sta *lq_sta = file->private_data; 3086 struct iwl_lq_sta *lq_sta = file->private_data;
3088 struct iwl_priv *priv; 3087 struct iwl_priv *priv;
3089 char buf[64]; 3088 char buf[64];
3090 int buf_size; 3089 size_t buf_size;
3091 u32 parsed_rate; 3090 u32 parsed_rate;
3092 struct iwl_station_priv *sta_priv = 3091 struct iwl_station_priv *sta_priv =
3093 container_of(lq_sta, struct iwl_station_priv, lq_sta); 3092 container_of(lq_sta, struct iwl_station_priv, lq_sta);
@@ -3257,7 +3256,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
3257{ 3256{
3258 char buff[120]; 3257 char buff[120];
3259 int desc = 0; 3258 int desc = 0;
3260 ssize_t ret;
3261 3259
3262 struct iwl_lq_sta *lq_sta = file->private_data; 3260 struct iwl_lq_sta *lq_sta = file->private_data;
3263 struct iwl_priv *priv; 3261 struct iwl_priv *priv;
@@ -3274,8 +3272,7 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
3274 "Bit Rate= %d Mb/s\n", 3272 "Bit Rate= %d Mb/s\n",
3275 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); 3273 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
3276 3274
3277 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); 3275 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
3278 return ret;
3279} 3276}
3280 3277
3281static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { 3278static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 184828c72b31..bdae82e7fa90 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -41,20 +41,6 @@ struct iwl_rate_info {
41 u8 next_rs_tgg; /* next rate used in TGG rs algo */ 41 u8 next_rs_tgg; /* next rate used in TGG rs algo */
42}; 42};
43 43
44struct iwl3945_rate_info {
45 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
46 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
47 u8 prev_ieee; /* previous rate in IEEE speeds */
48 u8 next_ieee; /* next rate in IEEE speeds */
49 u8 prev_rs; /* previous rate used in rs algo */
50 u8 next_rs; /* next rate used in rs algo */
51 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
52 u8 next_rs_tgg; /* next rate used in TGG rs algo */
53 u8 table_rs_index; /* index in rate scale table cmd */
54 u8 prev_table_rs; /* prev in rate table cmd */
55};
56
57
58/* 44/*
59 * These serve as indexes into 45 * These serve as indexes into
60 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; 46 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
@@ -75,7 +61,6 @@ enum {
75 IWL_RATE_60M_INDEX, 61 IWL_RATE_60M_INDEX,
76 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/ 62 IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
77 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ 63 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
78 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
79 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, 64 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
80 IWL_RATE_INVALID = IWL_RATE_COUNT, 65 IWL_RATE_INVALID = IWL_RATE_COUNT,
81}; 66};
@@ -98,7 +83,6 @@ enum {
98 83
99enum { 84enum {
100 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, 85 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
101 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
102 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, 86 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
103 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, 87 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
104 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, 88 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
@@ -213,7 +197,6 @@ enum {
213 IWL_CCK_BASIC_RATES_MASK) 197 IWL_CCK_BASIC_RATES_MASK)
214 198
215#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) 199#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
216#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
217 200
218#define IWL_INVALID_VALUE -1 201#define IWL_INVALID_VALUE -1
219 202
@@ -453,19 +436,9 @@ static inline u8 first_antenna(u8 mask)
453} 436}
454 437
455 438
456/**
457 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
458 *
459 * The specific throughput table used is based on the type of network
460 * the associated with, including A, B, G, and G w/ TGG protection
461 */
462extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
463
464/* Initialize station's rate scaling information after adding station */ 439/* Initialize station's rate scaling information after adding station */
465extern void iwl_rs_rate_init(struct iwl_priv *priv, 440extern void iwl_rs_rate_init(struct iwl_priv *priv,
466 struct ieee80211_sta *sta, u8 sta_id); 441 struct ieee80211_sta *sta, u8 sta_id);
467extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
468 struct ieee80211_sta *sta, u8 sta_id);
469 442
470/** 443/**
471 * iwl_rate_control_register - Register the rate control algorithm callbacks 444 * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -478,7 +451,6 @@ extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
478 * 451 *
479 */ 452 */
480extern int iwlagn_rate_control_register(void); 453extern int iwlagn_rate_control_register(void);
481extern int iwl3945_rate_control_register(void);
482 454
483/** 455/**
484 * iwl_rate_control_unregister - Unregister the rate control callbacks 456 * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -487,6 +459,5 @@ extern int iwl3945_rate_control_register(void);
487 * the driver is unloaded. 459 * the driver is unloaded.
488 */ 460 */
489extern void iwlagn_rate_control_unregister(void); 461extern void iwlagn_rate_control_unregister(void);
490extern void iwl3945_rate_control_unregister(void);
491 462
492#endif /* __iwl_agn__rs__ */ 463#endif /* __iwl_agn__rs__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index fbbde0712fa5..02387430f7fe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -29,6 +29,7 @@
29#include "iwl-sta.h" 29#include "iwl-sta.h"
30#include "iwl-core.h" 30#include "iwl-core.h"
31#include "iwl-agn-calib.h" 31#include "iwl-agn-calib.h"
32#include "iwl-helpers.h"
32 33
33static int iwlagn_disable_bss(struct iwl_priv *priv, 34static int iwlagn_disable_bss(struct iwl_priv *priv,
34 struct iwl_rxon_context *ctx, 35 struct iwl_rxon_context *ctx,
@@ -57,8 +58,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
57 u8 old_dev_type = send->dev_type; 58 u8 old_dev_type = send->dev_type;
58 int ret; 59 int ret;
59 60
60 iwlagn_init_notification_wait(priv, &disable_wait, NULL, 61 iwlagn_init_notification_wait(priv, &disable_wait,
61 REPLY_WIPAN_DEACTIVATION_COMPLETE); 62 REPLY_WIPAN_DEACTIVATION_COMPLETE,
63 NULL, NULL);
62 64
63 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 65 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
64 send->dev_type = RXON_DEV_TYPE_P2P; 66 send->dev_type = RXON_DEV_TYPE_P2P;
@@ -71,13 +73,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
71 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); 73 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
72 iwlagn_remove_notification(priv, &disable_wait); 74 iwlagn_remove_notification(priv, &disable_wait);
73 } else { 75 } else {
74 signed long wait_res; 76 ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
75 77 if (ret)
76 wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
77 if (wait_res == 0) {
78 IWL_ERR(priv, "Timed out waiting for PAN disable\n"); 78 IWL_ERR(priv, "Timed out waiting for PAN disable\n");
79 ret = -EIO;
80 }
81 } 79 }
82 80
83 return ret; 81 return ret;
@@ -123,6 +121,151 @@ static int iwlagn_update_beacon(struct iwl_priv *priv,
123 return iwlagn_send_beacon_cmd(priv); 121 return iwlagn_send_beacon_cmd(priv);
124} 122}
125 123
124static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
125 struct iwl_rxon_context *ctx)
126{
127 int ret = 0;
128 struct iwl_rxon_assoc_cmd rxon_assoc;
129 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
130 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
131
132 if ((rxon1->flags == rxon2->flags) &&
133 (rxon1->filter_flags == rxon2->filter_flags) &&
134 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
135 (rxon1->ofdm_ht_single_stream_basic_rates ==
136 rxon2->ofdm_ht_single_stream_basic_rates) &&
137 (rxon1->ofdm_ht_dual_stream_basic_rates ==
138 rxon2->ofdm_ht_dual_stream_basic_rates) &&
139 (rxon1->ofdm_ht_triple_stream_basic_rates ==
140 rxon2->ofdm_ht_triple_stream_basic_rates) &&
141 (rxon1->acquisition_data == rxon2->acquisition_data) &&
142 (rxon1->rx_chain == rxon2->rx_chain) &&
143 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
144 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
145 return 0;
146 }
147
148 rxon_assoc.flags = ctx->staging.flags;
149 rxon_assoc.filter_flags = ctx->staging.filter_flags;
150 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
151 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
152 rxon_assoc.reserved1 = 0;
153 rxon_assoc.reserved2 = 0;
154 rxon_assoc.reserved3 = 0;
155 rxon_assoc.ofdm_ht_single_stream_basic_rates =
156 ctx->staging.ofdm_ht_single_stream_basic_rates;
157 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
158 ctx->staging.ofdm_ht_dual_stream_basic_rates;
159 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
160 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
161 ctx->staging.ofdm_ht_triple_stream_basic_rates;
162 rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
163
164 ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
165 sizeof(rxon_assoc), &rxon_assoc, NULL);
166 if (ret)
167 return ret;
168
169 return ret;
170}
171
172static int iwlagn_rxon_disconn(struct iwl_priv *priv,
173 struct iwl_rxon_context *ctx)
174{
175 int ret;
176 struct iwl_rxon_cmd *active = (void *)&ctx->active;
177
178 if (ctx->ctxid == IWL_RXON_CTX_BSS)
179 ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
180 else
181 ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
182 if (ret)
183 return ret;
184
185 /*
186 * Un-assoc RXON clears the station table and WEP
187 * keys, so we have to restore those afterwards.
188 */
189 iwl_clear_ucode_stations(priv, ctx);
190 iwl_restore_stations(priv, ctx);
191 ret = iwl_restore_default_wep_keys(priv, ctx);
192 if (ret) {
193 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
194 return ret;
195 }
196
197 memcpy(active, &ctx->staging, sizeof(*active));
198 return 0;
199}
200
201static int iwlagn_rxon_connect(struct iwl_priv *priv,
202 struct iwl_rxon_context *ctx)
203{
204 int ret;
205 struct iwl_rxon_cmd *active = (void *)&ctx->active;
206
207 /* RXON timing must be before associated RXON */
208 ret = iwl_send_rxon_timing(priv, ctx);
209 if (ret) {
210 IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
211 return ret;
212 }
213 /* QoS info may be cleared by previous un-assoc RXON */
214 iwlagn_update_qos(priv, ctx);
215
216 /*
217 * We'll run into this code path when beaconing is
218 * enabled, but then we also need to send the beacon
219 * to the device.
220 */
221 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
222 ret = iwlagn_update_beacon(priv, ctx->vif);
223 if (ret) {
224 IWL_ERR(priv,
225 "Error sending required beacon (%d)!\n",
226 ret);
227 return ret;
228 }
229 }
230
231 priv->start_calib = 0;
232 /*
233 * Apply the new configuration.
234 *
235 * Associated RXON doesn't clear the station table in uCode,
236 * so we don't need to restore stations etc. after this.
237 */
238 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
239 sizeof(struct iwl_rxon_cmd), &ctx->staging);
240 if (ret) {
241 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
242 return ret;
243 }
244 memcpy(active, &ctx->staging, sizeof(*active));
245
246 iwl_reprogram_ap_sta(priv, ctx);
247
248 /* IBSS beacon needs to be sent after setting assoc */
249 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
250 if (iwlagn_update_beacon(priv, ctx->vif))
251 IWL_ERR(priv, "Error sending IBSS beacon\n");
252 iwl_init_sensitivity(priv);
253
254 /*
 255 * If we issue a new RXON command which requires a tune then
256 * we must send a new TXPOWER command or we won't be able to
257 * Tx any frames.
258 *
 259 * It's expected we set power here if the channel is changing.
260 */
261 ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
262 if (ret) {
263 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
264 return ret;
265 }
266 return 0;
267}
268
126/** 269/**
127 * iwlagn_commit_rxon - commit staging_rxon to hardware 270 * iwlagn_commit_rxon - commit staging_rxon to hardware
128 * 271 *
@@ -130,6 +273,16 @@ static int iwlagn_update_beacon(struct iwl_priv *priv,
130 * the active_rxon structure is updated with the new data. This 273 * the active_rxon structure is updated with the new data. This
131 * function correctly transitions out of the RXON_ASSOC_MSK state if 274 * function correctly transitions out of the RXON_ASSOC_MSK state if
132 * a HW tune is required based on the RXON structure changes. 275 * a HW tune is required based on the RXON structure changes.
276 *
 277 * The connect/disconnect flow should be as follows:
 278 *
 279 * 1. make sure to send the RXON command with the association bit unset if
 280 *    not connected; this should include the channel and the band for the
 281 *    candidate to be connected to
 282 * 2. add the station before the RXON association with the AP
 283 * 3. RXON_timing has to be sent before the RXON for connection
 284 * 4. full RXON command - associated bit set
 285 * 5. use the RXON_ASSOC command to update any flag changes
133 */ 286 */
134int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 287int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
135{ 288{
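The comment added above documents a five-step connect flow. A minimal standalone sketch of that ordering, using hypothetical stub functions rather than the driver's real commands, could look like this:

#include <stdio.h>

/* Hypothetical stubs standing in for the real driver calls; each returns
 * 0 on success so the sequence below mirrors the documented flow. */
static int send_rxon_unassoc(void)   { puts("1. RXON, assoc bit clear"); return 0; }
static int add_station(void)         { puts("2. add station for the AP"); return 0; }
static int send_rxon_timing(void)    { puts("3. RXON timing"); return 0; }
static int send_rxon_assoc_set(void) { puts("4. full RXON, assoc bit set"); return 0; }
static int send_rxon_assoc_cmd(void) { puts("5. RXON_ASSOC for flag updates"); return 0; }

/* Walk the documented connect flow, stopping at the first failure. */
static int connect_flow(void)
{
	int ret;

	ret = send_rxon_unassoc();
	if (ret)
		return ret;
	ret = add_station();
	if (ret)
		return ret;
	ret = send_rxon_timing();
	if (ret)
		return ret;
	ret = send_rxon_assoc_set();
	if (ret)
		return ret;
	return send_rxon_assoc_cmd();
}

int main(void)
{
	return connect_flow();
}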
@@ -179,6 +332,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
179 else 332 else
180 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 333 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
181 334
335 iwl_print_rx_config_cmd(priv, ctx);
182 ret = iwl_check_rxon_cmd(priv, ctx); 336 ret = iwl_check_rxon_cmd(priv, ctx);
183 if (ret) { 337 if (ret) {
184 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 338 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
@@ -202,14 +356,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
202 * and other flags for the current radio configuration. 356 * and other flags for the current radio configuration.
203 */ 357 */
204 if (!iwl_full_rxon_required(priv, ctx)) { 358 if (!iwl_full_rxon_required(priv, ctx)) {
205 ret = iwl_send_rxon_assoc(priv, ctx); 359 ret = iwlagn_send_rxon_assoc(priv, ctx);
206 if (ret) { 360 if (ret) {
207 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); 361 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
208 return ret; 362 return ret;
209 } 363 }
210 364
211 memcpy(active, &ctx->staging, sizeof(*active)); 365 memcpy(active, &ctx->staging, sizeof(*active));
212 iwl_print_rx_config_cmd(priv, ctx);
213 return 0; 366 return 0;
214 } 367 }
215 368
@@ -219,7 +372,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
219 return ret; 372 return ret;
220 } 373 }
221 374
222 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto); 375 iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto);
223 376
224 IWL_DEBUG_INFO(priv, 377 IWL_DEBUG_INFO(priv,
225 "Going to commit RXON\n" 378 "Going to commit RXON\n"
@@ -237,92 +390,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
237 * set up filters in the device. 390 * set up filters in the device.
238 */ 391 */
239 if ((old_assoc && new_assoc) || !new_assoc) { 392 if ((old_assoc && new_assoc) || !new_assoc) {
240 if (ctx->ctxid == IWL_RXON_CTX_BSS) 393 ret = iwlagn_rxon_disconn(priv, ctx);
241 ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
242 else
243 ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
244 if (ret) 394 if (ret)
245 return ret; 395 return ret;
246
247 memcpy(active, &ctx->staging, sizeof(*active));
248
249 /*
250 * Un-assoc RXON clears the station table and WEP
251 * keys, so we have to restore those afterwards.
252 */
253 iwl_clear_ucode_stations(priv, ctx);
254 iwl_restore_stations(priv, ctx);
255 ret = iwl_restore_default_wep_keys(priv, ctx);
256 if (ret) {
257 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
258 return ret;
259 }
260 }
261
262 /* RXON timing must be before associated RXON */
263 ret = iwl_send_rxon_timing(priv, ctx);
264 if (ret) {
265 IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
266 return ret;
267 }
268
269 if (new_assoc) {
270 /* QoS info may be cleared by previous un-assoc RXON */
271 iwlagn_update_qos(priv, ctx);
272
273 /*
274 * We'll run into this code path when beaconing is
275 * enabled, but then we also need to send the beacon
276 * to the device.
277 */
278 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
279 ret = iwlagn_update_beacon(priv, ctx->vif);
280 if (ret) {
281 IWL_ERR(priv,
282 "Error sending required beacon (%d)!\n",
283 ret);
284 return ret;
285 }
286 }
287
288 priv->start_calib = 0;
289 /*
290 * Apply the new configuration.
291 *
292 * Associated RXON doesn't clear the station table in uCode,
293 * so we don't need to restore stations etc. after this.
294 */
295 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
296 sizeof(struct iwl_rxon_cmd), &ctx->staging);
297 if (ret) {
298 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
299 return ret;
300 }
301 memcpy(active, &ctx->staging, sizeof(*active));
302
303 iwl_reprogram_ap_sta(priv, ctx);
304
305 /* IBSS beacon needs to be sent after setting assoc */
306 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
307 if (iwlagn_update_beacon(priv, ctx->vif))
308 IWL_ERR(priv, "Error sending IBSS beacon\n");
309 } 396 }
310 397
311 iwl_print_rx_config_cmd(priv, ctx); 398 if (new_assoc)
312 399 return iwlagn_rxon_connect(priv, ctx);
313 iwl_init_sensitivity(priv);
314
315 /*
316 * If we issue a new RXON command which required a tune then we must
317 * send a new TXPOWER command or we won't be able to Tx any frames.
318 *
319 * It's expected we set power here if channel is changing.
320 */
321 ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
322 if (ret) {
323 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
324 return ret;
325 }
326 400
327 return 0; 401 return 0;
328} 402}
@@ -595,6 +669,18 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
595 priv->timestamp = bss_conf->timestamp; 669 priv->timestamp = bss_conf->timestamp;
596 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 670 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
597 } else { 671 } else {
672 /*
673 * If we disassociate while there are pending
674 * frames, just wake up the queues and let the
675 * frames "escape" ... This shouldn't really
676 * be happening to start with, but we should
677 * not get stuck in this case either since it
678 * can happen if userspace gets confused.
679 */
680 if (ctx->last_tx_rejected) {
681 ctx->last_tx_rejected = false;
682 iwl_wake_any_queue(priv, ctx);
683 }
598 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 684 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
599 } 685 }
600 } 686 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 35f085ac336b..079275f2c64d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -474,7 +474,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
474 memset(&priv->stations[sta_id].keyinfo, 0, 474 memset(&priv->stations[sta_id].keyinfo, 0,
475 sizeof(struct iwl_hw_key)); 475 sizeof(struct iwl_hw_key));
476 memset(&priv->stations[sta_id].sta.key, 0, 476 memset(&priv->stations[sta_id].sta.key, 0,
477 sizeof(struct iwl4965_keyinfo)); 477 sizeof(struct iwl_keyinfo));
478 priv->stations[sta_id].sta.key.key_flags = 478 priv->stations[sta_id].sta.key.key_flags =
479 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; 479 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
480 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; 480 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index e3a8216a033c..348f74f1c8e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
index d55060427cac..d118ed29bf3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 0712b67283a4..342de780a366 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -98,9 +98,9 @@ static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
98/** 98/**
99 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 99 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
100 */ 100 */
101void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 101static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
102 struct iwl_tx_queue *txq, 102 struct iwl_tx_queue *txq,
103 u16 byte_cnt) 103 u16 byte_cnt)
104{ 104{
105 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; 105 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
106 int write_ptr = txq->q.write_ptr; 106 int write_ptr = txq->q.write_ptr;
@@ -112,21 +112,19 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
112 112
113 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); 113 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
114 114
115 if (txq_id != priv->cmd_queue) { 115 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
116 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; 116 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
117 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; 117
118 118 switch (sec_ctl & TX_CMD_SEC_MSK) {
119 switch (sec_ctl & TX_CMD_SEC_MSK) { 119 case TX_CMD_SEC_CCM:
120 case TX_CMD_SEC_CCM: 120 len += CCMP_MIC_LEN;
121 len += CCMP_MIC_LEN; 121 break;
122 break; 122 case TX_CMD_SEC_TKIP:
123 case TX_CMD_SEC_TKIP: 123 len += TKIP_ICV_LEN;
124 len += TKIP_ICV_LEN; 124 break;
125 break; 125 case TX_CMD_SEC_WEP:
126 case TX_CMD_SEC_WEP: 126 len += WEP_IV_LEN + WEP_ICV_LEN;
127 len += WEP_IV_LEN + WEP_ICV_LEN; 127 break;
128 break;
129 }
130 } 128 }
131 129
132 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); 130 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
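The byte-count table entry above packs a 12-bit length (after adding per-cipher MIC/ICV overhead) and a 4-bit station id into one 16-bit word. A small self-contained sketch of that packing follows; the overhead constants are the usual 802.11 values and are assumed here rather than taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Usual 802.11 per-cipher overhead added to the frame length
 * (treat the values as illustrative). */
#define CCMP_MIC_LEN	8
#define TKIP_ICV_LEN	4
#define WEP_IV_LEN	4
#define WEP_ICV_LEN	4

enum cipher { CIPHER_NONE, CIPHER_CCM, CIPHER_TKIP, CIPHER_WEP };

/* Pack a byte-count table entry: low 12 bits carry the adjusted length,
 * the top 4 bits carry the station id, mirroring
 * (len & 0xFFF) | (sta_id << 12) in the hunk above. */
static uint16_t pack_bc_ent(uint16_t len, uint8_t sta_id, enum cipher c)
{
	switch (c) {
	case CIPHER_CCM:  len += CCMP_MIC_LEN; break;
	case CIPHER_TKIP: len += TKIP_ICV_LEN; break;
	case CIPHER_WEP:  len += WEP_IV_LEN + WEP_ICV_LEN; break;
	default: break;
	}
	return (uint16_t)((len & 0xFFF) | ((sta_id & 0xF) << 12));
}

int main(void)
{
	uint16_t ent = pack_bc_ent(1500, 3, CIPHER_CCM);

	printf("entry=0x%04x len=%u sta=%u\n",
	       ent, ent & 0xFFF, ent >> 12);
	return 0;
}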
@@ -138,8 +136,8 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
138 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 136 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
139} 137}
140 138
141void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, 139static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
142 struct iwl_tx_queue *txq) 140 struct iwl_tx_queue *txq)
143{ 141{
144 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; 142 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
145 int txq_id = txq->q.id; 143 int txq_id = txq->q.id;
@@ -222,13 +220,8 @@ void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
222 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); 220 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
223} 221}
224 222
225int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, 223static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, int tid)
226 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
227{ 224{
228 unsigned long flags;
229 u16 ra_tid;
230 int ret;
231
232 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 225 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
233 (IWLAGN_FIRST_AMPDU_QUEUE + 226 (IWLAGN_FIRST_AMPDU_QUEUE +
234 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { 227 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
@@ -240,12 +233,33 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
240 return -EINVAL; 233 return -EINVAL;
241 } 234 }
242 235
243 ra_tid = BUILD_RAxTID(sta_id, tid);
244
245 /* Modify device's station table to Tx this TID */ 236 /* Modify device's station table to Tx this TID */
246 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); 237 return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
247 if (ret) 238}
248 return ret; 239
240void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
241 struct ieee80211_sta *sta,
242 int tid, int frame_limit)
243{
244 int sta_id, tx_fifo, txq_id, ssn_idx;
245 u16 ra_tid;
246 unsigned long flags;
247 struct iwl_tid_data *tid_data;
248
249 sta_id = iwl_sta_id(sta);
250 if (WARN_ON(sta_id == IWL_INVALID_STATION))
251 return;
252 if (WARN_ON(tid >= MAX_TID_COUNT))
253 return;
254
255 spin_lock_irqsave(&priv->sta_lock, flags);
256 tid_data = &priv->stations[sta_id].tid[tid];
257 ssn_idx = SEQ_TO_SN(tid_data->seq_number);
258 txq_id = tid_data->agg.txq_id;
259 tx_fifo = tid_data->agg.tx_fifo;
260 spin_unlock_irqrestore(&priv->sta_lock, flags);
261
262 ra_tid = BUILD_RAxTID(sta_id, tid);
249 263
250 spin_lock_irqsave(&priv->lock, flags); 264 spin_lock_irqsave(&priv->lock, flags);
251 265
@@ -271,10 +285,10 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
271 iwl_write_targ_mem(priv, priv->scd_base_addr + 285 iwl_write_targ_mem(priv, priv->scd_base_addr +
272 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + 286 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
273 sizeof(u32), 287 sizeof(u32),
274 ((SCD_WIN_SIZE << 288 ((frame_limit <<
275 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 289 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
276 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 290 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
277 ((SCD_FRAME_LIMIT << 291 ((frame_limit <<
278 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 292 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
279 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 293 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
280 294
@@ -284,12 +298,10 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
284 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); 298 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
285 299
286 spin_unlock_irqrestore(&priv->lock, flags); 300 spin_unlock_irqrestore(&priv->lock, flags);
287
288 return 0;
289} 301}
290 302
291int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, 303static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
292 u16 ssn_idx, u8 tx_fifo) 304 u16 ssn_idx, u8 tx_fifo)
293{ 305{
294 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 306 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
295 (IWLAGN_FIRST_AMPDU_QUEUE + 307 (IWLAGN_FIRST_AMPDU_QUEUE +
@@ -525,7 +537,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
525 struct iwl_tx_cmd *tx_cmd; 537 struct iwl_tx_cmd *tx_cmd;
526 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 538 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
527 int txq_id; 539 int txq_id;
528 dma_addr_t phys_addr; 540 dma_addr_t phys_addr = 0;
529 dma_addr_t txcmd_phys; 541 dma_addr_t txcmd_phys;
530 dma_addr_t scratch_phys; 542 dma_addr_t scratch_phys;
531 u16 len, firstlen, secondlen; 543 u16 len, firstlen, secondlen;
@@ -552,7 +564,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
552 spin_lock_irqsave(&priv->lock, flags); 564 spin_lock_irqsave(&priv->lock, flags);
553 if (iwl_is_rfkill(priv)) { 565 if (iwl_is_rfkill(priv)) {
554 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 566 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
555 goto drop_unlock; 567 goto drop_unlock_priv;
556 } 568 }
557 569
558 fc = hdr->frame_control; 570 fc = hdr->frame_control;
@@ -577,7 +589,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
577 if (sta_id == IWL_INVALID_STATION) { 589 if (sta_id == IWL_INVALID_STATION) {
578 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 590 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
579 hdr->addr1); 591 hdr->addr1);
580 goto drop_unlock; 592 goto drop_unlock_priv;
581 } 593 }
582 } 594 }
583 595
@@ -621,10 +633,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
621 if (ieee80211_is_data_qos(fc)) { 633 if (ieee80211_is_data_qos(fc)) {
622 qc = ieee80211_get_qos_ctl(hdr); 634 qc = ieee80211_get_qos_ctl(hdr);
623 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 635 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
624 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { 636
625 spin_unlock(&priv->sta_lock); 637 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
626 goto drop_unlock; 638 goto drop_unlock_sta;
627 } 639
628 seq_number = priv->stations[sta_id].tid[tid].seq_number; 640 seq_number = priv->stations[sta_id].tid[tid].seq_number;
629 seq_number &= IEEE80211_SCTL_SEQ; 641 seq_number &= IEEE80211_SCTL_SEQ;
630 hdr->seq_ctrl = hdr->seq_ctrl & 642 hdr->seq_ctrl = hdr->seq_ctrl &
@@ -642,18 +654,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
642 txq = &priv->txq[txq_id]; 654 txq = &priv->txq[txq_id];
643 q = &txq->q; 655 q = &txq->q;
644 656
645 if (unlikely(iwl_queue_space(q) < q->high_mark)) { 657 if (unlikely(iwl_queue_space(q) < q->high_mark))
646 spin_unlock(&priv->sta_lock); 658 goto drop_unlock_sta;
647 goto drop_unlock;
648 }
649
650 if (ieee80211_is_data_qos(fc)) {
651 priv->stations[sta_id].tid[tid].tfds_in_queue++;
652 if (!ieee80211_has_morefrags(fc))
653 priv->stations[sta_id].tid[tid].seq_number = seq_number;
654 }
655
656 spin_unlock(&priv->sta_lock);
657 659
658 /* Set up driver data for this TFD */ 660 /* Set up driver data for this TFD */
659 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 661 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -717,12 +719,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
717 txcmd_phys = pci_map_single(priv->pci_dev, 719 txcmd_phys = pci_map_single(priv->pci_dev,
718 &out_cmd->hdr, firstlen, 720 &out_cmd->hdr, firstlen,
719 PCI_DMA_BIDIRECTIONAL); 721 PCI_DMA_BIDIRECTIONAL);
722 if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys)))
723 goto drop_unlock_sta;
720 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 724 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
721 dma_unmap_len_set(out_meta, len, firstlen); 725 dma_unmap_len_set(out_meta, len, firstlen);
722 /* Add buffer containing Tx command and MAC(!) header to TFD's
723 * first entry */
724 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
725 txcmd_phys, firstlen, 1, 0);
726 726
727 if (!ieee80211_has_morefrags(hdr->frame_control)) { 727 if (!ieee80211_has_morefrags(hdr->frame_control)) {
728 txq->need_update = 1; 728 txq->need_update = 1;
@@ -737,10 +737,30 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
737 if (secondlen > 0) { 737 if (secondlen > 0) {
738 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 738 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
739 secondlen, PCI_DMA_TODEVICE); 739 secondlen, PCI_DMA_TODEVICE);
740 if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
741 pci_unmap_single(priv->pci_dev,
742 dma_unmap_addr(out_meta, mapping),
743 dma_unmap_len(out_meta, len),
744 PCI_DMA_BIDIRECTIONAL);
745 goto drop_unlock_sta;
746 }
747 }
748
749 if (ieee80211_is_data_qos(fc)) {
750 priv->stations[sta_id].tid[tid].tfds_in_queue++;
751 if (!ieee80211_has_morefrags(fc))
752 priv->stations[sta_id].tid[tid].seq_number = seq_number;
753 }
754
755 spin_unlock(&priv->sta_lock);
756
757 /* Attach buffers to TFD */
758 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
759 txcmd_phys, firstlen, 1, 0);
760 if (secondlen > 0)
740 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 761 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
741 phys_addr, secondlen, 762 phys_addr, secondlen,
742 0, 0); 763 0, 0);
743 }
744 764
745 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + 765 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
746 offsetof(struct iwl_tx_cmd, scratch); 766 offsetof(struct iwl_tx_cmd, scratch);
@@ -759,8 +779,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
759 779
760 /* Set up entry for this TFD in Tx byte-count array */ 780 /* Set up entry for this TFD in Tx byte-count array */
761 if (info->flags & IEEE80211_TX_CTL_AMPDU) 781 if (info->flags & IEEE80211_TX_CTL_AMPDU)
762 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 782 iwlagn_txq_update_byte_cnt_tbl(priv, txq,
763 le16_to_cpu(tx_cmd->len)); 783 le16_to_cpu(tx_cmd->len));
764 784
765 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, 785 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
766 firstlen, PCI_DMA_BIDIRECTIONAL); 786 firstlen, PCI_DMA_BIDIRECTIONAL);
@@ -806,7 +826,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
806 826
807 return 0; 827 return 0;
808 828
809drop_unlock: 829drop_unlock_sta:
830 spin_unlock(&priv->sta_lock);
831drop_unlock_priv:
810 spin_unlock_irqrestore(&priv->lock, flags); 832 spin_unlock_irqrestore(&priv->lock, flags);
811 return -1; 833 return -1;
812} 834}
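The hunk above splits the single drop_unlock label into drop_unlock_sta and drop_unlock_priv so that each failure path releases exactly the locks it holds. A standalone sketch of that unwind pattern, with ordinary pthread mutexes standing in for the driver's spinlocks (names and setting are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t priv_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sta_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Acquire locks in order and unwind through labels so each error path
 * drops only the locks it actually holds. */
static int tx_sketch(int rfkill, int queue_full)
{
	int ret = -1;

	pthread_mutex_lock(&priv_lock);
	if (rfkill) {
		fprintf(stderr, "dropping: RF kill\n");
		goto drop_unlock_priv;
	}

	pthread_mutex_lock(&sta_lock);
	if (queue_full) {
		fprintf(stderr, "dropping: queue full\n");
		goto drop_unlock_sta;
	}

	/* ... build and queue the frame here ... */
	ret = 0;

drop_unlock_sta:
	pthread_mutex_unlock(&sta_lock);
drop_unlock_priv:
	pthread_mutex_unlock(&priv_lock);
	return ret;
}

int main(void)
{
	return tx_sketch(0, 0);
}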
@@ -1039,11 +1061,11 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
1039 tid_data = &priv->stations[sta_id].tid[tid]; 1061 tid_data = &priv->stations[sta_id].tid[tid];
1040 *ssn = SEQ_TO_SN(tid_data->seq_number); 1062 *ssn = SEQ_TO_SN(tid_data->seq_number);
1041 tid_data->agg.txq_id = txq_id; 1063 tid_data->agg.txq_id = txq_id;
1064 tid_data->agg.tx_fifo = tx_fifo;
1042 iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id); 1065 iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
1043 spin_unlock_irqrestore(&priv->sta_lock, flags); 1066 spin_unlock_irqrestore(&priv->sta_lock, flags);
1044 1067
1045 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, 1068 ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
1046 sta_id, tid, *ssn);
1047 if (ret) 1069 if (ret)
1048 return ret; 1070 return ret;
1049 1071
@@ -1130,8 +1152,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1130 * to deactivate the uCode queue, just return "success" to allow 1152 * to deactivate the uCode queue, just return "success" to allow
1131 * mac80211 to clean up its own data. 1153 * mac80211 to clean up its own data.
1132 */ 1154 */
1133 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, 1155 iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
1134 tx_fifo_id);
1135 spin_unlock_irqrestore(&priv->lock, flags); 1156 spin_unlock_irqrestore(&priv->lock, flags);
1136 1157
1137 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1158 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1160,8 +1181,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1160 u16 ssn = SEQ_TO_SN(tid_data->seq_number); 1181 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1161 int tx_fifo = get_fifo_from_tid(ctx, tid); 1182 int tx_fifo = get_fifo_from_tid(ctx, tid);
1162 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); 1183 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1163 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, 1184 iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
1164 ssn, tx_fifo);
1165 tid_data->agg.state = IWL_AGG_OFF; 1185 tid_data->agg.state = IWL_AGG_OFF;
1166 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); 1186 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1167 } 1187 }
@@ -1241,8 +1261,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1241 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 1261 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
1242 tx_info->skb = NULL; 1262 tx_info->skb = NULL;
1243 1263
1244 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) 1264 iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
1245 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1246 1265
1247 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 1266 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1248 } 1267 }
@@ -1260,11 +1279,11 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1260 struct iwl_compressed_ba_resp *ba_resp) 1279 struct iwl_compressed_ba_resp *ba_resp)
1261 1280
1262{ 1281{
1263 int i, sh, ack; 1282 int sh;
1264 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); 1283 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1265 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 1284 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1266 int successes = 0;
1267 struct ieee80211_tx_info *info; 1285 struct ieee80211_tx_info *info;
1286 u64 bitmap, sent_bitmap;
1268 1287
1269 if (unlikely(!agg->wait_for_ba)) { 1288 if (unlikely(!agg->wait_for_ba)) {
1270 if (unlikely(ba_resp->bitmap)) 1289 if (unlikely(ba_resp->bitmap))
@@ -1278,70 +1297,42 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1278 1297
1279 /* Calculate shift to align block-ack bits with our Tx window bits */ 1298 /* Calculate shift to align block-ack bits with our Tx window bits */
1280 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); 1299 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1281 if (sh < 0) /* tbw something is wrong with indices */ 1300 if (sh < 0)
1282 sh += 0x100; 1301 sh += 0x100;
1283 1302
1284 if (agg->frame_count > (64 - sh)) { 1303 /*
1285 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); 1304 * Check for success or failure according to the
1286 return -1; 1305 * transmitted bitmap and block-ack bitmap
1287 } 1306 */
1288 if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) { 1307 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1308 sent_bitmap = bitmap & agg->bitmap;
1309
1310 /* Sanity check values reported by uCode */
1311 if (ba_resp->txed_2_done > ba_resp->txed) {
1312 IWL_DEBUG_TX_REPLY(priv,
1313 "bogus sent(%d) and ack(%d) count\n",
1314 ba_resp->txed, ba_resp->txed_2_done);
1289 /* 1315 /*
1290 * sent and ack information provided by uCode 1316 * set txed_2_done = txed,
1291 * use it instead of figure out ourself 1317 * so it won't impact rate scale
1292 */ 1318 */
1293 if (ba_resp->txed_2_done > ba_resp->txed) { 1319 ba_resp->txed = ba_resp->txed_2_done;
1294 IWL_DEBUG_TX_REPLY(priv, 1320 }
1295 "bogus sent(%d) and ack(%d) count\n", 1321 IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
1296 ba_resp->txed, ba_resp->txed_2_done); 1322 ba_resp->txed, ba_resp->txed_2_done);
1297 /*
1298 * set txed_2_done = txed,
1299 * so it won't impact rate scale
1300 */
1301 ba_resp->txed = ba_resp->txed_2_done;
1302 }
1303 IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
1304 ba_resp->txed, ba_resp->txed_2_done);
1305 } else {
1306 u64 bitmap, sent_bitmap;
1307
1308 /* don't use 64-bit values for now */
1309 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1310
1311 /* check for success or failure according to the
1312 * transmitted bitmap and block-ack bitmap */
1313 sent_bitmap = bitmap & agg->bitmap;
1314
1315 /* For each frame attempted in aggregation,
1316 * update driver's record of tx frame's status. */
1317 i = 0;
1318 while (sent_bitmap) {
1319 ack = sent_bitmap & 1ULL;
1320 successes += ack;
1321 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1322 ack ? "ACK" : "NACK", i,
1323 (agg->start_idx + i) & 0xff,
1324 agg->start_idx + i);
1325 sent_bitmap >>= 1;
1326 ++i;
1327 }
1328 1323
1329 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", 1324 /* Find the first ACKed frame to store the TX status */
1330 (unsigned long long)bitmap); 1325 while (sent_bitmap && !(sent_bitmap & 1)) {
1326 agg->start_idx = (agg->start_idx + 1) & 0xff;
1327 sent_bitmap >>= 1;
1331 } 1328 }
1332 1329
1333 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); 1330 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1334 memset(&info->status, 0, sizeof(info->status)); 1331 memset(&info->status, 0, sizeof(info->status));
1335 info->flags |= IEEE80211_TX_STAT_ACK; 1332 info->flags |= IEEE80211_TX_STAT_ACK;
1336 info->flags |= IEEE80211_TX_STAT_AMPDU; 1333 info->flags |= IEEE80211_TX_STAT_AMPDU;
1337 if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) { 1334 info->status.ampdu_ack_len = ba_resp->txed_2_done;
1338 info->status.ampdu_ack_len = ba_resp->txed_2_done; 1335 info->status.ampdu_len = ba_resp->txed;
1339 info->status.ampdu_len = ba_resp->txed;
1340
1341 } else {
1342 info->status.ampdu_ack_len = successes;
1343 info->status.ampdu_len = agg->frame_count;
1344 }
1345 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); 1336 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1346 1337
1347 return 0; 1338 return 0;
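The rewritten block-ack handler above shifts the uCode bitmap to align it with the driver's Tx window, masks it against the frames actually sent, and advances start_idx to the first acknowledged frame. A standalone sketch of that bit manipulation, assuming the shift stays within a 64-bit word and using plain integers instead of the firmware structures:

#include <stdint.h>
#include <stdio.h>

/* Align a 64-bit block-ack bitmap with the Tx window, keep only the
 * frames that were actually transmitted, and return the window index of
 * the first acknowledged frame, or -1 if none were ACKed.  The shift is
 * assumed to be in [0, 63]; the driver additionally handles sequence
 * number wrap-around. */
static int first_acked(uint64_t ba_bitmap, uint64_t sent_mask, unsigned int sh)
{
	uint64_t sent_bitmap = (ba_bitmap >> sh) & sent_mask;
	int idx = 0;

	if (!sent_bitmap)
		return -1;

	while (!(sent_bitmap & 1)) {	/* skip NACKed leading frames */
		sent_bitmap >>= 1;
		idx++;
	}
	return idx;
}

int main(void)
{
	/* Frames 2..5 of an 8-frame window were ACKed (bits 2-5 set). */
	printf("first ACKed frame at window index %d\n",
	       first_acked(0x3cULL, 0xffULL, 0));
	return 0;
}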
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index d807e5e2b718..8bda0e8d6661 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -161,47 +161,19 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
161} 161}
162 162
163static int iwlagn_load_given_ucode(struct iwl_priv *priv, 163static int iwlagn_load_given_ucode(struct iwl_priv *priv,
164 struct fw_desc *inst_image, 164 struct fw_img *image)
165 struct fw_desc *data_image)
166{ 165{
167 int ret = 0; 166 int ret = 0;
168 167
169 ret = iwlagn_load_section(priv, "INST", inst_image, 168 ret = iwlagn_load_section(priv, "INST", &image->code,
170 IWLAGN_RTC_INST_LOWER_BOUND); 169 IWLAGN_RTC_INST_LOWER_BOUND);
171 if (ret) 170 if (ret)
172 return ret; 171 return ret;
173 172
174 return iwlagn_load_section(priv, "DATA", data_image, 173 return iwlagn_load_section(priv, "DATA", &image->data,
175 IWLAGN_RTC_DATA_LOWER_BOUND); 174 IWLAGN_RTC_DATA_LOWER_BOUND);
176} 175}
177 176
178int iwlagn_load_ucode(struct iwl_priv *priv)
179{
180 int ret = 0;
181
182 /* check whether init ucode should be loaded, or rather runtime ucode */
183 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
184 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
185 ret = iwlagn_load_given_ucode(priv,
186 &priv->ucode_init, &priv->ucode_init_data);
187 if (!ret) {
188 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
189 priv->ucode_type = UCODE_INIT;
190 }
191 } else {
192 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
193 "Loading runtime ucode...\n");
194 ret = iwlagn_load_given_ucode(priv,
195 &priv->ucode_code, &priv->ucode_data);
196 if (!ret) {
197 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
198 priv->ucode_type = UCODE_RT;
199 }
200 }
201
202 return ret;
203}
204
205/* 177/*
206 * Calibration 178 * Calibration
207 */ 179 */
@@ -297,33 +269,9 @@ void iwlagn_rx_calib_result(struct iwl_priv *priv,
297 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len); 269 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
298} 270}
299 271
300void iwlagn_rx_calib_complete(struct iwl_priv *priv, 272int iwlagn_init_alive_start(struct iwl_priv *priv)
301 struct iwl_rx_mem_buffer *rxb)
302{ 273{
303 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n"); 274 int ret;
304 queue_work(priv->workqueue, &priv->restart);
305}
306
307void iwlagn_init_alive_start(struct iwl_priv *priv)
308{
309 int ret = 0;
310
311 /* initialize uCode was loaded... verify inst image.
312 * This is a paranoid check, because we would not have gotten the
313 * "initialize" alive if code weren't properly loaded. */
314 if (iwl_verify_ucode(priv)) {
315 /* Runtime instruction load was bad;
316 * take it all the way back down so we can try again */
317 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
318 goto restart;
319 }
320
321 ret = priv->cfg->ops->lib->alive_notify(priv);
322 if (ret) {
323 IWL_WARN(priv,
324 "Could not complete ALIVE transition: %d\n", ret);
325 goto restart;
326 }
327 275
328 if (priv->cfg->bt_params && 276 if (priv->cfg->bt_params &&
329 priv->cfg->bt_params->advanced_bt_coexist) { 277 priv->cfg->bt_params->advanced_bt_coexist) {
@@ -333,24 +281,25 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
333 * no need to close the envelope since we are going 281 * no need to close the envelope since we are going
334 * to load the runtime uCode later. 282 * to load the runtime uCode later.
335 */ 283 */
336 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, 284 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
337 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 285 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
286 if (ret)
287 return ret;
338 288
339 } 289 }
340 iwlagn_send_calib_cfg(priv); 290
291 ret = iwlagn_send_calib_cfg(priv);
292 if (ret)
293 return ret;
341 294
342 /** 295 /**
343 * temperature offset calibration is only needed for runtime ucode, 296 * temperature offset calibration is only needed for runtime ucode,
344 * so prepare the value now. 297 * so prepare the value now.
345 */ 298 */
346 if (priv->cfg->need_temp_offset_calib) 299 if (priv->cfg->need_temp_offset_calib)
347 iwlagn_set_temperature_offset_calib(priv); 300 return iwlagn_set_temperature_offset_calib(priv);
348 301
349 return; 302 return 0;
350
351restart:
352 /* real restart (first load init_ucode) */
353 queue_work(priv->workqueue, &priv->restart);
354} 303}
355 304
356static int iwlagn_send_wimax_coex(struct iwl_priv *priv) 305static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
@@ -413,25 +362,30 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)
413 IWL_ERR(priv, "failed to send BT prio tbl command\n"); 362 IWL_ERR(priv, "failed to send BT prio tbl command\n");
414} 363}
415 364
416void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) 365int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
417{ 366{
418 struct iwl_bt_coex_prot_env_cmd env_cmd; 367 struct iwl_bt_coex_prot_env_cmd env_cmd;
368 int ret;
419 369
420 env_cmd.action = action; 370 env_cmd.action = action;
421 env_cmd.type = type; 371 env_cmd.type = type;
422 if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV, 372 ret = iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
423 sizeof(env_cmd), &env_cmd)) 373 sizeof(env_cmd), &env_cmd);
374 if (ret)
424 IWL_ERR(priv, "failed to send BT env command\n"); 375 IWL_ERR(priv, "failed to send BT env command\n");
376 return ret;
425} 377}
426 378
427 379
428int iwlagn_alive_notify(struct iwl_priv *priv) 380static int iwlagn_alive_notify(struct iwl_priv *priv)
429{ 381{
430 const struct queue_to_fifo_ac *queue_to_fifo; 382 const struct queue_to_fifo_ac *queue_to_fifo;
383 struct iwl_rxon_context *ctx;
431 u32 a; 384 u32 a;
432 unsigned long flags; 385 unsigned long flags;
433 int i, chan; 386 int i, chan;
434 u32 reg_val; 387 u32 reg_val;
388 int ret;
435 389
436 spin_lock_irqsave(&priv->lock, flags); 390 spin_lock_irqsave(&priv->lock, flags);
437 391
@@ -500,6 +454,8 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
500 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 454 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
501 for (i = 0; i < 4; i++) 455 for (i = 0; i < 4; i++)
502 atomic_set(&priv->queue_stop_count[i], 0); 456 atomic_set(&priv->queue_stop_count[i], 0);
457 for_each_context(priv, ctx)
458 ctx->last_tx_rejected = false;
503 459
504 /* reset to 0 to enable all the queue first */ 460 /* reset to 0 to enable all the queue first */
505 priv->txq_ctx_active_msk = 0; 461 priv->txq_ctx_active_msk = 0;
@@ -527,12 +483,15 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
527 iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG, 483 iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
528 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 484 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
529 485
530 iwlagn_send_wimax_coex(priv); 486 ret = iwlagn_send_wimax_coex(priv);
487 if (ret)
488 return ret;
531 489
532 iwlagn_set_Xtal_calib(priv); 490 ret = iwlagn_set_Xtal_calib(priv);
533 iwl_send_calib_results(priv); 491 if (ret)
492 return ret;
534 493
535 return 0; 494 return iwl_send_calib_results(priv);
536} 495}
537 496
538 497
@@ -541,11 +500,12 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
541 * using sample data 100 bytes apart. If these sample points are good, 500 * using sample data 100 bytes apart. If these sample points are good,
542 * it's a pretty good bet that everything between them is good, too. 501 * it's a pretty good bet that everything between them is good, too.
543 */ 502 */
544static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) 503static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
504 struct fw_desc *fw_desc)
545{ 505{
506 __le32 *image = (__le32 *)fw_desc->v_addr;
507 u32 len = fw_desc->len;
546 u32 val; 508 u32 val;
547 int ret = 0;
548 u32 errcnt = 0;
549 u32 i; 509 u32 i;
550 510
551 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); 511 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
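The sparse verifier above samples one word every 100 bytes and treats matching samples as evidence that the span between them is intact. A userspace sketch of the same sampling idea over two ordinary buffers; the helper below is hypothetical and does not use the real HBUS register accessors:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Compare 'image' against 'device' by sampling one 32-bit word every
 * 100 bytes; return 0 if all samples match, -1 on the first mismatch. */
static int verify_sparse(const uint8_t *image, const uint8_t *device, size_t len)
{
	size_t i;
	uint32_t a, b;

	for (i = 0; i + sizeof(uint32_t) <= len; i += 100) {
		memcpy(&a, image + i, sizeof(a));
		memcpy(&b, device + i, sizeof(b));
		if (a != b) {
			fprintf(stderr, "mismatch at offset 0x%zx\n", i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t img[1024], dev[1024];

	memset(img, 0xab, sizeof(img));
	memcpy(dev, img, sizeof(dev));
	printf("sparse verify: %s\n",
	       verify_sparse(img, dev, sizeof(img)) ? "FAILED" : "ok");
	return 0;
}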
@@ -556,104 +516,204 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
556 * if IWL_DL_IO is set */ 516 * if IWL_DL_IO is set */
557 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 517 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
558 i + IWLAGN_RTC_INST_LOWER_BOUND); 518 i + IWLAGN_RTC_INST_LOWER_BOUND);
559 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 519 val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
560 if (val != le32_to_cpu(*image)) { 520 if (val != le32_to_cpu(*image))
561 ret = -EIO; 521 return -EIO;
562 errcnt++;
563 if (errcnt >= 3)
564 break;
565 }
566 } 522 }
567 523
568 return ret; 524 return 0;
569} 525}
570 526
571/** 527static void iwl_print_mismatch_inst(struct iwl_priv *priv,
572 * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host, 528 struct fw_desc *fw_desc)
573 * looking at all data.
574 */
575static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
576 u32 len)
577{ 529{
530 __le32 *image = (__le32 *)fw_desc->v_addr;
531 u32 len = fw_desc->len;
578 u32 val; 532 u32 val;
579 u32 save_len = len; 533 u32 offs;
580 int ret = 0; 534 int errors = 0;
581 u32 errcnt;
582 535
583 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); 536 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
584 537
585 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 538 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
586 IWLAGN_RTC_INST_LOWER_BOUND); 539 IWLAGN_RTC_INST_LOWER_BOUND);
587 540
588 errcnt = 0; 541 for (offs = 0;
589 for (; len > 0; len -= sizeof(u32), image++) { 542 offs < len && errors < 20;
543 offs += sizeof(u32), image++) {
590 /* read data comes through single port, auto-incr addr */ 544 /* read data comes through single port, auto-incr addr */
591 /* NOTE: Use the debugless read so we don't flood kernel log 545 val = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
592 * if IWL_DL_IO is set */
593 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
594 if (val != le32_to_cpu(*image)) { 546 if (val != le32_to_cpu(*image)) {
595 IWL_ERR(priv, "uCode INST section is invalid at " 547 IWL_ERR(priv, "uCode INST section at "
596 "offset 0x%x, is 0x%x, s/b 0x%x\n", 548 "offset 0x%x, is 0x%x, s/b 0x%x\n",
597 save_len - len, val, le32_to_cpu(*image)); 549 offs, val, le32_to_cpu(*image));
598 ret = -EIO; 550 errors++;
599 errcnt++;
600 if (errcnt >= 20)
601 break;
602 } 551 }
603 } 552 }
604
605 if (!errcnt)
606 IWL_DEBUG_INFO(priv,
607 "ucode image in INSTRUCTION memory is good\n");
608
609 return ret;
610} 553}
611 554
612/** 555/**
613 * iwl_verify_ucode - determine which instruction image is in SRAM, 556 * iwl_verify_ucode - determine which instruction image is in SRAM,
614 * and verify its contents 557 * and verify its contents
615 */ 558 */
616int iwl_verify_ucode(struct iwl_priv *priv) 559static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
560{
561 if (!iwlcore_verify_inst_sparse(priv, &img->code)) {
562 IWL_DEBUG_INFO(priv, "uCode is good in inst SRAM\n");
563 return 0;
564 }
565
566 IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
567
568 iwl_print_mismatch_inst(priv, &img->code);
569 return -EIO;
570}
571
572struct iwlagn_alive_data {
573 bool valid;
574 u8 subtype;
575};
576
577static void iwlagn_alive_fn(struct iwl_priv *priv,
578 struct iwl_rx_packet *pkt,
579 void *data)
617{ 580{
618 __le32 *image; 581 struct iwlagn_alive_data *alive_data = data;
619 u32 len; 582 struct iwl_alive_resp *palive;
583
584 palive = &pkt->u.alive_frame;
585
586 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
587 "0x%01X 0x%01X\n",
588 palive->is_valid, palive->ver_type,
589 palive->ver_subtype);
590
591 priv->device_pointers.error_event_table =
592 le32_to_cpu(palive->error_event_table_ptr);
593 priv->device_pointers.log_event_table =
594 le32_to_cpu(palive->log_event_table_ptr);
595
596 alive_data->subtype = palive->ver_subtype;
597 alive_data->valid = palive->is_valid == UCODE_VALID_OK;
598}
599
600#define UCODE_ALIVE_TIMEOUT HZ
601#define UCODE_CALIB_TIMEOUT (2*HZ)
602
603int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
604 struct fw_img *image,
605 int subtype, int alternate_subtype)
606{
607 struct iwl_notification_wait alive_wait;
608 struct iwlagn_alive_data alive_data;
620 int ret; 609 int ret;
610 enum iwlagn_ucode_subtype old_type;
621 611
622 /* Try bootstrap */ 612 ret = iwlagn_start_device(priv);
623 image = (__le32 *)priv->ucode_boot.v_addr; 613 if (ret)
624 len = priv->ucode_boot.len; 614 return ret;
625 ret = iwlcore_verify_inst_sparse(priv, image, len); 615
626	if (!ret) {
627		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
628		return 0;
629	}
630
631	/* Try initialize */
632	image = (__le32 *)priv->ucode_init.v_addr;
633	len = priv->ucode_init.len;
634	ret = iwlcore_verify_inst_sparse(priv, image, len);
635	if (!ret) {
636		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
637		return 0;
638	}
639
640	/* Try runtime/protocol */
641	image = (__le32 *)priv->ucode_code.v_addr;
642	len = priv->ucode_code.len;
643	ret = iwlcore_verify_inst_sparse(priv, image, len);
644	if (!ret) {
645		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
646		return 0;
647	}
648
649	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
650
651	/* Since nothing seems to match, show first several data entries in
652	 * instruction SRAM, so maybe visual inspection will give a clue.
653	 * Selection of bootstrap image (vs. other images) is arbitrary. */
654	image = (__le32 *)priv->ucode_boot.v_addr;
655	len = priv->ucode_boot.len;
656	ret = iwl_verify_inst_full(priv, image, len);
657
658	return ret;
659}
616	iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
617				      iwlagn_alive_fn, &alive_data);
618
619	old_type = priv->ucode_type;
620	priv->ucode_type = subtype;
621
622	ret = iwlagn_load_given_ucode(priv, image);
623	if (ret) {
624		priv->ucode_type = old_type;
625		iwlagn_remove_notification(priv, &alive_wait);
626		return ret;
627	}
628
629	/* Remove all resets to allow NIC to operate */
630	iwl_write32(priv, CSR_RESET, 0);
631
632	/*
633	 * Some things may run in the background now, but we
634	 * just wait for the ALIVE notification here.
635	 */
636	ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
637	if (ret) {
638		priv->ucode_type = old_type;
639		return ret;
640	}
641
642	if (!alive_data.valid) {
643		IWL_ERR(priv, "Loaded ucode is not valid!\n");
644		priv->ucode_type = old_type;
645		return -EIO;
646	}
647
648	if (alive_data.subtype != subtype &&
649	    alive_data.subtype != alternate_subtype) {
650		IWL_ERR(priv,
651			"Loaded ucode is not expected type (got %d, expected %d)!\n",
652			alive_data.subtype, subtype);
653		priv->ucode_type = old_type;
654		return -EIO;
655	}
656
657	ret = iwl_verify_ucode(priv, image);
658	if (ret) {
659		priv->ucode_type = old_type;
660		return ret;
661	}
662
663	/* delay a bit to give rfkill time to run */
664	msleep(5);
665
666	ret = iwlagn_alive_notify(priv);
667	if (ret) {
668		IWL_WARN(priv,
669			"Could not complete ALIVE transition: %d\n", ret);
670		priv->ucode_type = old_type;
671		return ret;
672	}
673
674	return 0;
675}
676
677int iwlagn_run_init_ucode(struct iwl_priv *priv)
678{
679	struct iwl_notification_wait calib_wait;
680	int ret;
681
682	lockdep_assert_held(&priv->mutex);
683
684	/* No init ucode required? Curious, but maybe ok */
685	if (!priv->ucode_init.code.len)
686		return 0;
687
688	if (priv->ucode_type != UCODE_SUBTYPE_NONE_LOADED)
689		return 0;
690
691	iwlagn_init_notification_wait(priv, &calib_wait,
692				      CALIBRATION_COMPLETE_NOTIFICATION,
693				      NULL, NULL);
694
695	/* Will also start the device */
696	ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
697					   UCODE_SUBTYPE_INIT, -1);
698	if (ret)
699		goto error;
700
701	ret = iwlagn_init_alive_start(priv);
702	if (ret)
703		goto error;
704
705	/*
706	 * Some things may run in the background now, but we
707	 * just wait for the calibration complete notification.
708	 */
709	ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
710
711	goto out;
712
713 error:
714	iwlagn_remove_notification(priv, &calib_wait);
715 out:
716	/* Whatever happened, stop the device */
717	iwlagn_stop_device(priv);
718	return ret;
719}
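
The two functions above share one pattern: register a notification wait, kick off the firmware load, then block for the notification with a timeout, removing the wait on any early exit. Below is a condensed sketch of that pattern using the driver helpers introduced in this hunk; the iwlagn_alive_data type name for the structure that iwlagn_alive_fn fills in is an assumption, and the ucode_type bookkeeping and subtype checks are trimmed for brevity.

/* Illustrative sketch only -- not part of the patch. */
static int load_and_wait_alive_sketch(struct iwl_priv *priv,
				      struct fw_img *image)
{
	struct iwl_notification_wait alive_wait;
	struct iwlagn_alive_data alive_data = {};	/* assumed type name */
	int ret;

	/* 1. Register interest in REPLY_ALIVE before starting the load. */
	iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
				      iwlagn_alive_fn, &alive_data);

	/* 2. Start the DMA load; on failure the wait must be removed. */
	ret = iwlagn_load_given_ucode(priv, image);
	if (ret) {
		iwlagn_remove_notification(priv, &alive_wait);
		return ret;
	}

	/* 3. Release the NIC from reset and block until ALIVE or timeout. */
	iwl_write32(priv, CSR_RESET, 0);
	ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	return alive_data.valid ? 0 : -EIO;
}
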
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 321b18b59135..3ecc3198d9bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -59,7 +59,6 @@
59#include "iwl-sta.h" 59#include "iwl-sta.h"
60#include "iwl-agn-calib.h" 60#include "iwl-agn-calib.h"
61#include "iwl-agn.h" 61#include "iwl-agn.h"
62#include "iwl-agn-led.h"
63 62
64 63
65/****************************************************************************** 64/******************************************************************************
@@ -103,70 +102,6 @@ void iwl_update_chain_flags(struct iwl_priv *priv)
103 } 102 }
104} 103}
105 104
106static void iwl_clear_free_frames(struct iwl_priv *priv)
107{
108 struct list_head *element;
109
110 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
111 priv->frames_count);
112
113 while (!list_empty(&priv->free_frames)) {
114 element = priv->free_frames.next;
115 list_del(element);
116 kfree(list_entry(element, struct iwl_frame, list));
117 priv->frames_count--;
118 }
119
120 if (priv->frames_count) {
121 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
122 priv->frames_count);
123 priv->frames_count = 0;
124 }
125}
126
127static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
128{
129 struct iwl_frame *frame;
130 struct list_head *element;
131 if (list_empty(&priv->free_frames)) {
132 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
133 if (!frame) {
134 IWL_ERR(priv, "Could not allocate frame!\n");
135 return NULL;
136 }
137
138 priv->frames_count++;
139 return frame;
140 }
141
142 element = priv->free_frames.next;
143 list_del(element);
144 return list_entry(element, struct iwl_frame, list);
145}
146
147static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
148{
149 memset(frame, 0, sizeof(*frame));
150 list_add(&frame->list, &priv->free_frames);
151}
152
153static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
154 struct ieee80211_hdr *hdr,
155 int left)
156{
157 lockdep_assert_held(&priv->mutex);
158
159 if (!priv->beacon_skb)
160 return 0;
161
162 if (priv->beacon_skb->len > left)
163 return 0;
164
165 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
166
167 return priv->beacon_skb->len;
168}
169
170/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ 105/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
171static void iwl_set_beacon_tim(struct iwl_priv *priv, 106static void iwl_set_beacon_tim(struct iwl_priv *priv,
172 struct iwl_tx_beacon_cmd *tx_beacon_cmd, 107 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
@@ -194,13 +129,18 @@ static void iwl_set_beacon_tim(struct iwl_priv *priv,
194 IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); 129 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
195} 130}
196 131
197static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, 132int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
198 struct iwl_frame *frame)
199{ 133{
200 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 134 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
135 struct iwl_host_cmd cmd = {
136 .id = REPLY_TX_BEACON,
137 .flags = CMD_SIZE_HUGE,
138 };
201 u32 frame_size; 139 u32 frame_size;
202 u32 rate_flags; 140 u32 rate_flags;
203 u32 rate; 141 u32 rate;
142 int err;
143
204 /* 144 /*
205 * We have to set up the TX command, the TX Beacon command, and the 145 * We have to set up the TX command, the TX Beacon command, and the
206 * beacon contents. 146 * beacon contents.
@@ -213,17 +153,19 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
213 return 0; 153 return 0;
214 } 154 }
215 155
216	/* Initialize memory */
217	tx_beacon_cmd = &frame->u.beacon;
218	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
219
220	/* Set up TX beacon contents */
221	frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
222				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
223	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
224		return 0;
225	if (!frame_size)
226		return 0;
227
156	if (WARN_ON(!priv->beacon_skb))
157		return -EINVAL;
158
159	/* Allocate beacon memory */
160	tx_beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd) + priv->beacon_skb->len,
161				GFP_KERNEL);
162	if (!tx_beacon_cmd)
163		return -ENOMEM;
164
165	frame_size = priv->beacon_skb->len;
166
167	/* Set up TX beacon contents */
168	memcpy(tx_beacon_cmd->frame, priv->beacon_skb->data, frame_size);
169
228 /* Set up TX command fields */ 170 /* Set up TX command fields */
229 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); 171 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
@@ -246,35 +188,16 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
246 tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate, 188 tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
247 rate_flags); 189 rate_flags);
248 190
249	return sizeof(*tx_beacon_cmd) + frame_size;
250}
251
252int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
253{
254	struct iwl_frame *frame;
255	unsigned int frame_size;
256	int rc;
257
258	frame = iwl_get_free_frame(priv);
259	if (!frame) {
260		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
261			"command.\n");
262		return -ENOMEM;
263	}
264
265	frame_size = iwl_hw_get_beacon_cmd(priv, frame);
266	if (!frame_size) {
267		IWL_ERR(priv, "Error configuring the beacon command\n");
268		iwl_free_frame(priv, frame);
269		return -EINVAL;
270	}
271
272	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
273			      &frame->u.cmd[0]);
274
275	iwl_free_frame(priv, frame);
276
277	return rc;
278}
191	/* Submit command */
192	cmd.len = sizeof(*tx_beacon_cmd) + frame_size;
193	cmd.data = tx_beacon_cmd;
194
195	err = iwl_send_cmd_sync(priv, &cmd);
196
197	/* Free temporary storage */
198	kfree(tx_beacon_cmd);
199
200	return err;
201}
279 202
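
The rewritten iwlagn_send_beacon_cmd() above drops the pre-allocated frame pool in favour of a plain allocate/copy/send/free sequence around a synchronous host command. A minimal sketch of that flow, using only identifiers visible in the hunk (rate, TIM and station setup omitted):

/* Illustrative sketch only -- not part of the patch. */
static int send_beacon_sketch(struct iwl_priv *priv)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_BEACON,
		.flags = CMD_SIZE_HUGE,
	};
	u32 frame_size;
	int err;

	if (WARN_ON(!priv->beacon_skb))
		return -EINVAL;

	frame_size = priv->beacon_skb->len;
	tx_beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd) + frame_size, GFP_KERNEL);
	if (!tx_beacon_cmd)
		return -ENOMEM;

	memcpy(tx_beacon_cmd->frame, priv->beacon_skb->data, frame_size);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);

	cmd.len = sizeof(*tx_beacon_cmd) + frame_size;
	cmd.data = tx_beacon_cmd;
	err = iwl_send_cmd_sync(priv, &cmd);

	kfree(tx_beacon_cmd);
	return err;
}
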
280static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) 203static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -395,7 +318,9 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
395 return -EINVAL; 318 return -EINVAL;
396 } 319 }
397 320
398 BUG_ON(addr & ~DMA_BIT_MASK(36)); 321 if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
322 return -EINVAL;
323
399 if (unlikely(addr & ~IWL_TX_DMA_MASK)) 324 if (unlikely(addr & ~IWL_TX_DMA_MASK))
400 IWL_ERR(priv, "Unaligned address = %llx\n", 325 IWL_ERR(priv, "Unaligned address = %llx\n",
401 (unsigned long long)addr); 326 (unsigned long long)addr);
@@ -409,7 +334,7 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
409 * Tell nic where to find circular buffer of Tx Frame Descriptors for 334 * Tell nic where to find circular buffer of Tx Frame Descriptors for
410 * given Tx queue, and enable the DMA channel used for that queue. 335 * given Tx queue, and enable the DMA channel used for that queue.
411 * 336 *
412 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA 337 * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
413 * channels supported in hardware. 338 * channels supported in hardware.
414 */ 339 */
415int iwl_hw_tx_queue_init(struct iwl_priv *priv, 340int iwl_hw_tx_queue_init(struct iwl_priv *priv,
@@ -483,12 +408,14 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
483 container_of(work, struct iwl_priv, bt_full_concurrency); 408 container_of(work, struct iwl_priv, bt_full_concurrency);
484 struct iwl_rxon_context *ctx; 409 struct iwl_rxon_context *ctx;
485 410
411 mutex_lock(&priv->mutex);
412
486 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 413 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
487 return; 414 goto out;
488 415
489 /* dont send host command if rf-kill is on */ 416 /* dont send host command if rf-kill is on */
490 if (!iwl_is_ready_rf(priv)) 417 if (!iwl_is_ready_rf(priv))
491 return; 418 goto out;
492 419
493 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n", 420 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
494 priv->bt_full_concurrent ? 421 priv->bt_full_concurrent ?
@@ -498,15 +425,15 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
498 * LQ & RXON updated cmds must be sent before BT Config cmd 425 * LQ & RXON updated cmds must be sent before BT Config cmd
499 * to avoid 3-wire collisions 426 * to avoid 3-wire collisions
500 */ 427 */
501 mutex_lock(&priv->mutex);
502 for_each_context(priv, ctx) { 428 for_each_context(priv, ctx) {
503 if (priv->cfg->ops->hcmd->set_rxon_chain) 429 if (priv->cfg->ops->hcmd->set_rxon_chain)
504 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 430 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
505 iwlcore_commit_rxon(priv, ctx); 431 iwlcore_commit_rxon(priv, ctx);
506 } 432 }
507 mutex_unlock(&priv->mutex);
508 433
509 priv->cfg->ops->hcmd->send_bt_config(priv); 434 priv->cfg->ops->hcmd->send_bt_config(priv);
435out:
436 mutex_unlock(&priv->mutex);
510} 437}
511 438
512/** 439/**
@@ -556,7 +483,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
556 } 483 }
557 484
558 /* Set starting address; reads will auto-increment */ 485 /* Set starting address; reads will auto-increment */
559 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); 486 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
560 rmb(); 487 rmb();
561 488
562 /* 489 /*
@@ -564,13 +491,13 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
564 * place event id # at far right for easier visual parsing. 491 * place event id # at far right for easier visual parsing.
565 */ 492 */
566 for (i = 0; i < num_events; i++) { 493 for (i = 0; i < num_events; i++) {
567 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 494 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
568 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 495 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
569 if (mode == 0) { 496 if (mode == 0) {
570 trace_iwlwifi_dev_ucode_cont_event(priv, 497 trace_iwlwifi_dev_ucode_cont_event(priv,
571 0, time, ev); 498 0, time, ev);
572 } else { 499 } else {
573 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 500 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
574 trace_iwlwifi_dev_ucode_cont_event(priv, 501 trace_iwlwifi_dev_ucode_cont_event(priv,
575 time, data, ev); 502 time, data, ev);
576 } 503 }
@@ -588,10 +515,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
588 u32 num_wraps; /* # times uCode wrapped to top of log */ 515 u32 num_wraps; /* # times uCode wrapped to top of log */
589 u32 next_entry; /* index of next entry to be written by uCode */ 516 u32 next_entry; /* index of next entry to be written by uCode */
590 517
591 if (priv->ucode_type == UCODE_INIT) 518 base = priv->device_pointers.error_event_table;
592 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
593 else
594 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
595 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 519 if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
596 capacity = iwl_read_targ_mem(priv, base); 520 capacity = iwl_read_targ_mem(priv, base);
597 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 521 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
@@ -720,7 +644,10 @@ static void iwl_rx_handle(struct iwl_priv *priv)
720 /* If an RXB doesn't have a Rx queue slot associated with it, 644 /* If an RXB doesn't have a Rx queue slot associated with it,
721 * then a bug has been introduced in the queue refilling 645 * then a bug has been introduced in the queue refilling
722 * routines -- catch it here */ 646 * routines -- catch it here */
723 BUG_ON(rxb == NULL); 647 if (WARN_ON(rxb == NULL)) {
648 i = (i + 1) & RX_QUEUE_MASK;
649 continue;
650 }
724 651
725 rxq->queue[i] = NULL; 652 rxq->queue[i] = NULL;
726 653
@@ -760,13 +687,15 @@ static void iwl_rx_handle(struct iwl_priv *priv)
760 if (w->cmd == pkt->hdr.cmd) { 687 if (w->cmd == pkt->hdr.cmd) {
761 w->triggered = true; 688 w->triggered = true;
762 if (w->fn) 689 if (w->fn)
763 w->fn(priv, pkt); 690 w->fn(priv, pkt, w->fn_data);
764 } 691 }
765 } 692 }
766 spin_unlock(&priv->_agn.notif_wait_lock); 693 spin_unlock(&priv->_agn.notif_wait_lock);
767 694
768 wake_up_all(&priv->_agn.notif_waitq); 695 wake_up_all(&priv->_agn.notif_waitq);
769 } 696 }
697 if (priv->pre_rx_handler)
698 priv->pre_rx_handler(priv, rxb);
770 699
771 /* Based on type of command response or notification, 700 /* Based on type of command response or notification,
772 * handle those that need handling via function in 701 * handle those that need handling via function in
@@ -837,199 +766,6 @@ static void iwl_rx_handle(struct iwl_priv *priv)
837 iwlagn_rx_queue_restock(priv); 766 iwlagn_rx_queue_restock(priv);
838} 767}
839 768
840/* call this function to flush any scheduled tasklet */
841static inline void iwl_synchronize_irq(struct iwl_priv *priv)
842{
843 /* wait to make sure we flush pending tasklet*/
844 synchronize_irq(priv->pci_dev->irq);
845 tasklet_kill(&priv->irq_tasklet);
846}
847
848static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
849{
850 u32 inta, handled = 0;
851 u32 inta_fh;
852 unsigned long flags;
853 u32 i;
854#ifdef CONFIG_IWLWIFI_DEBUG
855 u32 inta_mask;
856#endif
857
858 spin_lock_irqsave(&priv->lock, flags);
859
860 /* Ack/clear/reset pending uCode interrupts.
861 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
862 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
863 inta = iwl_read32(priv, CSR_INT);
864 iwl_write32(priv, CSR_INT, inta);
865
866 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
867 * Any new interrupts that happen after this, either while we're
868 * in this tasklet, or later, will show up in next ISR/tasklet. */
869 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
870 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
871
872#ifdef CONFIG_IWLWIFI_DEBUG
873 if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
874 /* just for debug */
875 inta_mask = iwl_read32(priv, CSR_INT_MASK);
876 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
877 inta, inta_mask, inta_fh);
878 }
879#endif
880
881 spin_unlock_irqrestore(&priv->lock, flags);
882
883 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
884 * atomic, make sure that inta covers all the interrupts that
885 * we've discovered, even if FH interrupt came in just after
886 * reading CSR_INT. */
887 if (inta_fh & CSR49_FH_INT_RX_MASK)
888 inta |= CSR_INT_BIT_FH_RX;
889 if (inta_fh & CSR49_FH_INT_TX_MASK)
890 inta |= CSR_INT_BIT_FH_TX;
891
892 /* Now service all interrupt bits discovered above. */
893 if (inta & CSR_INT_BIT_HW_ERR) {
894 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
895
896 /* Tell the device to stop sending interrupts */
897 iwl_disable_interrupts(priv);
898
899 priv->isr_stats.hw++;
900 iwl_irq_handle_error(priv);
901
902 handled |= CSR_INT_BIT_HW_ERR;
903
904 return;
905 }
906
907#ifdef CONFIG_IWLWIFI_DEBUG
908 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
909 /* NIC fires this, but we don't use it, redundant with WAKEUP */
910 if (inta & CSR_INT_BIT_SCD) {
911 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
912 "the frame/frames.\n");
913 priv->isr_stats.sch++;
914 }
915
916 /* Alive notification via Rx interrupt will do the real work */
917 if (inta & CSR_INT_BIT_ALIVE) {
918 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
919 priv->isr_stats.alive++;
920 }
921 }
922#endif
923 /* Safely ignore these bits for debug checks below */
924 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
925
926 /* HW RF KILL switch toggled */
927 if (inta & CSR_INT_BIT_RF_KILL) {
928 int hw_rf_kill = 0;
929 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
930 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
931 hw_rf_kill = 1;
932
933 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
934 hw_rf_kill ? "disable radio" : "enable radio");
935
936 priv->isr_stats.rfkill++;
937
938 /* driver only loads ucode once setting the interface up.
939 * the driver allows loading the ucode even if the radio
940 * is killed. Hence update the killswitch state here. The
941 * rfkill handler will care about restarting if needed.
942 */
943 if (!test_bit(STATUS_ALIVE, &priv->status)) {
944 if (hw_rf_kill)
945 set_bit(STATUS_RF_KILL_HW, &priv->status);
946 else
947 clear_bit(STATUS_RF_KILL_HW, &priv->status);
948 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
949 }
950
951 handled |= CSR_INT_BIT_RF_KILL;
952 }
953
954 /* Chip got too hot and stopped itself */
955 if (inta & CSR_INT_BIT_CT_KILL) {
956 IWL_ERR(priv, "Microcode CT kill error detected.\n");
957 priv->isr_stats.ctkill++;
958 handled |= CSR_INT_BIT_CT_KILL;
959 }
960
961 /* Error detected by uCode */
962 if (inta & CSR_INT_BIT_SW_ERR) {
963 IWL_ERR(priv, "Microcode SW error detected. "
964 " Restarting 0x%X.\n", inta);
965 priv->isr_stats.sw++;
966 iwl_irq_handle_error(priv);
967 handled |= CSR_INT_BIT_SW_ERR;
968 }
969
970 /*
971 * uCode wakes up after power-down sleep.
972 * Tell device about any new tx or host commands enqueued,
973 * and about any Rx buffers made available while asleep.
974 */
975 if (inta & CSR_INT_BIT_WAKEUP) {
976 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
977 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
978 for (i = 0; i < priv->hw_params.max_txq_num; i++)
979 iwl_txq_update_write_ptr(priv, &priv->txq[i]);
980 priv->isr_stats.wakeup++;
981 handled |= CSR_INT_BIT_WAKEUP;
982 }
983
984 /* All uCode command responses, including Tx command responses,
985 * Rx "responses" (frame-received notification), and other
986 * notifications from uCode come through here*/
987 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
988 iwl_rx_handle(priv);
989 priv->isr_stats.rx++;
990 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
991 }
992
993 /* This "Tx" DMA channel is used only for loading uCode */
994 if (inta & CSR_INT_BIT_FH_TX) {
995 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
996 priv->isr_stats.tx++;
997 handled |= CSR_INT_BIT_FH_TX;
998 /* Wake up uCode load routine, now that load is complete */
999 priv->ucode_write_complete = 1;
1000 wake_up_interruptible(&priv->wait_command_queue);
1001 }
1002
1003 if (inta & ~handled) {
1004 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1005 priv->isr_stats.unhandled++;
1006 }
1007
1008 if (inta & ~(priv->inta_mask)) {
1009 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1010 inta & ~priv->inta_mask);
1011 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1012 }
1013
1014 /* Re-enable all interrupts */
1015 /* only Re-enable if disabled by irq */
1016 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1017 iwl_enable_interrupts(priv);
1018 /* Re-enable RF_KILL if it occurred */
1019 else if (handled & CSR_INT_BIT_RF_KILL)
1020 iwl_enable_rfkill_int(priv);
1021
1022#ifdef CONFIG_IWLWIFI_DEBUG
1023 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1024 inta = iwl_read32(priv, CSR_INT);
1025 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1026 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1027 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1028 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1029 }
1030#endif
1031}
1032
1033/* tasklet for iwlagn interrupt */ 769/* tasklet for iwlagn interrupt */
1034static void iwl_irq_tasklet(struct iwl_priv *priv) 770static void iwl_irq_tasklet(struct iwl_priv *priv)
1035{ 771{
@@ -1171,7 +907,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1171 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 907 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1172 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 908 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1173 iwl_write32(priv, CSR_FH_INT_STATUS, 909 iwl_write32(priv, CSR_FH_INT_STATUS,
1174 CSR49_FH_INT_RX_MASK); 910 CSR_FH_INT_RX_MASK);
1175 } 911 }
1176 if (inta & CSR_INT_BIT_RX_PERIODIC) { 912 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1177 handled |= CSR_INT_BIT_RX_PERIODIC; 913 handled |= CSR_INT_BIT_RX_PERIODIC;
@@ -1209,7 +945,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1209 945
1210 /* This "Tx" DMA channel is used only for loading uCode */ 946 /* This "Tx" DMA channel is used only for loading uCode */
1211 if (inta & CSR_INT_BIT_FH_TX) { 947 if (inta & CSR_INT_BIT_FH_TX) {
1212 iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK); 948 iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1213 IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); 949 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1214 priv->isr_stats.tx++; 950 priv->isr_stats.tx++;
1215 handled |= CSR_INT_BIT_FH_TX; 951 handled |= CSR_INT_BIT_FH_TX;
@@ -1357,26 +1093,48 @@ static struct attribute_group iwl_attribute_group = {
1357 * 1093 *
1358 ******************************************************************************/ 1094 ******************************************************************************/
1359 1095
1360static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) 1096static void iwl_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
1097{
1098 if (desc->v_addr)
1099 dma_free_coherent(&pci_dev->dev, desc->len,
1100 desc->v_addr, desc->p_addr);
1101 desc->v_addr = NULL;
1102 desc->len = 0;
1103}
1104
1105static void iwl_free_fw_img(struct pci_dev *pci_dev, struct fw_img *img)
1106{
1107 iwl_free_fw_desc(pci_dev, &img->code);
1108 iwl_free_fw_desc(pci_dev, &img->data);
1109}
1110
1111static int iwl_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc,
1112 const void *data, size_t len)
1361{ 1113{
1362 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 1114 if (!len) {
1363 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 1115 desc->v_addr = NULL;
1364 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 1116 return -EINVAL;
1365 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); 1117 }
1366 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); 1118
1367 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 1119 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, len,
1120 &desc->p_addr, GFP_KERNEL);
1121 if (!desc->v_addr)
1122 return -ENOMEM;
1123 desc->len = len;
1124 memcpy(desc->v_addr, data, len);
1125 return 0;
1368} 1126}
1369 1127
1370static void iwl_nic_start(struct iwl_priv *priv) 1128static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
1371{ 1129{
1372 /* Remove all resets to allow NIC to operate */ 1130 iwl_free_fw_img(priv->pci_dev, &priv->ucode_rt);
1373 iwl_write32(priv, CSR_RESET, 0); 1131 iwl_free_fw_img(priv->pci_dev, &priv->ucode_init);
1374} 1132}
1375 1133
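
The helpers above move firmware images into DMA-coherent memory: iwl_alloc_fw_desc() allocates one buffer per section and copies the data in, iwl_free_fw_img() releases both sections of an image. A hypothetical caller-side sketch (the function name and its parameters are illustrative, not taken from the patch):

/* Illustrative sketch only -- not part of the patch. */
static int load_img_sketch(struct iwl_priv *priv, struct fw_img *img,
			   const void *code, size_t code_len,
			   const void *data, size_t data_len)
{
	int ret;

	ret = iwl_alloc_fw_desc(priv->pci_dev, &img->code, code, code_len);
	if (!ret)
		ret = iwl_alloc_fw_desc(priv->pci_dev, &img->data,
					data, data_len);
	if (ret)
		/* partial allocations are safe to free; v_addr is NULL-checked */
		iwl_free_fw_img(priv->pci_dev, img);
	return ret;
}
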
1376struct iwlagn_ucode_capabilities { 1134struct iwlagn_ucode_capabilities {
1377 u32 max_probe_length; 1135 u32 max_probe_length;
1378 u32 standard_phy_calibration_size; 1136 u32 standard_phy_calibration_size;
1379 bool pan; 1137 u32 flags;
1380}; 1138};
1381 1139
1382static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); 1140static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
@@ -1422,8 +1180,8 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
1422} 1180}
1423 1181
1424struct iwlagn_firmware_pieces { 1182struct iwlagn_firmware_pieces {
1425 const void *inst, *data, *init, *init_data, *boot; 1183 const void *inst, *data, *init, *init_data;
1426 size_t inst_size, data_size, init_size, init_data_size, boot_size; 1184 size_t inst_size, data_size, init_size, init_data_size;
1427 1185
1428 u32 build; 1186 u32 build;
1429 1187
@@ -1444,28 +1202,18 @@ static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
1444 1202
1445 switch (api_ver) { 1203 switch (api_ver) {
1446 default: 1204 default:
1447 /* 1205 hdr_size = 28;
1448 * 4965 doesn't revision the firmware file format 1206 if (ucode_raw->size < hdr_size) {
1449 * along with the API version, it always uses v1 1207 IWL_ERR(priv, "File size too small!\n");
1450 * file format. 1208 return -EINVAL;
1451 */
1452 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) !=
1453 CSR_HW_REV_TYPE_4965) {
1454 hdr_size = 28;
1455 if (ucode_raw->size < hdr_size) {
1456 IWL_ERR(priv, "File size too small!\n");
1457 return -EINVAL;
1458 }
1459 pieces->build = le32_to_cpu(ucode->u.v2.build);
1460 pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
1461 pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
1462 pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
1463 pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
1464 pieces->boot_size = le32_to_cpu(ucode->u.v2.boot_size);
1465 src = ucode->u.v2.data;
1466 break;
1467 } 1209 }
1468 /* fall through for 4965 */ 1210 pieces->build = le32_to_cpu(ucode->u.v2.build);
1211 pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
1212 pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
1213 pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
1214 pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
1215 src = ucode->u.v2.data;
1216 break;
1469 case 0: 1217 case 0:
1470 case 1: 1218 case 1:
1471 case 2: 1219 case 2:
@@ -1479,7 +1227,6 @@ static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
1479 pieces->data_size = le32_to_cpu(ucode->u.v1.data_size); 1227 pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
1480 pieces->init_size = le32_to_cpu(ucode->u.v1.init_size); 1228 pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
1481 pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size); 1229 pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
1482 pieces->boot_size = le32_to_cpu(ucode->u.v1.boot_size);
1483 src = ucode->u.v1.data; 1230 src = ucode->u.v1.data;
1484 break; 1231 break;
1485 } 1232 }
@@ -1487,7 +1234,7 @@ static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
1487 /* Verify size of file vs. image size info in file's header */ 1234 /* Verify size of file vs. image size info in file's header */
1488 if (ucode_raw->size != hdr_size + pieces->inst_size + 1235 if (ucode_raw->size != hdr_size + pieces->inst_size +
1489 pieces->data_size + pieces->init_size + 1236 pieces->data_size + pieces->init_size +
1490 pieces->init_data_size + pieces->boot_size) { 1237 pieces->init_data_size) {
1491 1238
1492 IWL_ERR(priv, 1239 IWL_ERR(priv,
1493 "uCode file size %d does not match expected size\n", 1240 "uCode file size %d does not match expected size\n",
@@ -1503,8 +1250,6 @@ static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
1503 src += pieces->init_size; 1250 src += pieces->init_size;
1504 pieces->init_data = src; 1251 pieces->init_data = src;
1505 src += pieces->init_data_size; 1252 src += pieces->init_data_size;
1506 pieces->boot = src;
1507 src += pieces->boot_size;
1508 1253
1509 return 0; 1254 return 0;
1510} 1255}
@@ -1605,8 +1350,7 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1605 pieces->init_data_size = tlv_len; 1350 pieces->init_data_size = tlv_len;
1606 break; 1351 break;
1607 case IWL_UCODE_TLV_BOOT: 1352 case IWL_UCODE_TLV_BOOT:
1608 pieces->boot = tlv_data; 1353 IWL_ERR(priv, "Found unexpected BOOT ucode\n");
1609 pieces->boot_size = tlv_len;
1610 break; 1354 break;
1611 case IWL_UCODE_TLV_PROBE_MAX_LEN: 1355 case IWL_UCODE_TLV_PROBE_MAX_LEN:
1612 if (tlv_len != sizeof(u32)) 1356 if (tlv_len != sizeof(u32))
@@ -1617,7 +1361,23 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1617 case IWL_UCODE_TLV_PAN: 1361 case IWL_UCODE_TLV_PAN:
1618 if (tlv_len) 1362 if (tlv_len)
1619 goto invalid_tlv_len; 1363 goto invalid_tlv_len;
1620 capa->pan = true; 1364 capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
1365 break;
1366 case IWL_UCODE_TLV_FLAGS:
1367 /* must be at least one u32 */
1368 if (tlv_len < sizeof(u32))
1369 goto invalid_tlv_len;
1370 /* and a proper number of u32s */
1371 if (tlv_len % sizeof(u32))
1372 goto invalid_tlv_len;
1373 /*
1374 * This driver only reads the first u32 as
1375 * right now no more features are defined,
1376 * if that changes then either the driver
1377 * will not work with the new firmware, or
1378 * it'll not take advantage of new features.
1379 */
1380 capa->flags = le32_to_cpup((__le32 *)tlv_data);
1621 break; 1381 break;
1622 case IWL_UCODE_TLV_INIT_EVTLOG_PTR: 1382 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
1623 if (tlv_len != sizeof(u32)) 1383 if (tlv_len != sizeof(u32))
@@ -1667,7 +1427,7 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1667 le32_to_cpup((__le32 *)tlv_data); 1427 le32_to_cpup((__le32 *)tlv_data);
1668 break; 1428 break;
1669 default: 1429 default:
1670 IWL_WARN(priv, "unknown TLV: %d\n", tlv_type); 1430 IWL_DEBUG_INFO(priv, "unknown TLV: %d\n", tlv_type);
1671 break; 1431 break;
1672 } 1432 }
1673 } 1433 }
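
The IWL_UCODE_TLV_FLAGS handling above validates the TLV length and interprets only the first u32; the resulting bits are then tested as individual capabilities later in iwl_ucode_callback(). Both fragments are lifted from this patch and shown back to back only to make the data flow easier to follow:

	/* parse side (iwlagn_load_firmware) */
	if (tlv_len < sizeof(u32) || tlv_len % sizeof(u32))
		goto invalid_tlv_len;
	capa->flags = le32_to_cpup((__le32 *)tlv_data);

	/* consume side (iwl_ucode_callback) */
	priv->new_scan_threshold_behaviour =
		!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
	if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)
		priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
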
@@ -1806,8 +1566,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1806 pieces.init_size); 1566 pieces.init_size);
1807 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n", 1567 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1808 pieces.init_data_size); 1568 pieces.init_data_size);
1809 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1810 pieces.boot_size);
1811 1569
1812 /* Verify that uCode images will fit in card's SRAM */ 1570 /* Verify that uCode images will fit in card's SRAM */
1813 if (pieces.inst_size > priv->hw_params.max_inst_size) { 1571 if (pieces.inst_size > priv->hw_params.max_inst_size) {
@@ -1834,48 +1592,25 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1834 goto try_again; 1592 goto try_again;
1835 } 1593 }
1836 1594
1837 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1838 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1839 pieces.boot_size);
1840 goto try_again;
1841 }
1842
1843 /* Allocate ucode buffers for card's bus-master loading ... */ 1595 /* Allocate ucode buffers for card's bus-master loading ... */
1844 1596
1845 /* Runtime instructions and 2 copies of data: 1597 /* Runtime instructions and 2 copies of data:
1846 * 1) unmodified from disk 1598 * 1) unmodified from disk
1847 * 2) backup cache for save/restore during power-downs */ 1599 * 2) backup cache for save/restore during power-downs */
1848	priv->ucode_code.len = pieces.inst_size;
1849	iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1850
1851	priv->ucode_data.len = pieces.data_size;
1852	iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1853
1854	priv->ucode_data_backup.len = pieces.data_size;
1855	iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1856
1857	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1858	    !priv->ucode_data_backup.v_addr)
1859		goto err_pci_alloc;
1860
1861	/* Initialization instructions and data */
1862	if (pieces.init_size && pieces.init_data_size) {
1863		priv->ucode_init.len = pieces.init_size;
1864		iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1865
1866		priv->ucode_init_data.len = pieces.init_data_size;
1867		iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1868
1869		if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1870			goto err_pci_alloc;
1871	}
1872
1873	/* Bootstrap (instructions only, no data) */
1874	if (pieces.boot_size) {
1875		priv->ucode_boot.len = pieces.boot_size;
1876		iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1877
1878		if (!priv->ucode_boot.v_addr)
1879			goto err_pci_alloc;
1880	}
1600	if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.code,
1601			      pieces.inst, pieces.inst_size))
1602		goto err_pci_alloc;
1603	if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.data,
1604			      pieces.data, pieces.data_size))
1605		goto err_pci_alloc;
1606
1607	/* Initialization instructions and data */
1608	if (pieces.init_size && pieces.init_data_size) {
1609		if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.code,
1610				      pieces.init, pieces.init_size))
1611			goto err_pci_alloc;
1612		if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.data,
1613				      pieces.init_data, pieces.init_data_size))
1614			goto err_pci_alloc;
1615	}
1881 1616
@@ -1901,50 +1636,19 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1901 priv->cfg->base_params->max_event_log_size; 1636 priv->cfg->base_params->max_event_log_size;
1902 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr; 1637 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
1903 1638
1904 if (ucode_capa.pan) { 1639 priv->new_scan_threshold_behaviour =
1640 !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
1641
1642 if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
1905 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN); 1643 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
1906 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; 1644 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1907 } else 1645 } else
1908 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1646 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1909 1647
1648	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
1649		priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1650	else
1651		priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1910	/* Copy images into buffers for card's bus-master reads ... */
1911
1912	/* Runtime instructions (first block of data in file) */
1913	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1914		pieces.inst_size);
1915 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1916
1917 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1918 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1919
1920 /*
1921 * Runtime data
1922 * NOTE: Copy into backup buffer will be done in iwl_up()
1923 */
1924 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1925 pieces.data_size);
1926 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1927 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1928
1929 /* Initialization instructions */
1930 if (pieces.init_size) {
1931 IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n",
1932 pieces.init_size);
1933 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1934 }
1935
1936 /* Initialization data */
1937 if (pieces.init_data_size) {
1938 IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n",
1939 pieces.init_data_size);
1940 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1941 pieces.init_data_size);
1942 }
1943
1944 /* Bootstrap instructions */
1945 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1946 pieces.boot_size);
1947 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1948 1652
1949 /* 1653 /*
1950 * figure out the offset of chain noise reset and gain commands 1654 * figure out the offset of chain noise reset and gain commands
@@ -2076,13 +1780,13 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
2076 u32 desc, time, count, base, data1; 1780 u32 desc, time, count, base, data1;
2077 u32 blink1, blink2, ilink1, ilink2; 1781 u32 blink1, blink2, ilink1, ilink2;
2078 u32 pc, hcmd; 1782 u32 pc, hcmd;
1783 struct iwl_error_event_table table;
2079 1784
2080 if (priv->ucode_type == UCODE_INIT) { 1785 base = priv->device_pointers.error_event_table;
2081 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); 1786 if (priv->ucode_type == UCODE_SUBTYPE_INIT) {
2082 if (!base) 1787 if (!base)
2083 base = priv->_agn.init_errlog_ptr; 1788 base = priv->_agn.init_errlog_ptr;
2084 } else { 1789 } else {
2085 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
2086 if (!base) 1790 if (!base)
2087 base = priv->_agn.inst_errlog_ptr; 1791 base = priv->_agn.inst_errlog_ptr;
2088 } 1792 }
@@ -2090,11 +1794,15 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
2090 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 1794 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
2091 IWL_ERR(priv, 1795 IWL_ERR(priv,
2092 "Not valid error log pointer 0x%08X for %s uCode\n", 1796 "Not valid error log pointer 0x%08X for %s uCode\n",
2093 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); 1797 base,
1798 (priv->ucode_type == UCODE_SUBTYPE_INIT)
1799 ? "Init" : "RT");
2094 return; 1800 return;
2095 } 1801 }
2096 1802
2097 count = iwl_read_targ_mem(priv, base); 1803 iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
1804
1805 count = table.valid;
2098 1806
2099 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1807 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
2100 IWL_ERR(priv, "Start IWL Error Log Dump:\n"); 1808 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
@@ -2102,18 +1810,18 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
2102 priv->status, count); 1810 priv->status, count);
2103 } 1811 }
2104 1812
2105 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); 1813 desc = table.error_id;
2106 priv->isr_stats.err_code = desc; 1814 priv->isr_stats.err_code = desc;
2107 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32)); 1815 pc = table.pc;
2108 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); 1816 blink1 = table.blink1;
2109 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); 1817 blink2 = table.blink2;
2110 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); 1818 ilink1 = table.ilink1;
2111 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32)); 1819 ilink2 = table.ilink2;
2112 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32)); 1820 data1 = table.data1;
2113 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); 1821 data2 = table.data2;
2114 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 1822 line = table.line;
2115 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 1823 time = table.tsf_low;
2116 hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32)); 1824 hcmd = table.hcmd;
2117 1825
2118 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, 1826 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
2119 blink1, blink2, ilink1, ilink2); 1827 blink1, blink2, ilink1, ilink2);
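
Instead of ten separate target-memory reads, the error dump above now pulls the whole error table into a local struct and picks fields out of it. A compressed sketch of that access pattern; the layout of struct iwl_error_event_table is defined elsewhere in the driver and not repeated here:

	/* Illustrative condensation of the hunk above. */
	struct iwl_error_event_table table;

	iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE)
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
	trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.line,
				      table.blink1, table.blink2,
				      table.ilink1, table.ilink2);
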
@@ -2147,12 +1855,11 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
2147 if (num_events == 0) 1855 if (num_events == 0)
2148 return pos; 1856 return pos;
2149 1857
2150 if (priv->ucode_type == UCODE_INIT) { 1858 base = priv->device_pointers.log_event_table;
2151 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1859 if (priv->ucode_type == UCODE_SUBTYPE_INIT) {
2152 if (!base) 1860 if (!base)
2153 base = priv->_agn.init_evtlog_ptr; 1861 base = priv->_agn.init_evtlog_ptr;
2154 } else { 1862 } else {
2155 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
2156 if (!base) 1863 if (!base)
2157 base = priv->_agn.inst_evtlog_ptr; 1864 base = priv->_agn.inst_evtlog_ptr;
2158 } 1865 }
@@ -2169,14 +1876,14 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
2169 iwl_grab_nic_access(priv); 1876 iwl_grab_nic_access(priv);
2170 1877
2171 /* Set starting address; reads will auto-increment */ 1878 /* Set starting address; reads will auto-increment */
2172 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); 1879 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
2173 rmb(); 1880 rmb();
2174 1881
2175 /* "time" is actually "data" for mode 0 (no timestamp). 1882 /* "time" is actually "data" for mode 0 (no timestamp).
2176 * place event id # at far right for easier visual parsing. */ 1883 * place event id # at far right for easier visual parsing. */
2177 for (i = 0; i < num_events; i++) { 1884 for (i = 0; i < num_events; i++) {
2178 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1885 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
2179 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1886 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
2180 if (mode == 0) { 1887 if (mode == 0) {
2181 /* data, ev */ 1888 /* data, ev */
2182 if (bufsz) { 1889 if (bufsz) {
@@ -2190,7 +1897,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
2190 time, ev); 1897 time, ev);
2191 } 1898 }
2192 } else { 1899 } else {
2193 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1900 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
2194 if (bufsz) { 1901 if (bufsz) {
2195 pos += scnprintf(*buf + pos, bufsz - pos, 1902 pos += scnprintf(*buf + pos, bufsz - pos,
2196 "EVT_LOGT:%010u:0x%08x:%04u\n", 1903 "EVT_LOGT:%010u:0x%08x:%04u\n",
@@ -2261,13 +1968,12 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2261 int pos = 0; 1968 int pos = 0;
2262 size_t bufsz = 0; 1969 size_t bufsz = 0;
2263 1970
2264 if (priv->ucode_type == UCODE_INIT) { 1971 base = priv->device_pointers.log_event_table;
2265 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); 1972 if (priv->ucode_type == UCODE_SUBTYPE_INIT) {
2266 logsize = priv->_agn.init_evtlog_size; 1973 logsize = priv->_agn.init_evtlog_size;
2267 if (!base) 1974 if (!base)
2268 base = priv->_agn.init_evtlog_ptr; 1975 base = priv->_agn.init_evtlog_ptr;
2269 } else { 1976 } else {
2270 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
2271 logsize = priv->_agn.inst_evtlog_size; 1977 logsize = priv->_agn.inst_evtlog_size;
2272 if (!base) 1978 if (!base)
2273 base = priv->_agn.inst_evtlog_ptr; 1979 base = priv->_agn.inst_evtlog_ptr;
@@ -2276,7 +1982,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2276 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { 1982 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
2277 IWL_ERR(priv, 1983 IWL_ERR(priv,
2278 "Invalid event log pointer 0x%08X for %s uCode\n", 1984 "Invalid event log pointer 0x%08X for %s uCode\n",
2279 base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); 1985 base,
1986 (priv->ucode_type == UCODE_SUBTYPE_INIT)
1987 ? "Init" : "RT");
2280 return -EINVAL; 1988 return -EINVAL;
2281 } 1989 }
2282 1990
@@ -2423,30 +2131,14 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
2423 * from protocol/runtime uCode (initialization uCode's 2131 * from protocol/runtime uCode (initialization uCode's
2424 * Alive gets handled by iwl_init_alive_start()). 2132 * Alive gets handled by iwl_init_alive_start()).
2425 */ 2133 */
2426static void iwl_alive_start(struct iwl_priv *priv) 2134int iwl_alive_start(struct iwl_priv *priv)
2427{ 2135{
2428 int ret = 0; 2136 int ret = 0;
2429 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 2137 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2430 2138
2431 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2139 iwl_reset_ict(priv);
2432
2433 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2434 * This is a paranoid check, because we would not have gotten the
2435 * "runtime" alive if code weren't properly loaded. */
2436 if (iwl_verify_ucode(priv)) {
2437 /* Runtime instruction load was bad;
2438 * take it all the way back down so we can try again */
2439 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2440 goto restart;
2441 }
2442
2443 ret = priv->cfg->ops->lib->alive_notify(priv);
2444 if (ret) {
2445 IWL_WARN(priv,
2446 "Could not complete ALIVE transition [ntf]: %d\n", ret);
2447 goto restart;
2448 }
2449 2140
2141 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2450 2142
2451 /* After the ALIVE response, we can send host commands to the uCode */ 2143 /* After the ALIVE response, we can send host commands to the uCode */
2452 set_bit(STATUS_ALIVE, &priv->status); 2144 set_bit(STATUS_ALIVE, &priv->status);
@@ -2455,7 +2147,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
2455 iwl_setup_watchdog(priv); 2147 iwl_setup_watchdog(priv);
2456 2148
2457 if (iwl_is_rfkill(priv)) 2149 if (iwl_is_rfkill(priv))
2458 return; 2150 return -ERFKILL;
2459 2151
2460 /* download priority table before any calibration request */ 2152 /* download priority table before any calibration request */
2461 if (priv->cfg->bt_params && 2153 if (priv->cfg->bt_params &&
@@ -2469,10 +2161,14 @@ static void iwl_alive_start(struct iwl_priv *priv)
2469 iwlagn_send_prio_tbl(priv); 2161 iwlagn_send_prio_tbl(priv);
2470 2162
2471 /* FIXME: w/a to force change uCode BT state machine */ 2163 /* FIXME: w/a to force change uCode BT state machine */
2472 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, 2164 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
2473 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 2165 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2474 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE, 2166 if (ret)
2475 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 2167 return ret;
2168 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
2169 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2170 if (ret)
2171 return ret;
2476 } 2172 }
2477 if (priv->hw_params.calib_rt_cfg) 2173 if (priv->hw_params.calib_rt_cfg)
2478 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg); 2174 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
@@ -2514,30 +2210,23 @@ static void iwl_alive_start(struct iwl_priv *priv)
2514 set_bit(STATUS_READY, &priv->status); 2210 set_bit(STATUS_READY, &priv->status);
2515 2211
2516 /* Configure the adapter for unassociated operation */ 2212 /* Configure the adapter for unassociated operation */
2517 iwlcore_commit_rxon(priv, ctx); 2213 ret = iwlcore_commit_rxon(priv, ctx);
2214 if (ret)
2215 return ret;
2518 2216
2519 /* At this point, the NIC is initialized and operational */ 2217 /* At this point, the NIC is initialized and operational */
2520 iwl_rf_kill_ct_config(priv); 2218 iwl_rf_kill_ct_config(priv);
2521 2219
2522 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2220 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2523 wake_up_interruptible(&priv->wait_command_queue);
2524
2525 iwl_power_update_mode(priv, true);
2526 IWL_DEBUG_INFO(priv, "Updated power mode\n");
2527 2221
2528 2222 return iwl_power_update_mode(priv, true);
2529 return;
2530
2531 restart:
2532 queue_work(priv->workqueue, &priv->restart);
2533} 2223}
2534 2224
2535static void iwl_cancel_deferred_work(struct iwl_priv *priv); 2225static void iwl_cancel_deferred_work(struct iwl_priv *priv);
2536 2226
2537static void __iwl_down(struct iwl_priv *priv) 2227static void __iwl_down(struct iwl_priv *priv)
2538{ 2228{
2539 unsigned long flags; 2229 int exit_pending;
2540 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
2541 2230
2542 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2231 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2543 2232
@@ -2563,40 +2252,15 @@ static void __iwl_down(struct iwl_priv *priv)
2563 priv->bt_full_concurrent = false; 2252 priv->bt_full_concurrent = false;
2564 priv->bt_ci_compliance = 0; 2253 priv->bt_ci_compliance = 0;
2565 2254
2566 /* Unblock any waiting calls */
2567 wake_up_interruptible_all(&priv->wait_command_queue);
2568
2569 /* Wipe out the EXIT_PENDING status bit if we are not actually 2255 /* Wipe out the EXIT_PENDING status bit if we are not actually
2570 * exiting the module */ 2256 * exiting the module */
2571 if (!exit_pending) 2257 if (!exit_pending)
2572 clear_bit(STATUS_EXIT_PENDING, &priv->status); 2258 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2573 2259
2574 /* stop and reset the on-board processor */
2575 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2576
2577 /* tell the device to stop sending interrupts */
2578 spin_lock_irqsave(&priv->lock, flags);
2579 iwl_disable_interrupts(priv);
2580 spin_unlock_irqrestore(&priv->lock, flags);
2581 iwl_synchronize_irq(priv);
2582
2583 if (priv->mac80211_registered) 2260 if (priv->mac80211_registered)
2584 ieee80211_stop_queues(priv->hw); 2261 ieee80211_stop_queues(priv->hw);
2585 2262
2586 /* If we have not previously called iwl_init() then 2263 /* Clear out all status bits but a few that are stable across reset */
2587 * clear all bits but the RF Kill bit and return */
2588 if (!iwl_is_init(priv)) {
2589 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2590 STATUS_RF_KILL_HW |
2591 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2592 STATUS_GEO_CONFIGURED |
2593 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2594 STATUS_EXIT_PENDING;
2595 goto exit;
2596 }
2597
2598 /* ...otherwise clear out all the status bits but the RF Kill
2599 * bit and continue taking the NIC down. */
2600 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 2264 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2601 STATUS_RF_KILL_HW | 2265 STATUS_RF_KILL_HW |
2602 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2266 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
@@ -2606,31 +2270,10 @@ static void __iwl_down(struct iwl_priv *priv)
2606 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2270 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2607 STATUS_EXIT_PENDING; 2271 STATUS_EXIT_PENDING;
2608 2272
2609 /* device going down, Stop using ICT table */ 2273 iwlagn_stop_device(priv);
2610 if (priv->cfg->ops->lib->isr_ops.disable)
2611 priv->cfg->ops->lib->isr_ops.disable(priv);
2612
2613 iwlagn_txq_ctx_stop(priv);
2614 iwlagn_rxq_stop(priv);
2615
2616 /* Power-down device's busmaster DMA clocks */
2617 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2618 udelay(5);
2619
2620 /* Make sure (redundant) we've released our request to stay awake */
2621 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2622
2623 /* Stop the device, and put it in low power state */
2624 iwl_apm_stop(priv);
2625
2626 exit:
2627 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2628 2274
2629 dev_kfree_skb(priv->beacon_skb); 2275 dev_kfree_skb(priv->beacon_skb);
2630 priv->beacon_skb = NULL; 2276 priv->beacon_skb = NULL;
2631
2632 /* clear out any free frames */
2633 iwl_clear_free_frames(priv);
2634} 2277}
2635 2278
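
The status mask in __iwl_down() above works by re-creating each bit it wants to keep: test_bit() returns 0 or 1, and shifting that value back to the bit's own position builds a mask of survivors for the &=. A two-bit illustration of the same expression:

	/* illustrative, reduced to two bits; the real expression above also
	 * keeps STATUS_EXIT_PENDING and the other listed bits */
	priv->status &=
		test_bit(STATUS_RF_KILL_HW, &priv->status) <<
			STATUS_RF_KILL_HW |
		test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
			STATUS_GEO_CONFIGURED;
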
2636static void iwl_down(struct iwl_priv *priv) 2279static void iwl_down(struct iwl_priv *priv)
@@ -2644,9 +2287,10 @@ static void iwl_down(struct iwl_priv *priv)
2644 2287
2645#define HW_READY_TIMEOUT (50) 2288#define HW_READY_TIMEOUT (50)
2646 2289
2290/* Note: returns poll_bit return value, which is >= 0 if success */
2647static int iwl_set_hw_ready(struct iwl_priv *priv) 2291static int iwl_set_hw_ready(struct iwl_priv *priv)
2648{ 2292{
2649 int ret = 0; 2293 int ret;
2650 2294
2651 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 2295 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2652 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 2296 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
@@ -2656,25 +2300,21 @@ static int iwl_set_hw_ready(struct iwl_priv *priv)
2656 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 2300 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2657 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 2301 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2658 HW_READY_TIMEOUT); 2302 HW_READY_TIMEOUT);
2659 if (ret != -ETIMEDOUT)
2660 priv->hw_ready = true;
2661 else
2662 priv->hw_ready = false;
2663 2303
2664 IWL_DEBUG_INFO(priv, "hardware %s\n", 2304 IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
2665 (priv->hw_ready == 1) ? "ready" : "not ready");
2666 return ret; 2305 return ret;
2667} 2306}
2668 2307
2669static int iwl_prepare_card_hw(struct iwl_priv *priv) 2308/* Note: returns standard 0/-ERROR code */
2309int iwl_prepare_card_hw(struct iwl_priv *priv)
2670{ 2310{
2671 int ret = 0; 2311 int ret;
2672 2312
2673 IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n"); 2313 IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
2674 2314
2675 ret = iwl_set_hw_ready(priv); 2315 ret = iwl_set_hw_ready(priv);
2676 if (priv->hw_ready) 2316 if (ret >= 0)
2677 return ret; 2317 return 0;
2678 2318
2679 /* If HW is not ready, prepare the conditions to check again */ 2319 /* If HW is not ready, prepare the conditions to check again */
2680 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 2320 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
@@ -2684,10 +2324,13 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
2684 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 2324 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
2685 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); 2325 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
2686 2326
2687 /* HW should be ready by now, check again. */ 2327 if (ret < 0)
2688 if (ret != -ETIMEDOUT) 2328 return ret;
2689 iwl_set_hw_ready(priv);
2690 2329
2330 /* HW should be ready by now, check again. */
2331 ret = iwl_set_hw_ready(priv);
2332 if (ret >= 0)
2333 return 0;
2691 return ret; 2334 return ret;
2692} 2335}
2693 2336
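
After this hunk the two functions use different return conventions: iwl_set_hw_ready() passes through the poll helper's result, where any value >= 0 means the NIC-ready bit was observed, while iwl_prepare_card_hw() folds that back into an ordinary 0 / -errno for its callers. A hypothetical caller therefore only needs:

	/* illustrative caller -- not part of the patch */
	if (iwl_prepare_card_hw(priv)) {
		IWL_WARN(priv, "hardware not ready\n");
		return -EIO;
	}
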
@@ -2696,19 +2339,15 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
2696static int __iwl_up(struct iwl_priv *priv) 2339static int __iwl_up(struct iwl_priv *priv)
2697{ 2340{
2698 struct iwl_rxon_context *ctx; 2341 struct iwl_rxon_context *ctx;
2699 int i;
2700 int ret; 2342 int ret;
2701 2343
2344 lockdep_assert_held(&priv->mutex);
2345
2702 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2346 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2703 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); 2347 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2704 return -EIO; 2348 return -EIO;
2705 } 2349 }
2706 2350
2707 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2708 IWL_ERR(priv, "ucode not available for device bringup\n");
2709 return -EIO;
2710 }
2711
2712 for_each_context(priv, ctx) { 2351 for_each_context(priv, ctx) {
2713 ret = iwlagn_alloc_bcast_station(priv, ctx); 2352 ret = iwlagn_alloc_bcast_station(priv, ctx);
2714 if (ret) { 2353 if (ret) {
@@ -2717,89 +2356,33 @@ static int __iwl_up(struct iwl_priv *priv)
2717 } 2356 }
2718 } 2357 }
2719 2358
2720	iwl_prepare_card_hw(priv);
2721
2722	if (!priv->hw_ready) {
2723		IWL_WARN(priv, "Exit HW not ready\n");
2724		return -EIO;
2725	}
2726
2727	/* If platform's RF_KILL switch is NOT set to KILL */
2728	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2729		clear_bit(STATUS_RF_KILL_HW, &priv->status);
2730	else
2731		set_bit(STATUS_RF_KILL_HW, &priv->status);
2732
2733	if (iwl_is_rfkill(priv)) {
2734		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
2735
2736		iwl_enable_interrupts(priv);
2737		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2738		return 0;
2739	}
2740
2741	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2742
2743	/* must be initialised before iwl_hw_nic_init */
2744	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
2745		priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
2746	else
2747		priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
2748
2749	ret = iwlagn_hw_nic_init(priv);
2750	if (ret) {
2751		IWL_ERR(priv, "Unable to init nic\n");
2752		return ret;
2753	}
2754
2755	/* make sure rfkill handshake bits are cleared */
2756	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2757	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2758		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2759
2760	/* clear (again), then enable host interrupts */
2761	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2762	iwl_enable_interrupts(priv);
2763
2764	/* really make sure rfkill handshake bits are cleared */
2765	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2766	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2767
2768	/* Copy original ucode data image from disk into backup cache.
2769	 * This will be used to initialize the on-board processor's
2770	 * data SRAM for a clean start when the runtime program first loads. */
2771	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2772	       priv->ucode_data.len);
2773
2774	for (i = 0; i < MAX_HW_RESTARTS; i++) {
2775
2776		/* load bootstrap state machine,
2777		 * load bootstrap program into processor's memory,
2778		 * prepare to load the "initialize" uCode */
2779		ret = priv->cfg->ops->lib->load_ucode(priv);
2780
2781		if (ret) {
2782			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
2783				ret);
2784			continue;
2785		}
2786
2787		/* start card; "initialize" will load runtime ucode */
2788		iwl_nic_start(priv);
2789
2790		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2791
2792		return 0;
2793	}
2794
2795	set_bit(STATUS_EXIT_PENDING, &priv->status);
2796	__iwl_down(priv);
2797	clear_bit(STATUS_EXIT_PENDING, &priv->status);
2798
2799	/* tried to restart and config the device for as long as our
2800	 * patience could withstand */
2801	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2802	return -EIO;
2803}
2359	ret = iwlagn_run_init_ucode(priv);
2360	if (ret) {
2361		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
2362		goto error;
2363	}
2364
2365	ret = iwlagn_load_ucode_wait_alive(priv,
2366					   &priv->ucode_rt,
2367					   UCODE_SUBTYPE_REGULAR,
2368					   UCODE_SUBTYPE_REGULAR_NEW);
2369	if (ret) {
2370		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
2371		goto error;
2372	}
2373
2374	ret = iwl_alive_start(priv);
2375	if (ret)
2376		goto error;
2377	return 0;
2378
2379 error:
2380	set_bit(STATUS_EXIT_PENDING, &priv->status);
2381	__iwl_down(priv);
2382	clear_bit(STATUS_EXIT_PENDING, &priv->status);
2383
2384	IWL_ERR(priv, "Unable to initialize device.\n");
2385	return ret;
2386}
 
 
@@ -2809,36 +2392,6 @@ static int __iwl_up(struct iwl_priv *priv)
  *
  *****************************************************************************/
 
-static void iwl_bg_init_alive_start(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, init_alive_start.work);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	mutex_lock(&priv->mutex);
-	priv->cfg->ops->lib->init_alive_start(priv);
-	mutex_unlock(&priv->mutex);
-}
-
-static void iwl_bg_alive_start(struct work_struct *data)
-{
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, alive_start.work);
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	/* enable dram interrupt */
-	if (priv->cfg->ops->lib->isr_ops.reset)
-		priv->cfg->ops->lib->isr_ops.reset(priv);
-
-	mutex_lock(&priv->mutex);
-	iwl_alive_start(priv);
-	mutex_unlock(&priv->mutex);
-}
-
 static void iwl_bg_run_time_calib_work(struct work_struct *work)
 {
 	struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -2853,22 +2406,49 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
 	}
 
 	if (priv->start_calib) {
-		if (iwl_bt_statistics(priv)) {
-			iwl_chain_noise_calibration(priv,
-					(void *)&priv->_agn.statistics_bt);
-			iwl_sensitivity_calibration(priv,
-					(void *)&priv->_agn.statistics_bt);
-		} else {
-			iwl_chain_noise_calibration(priv,
-					(void *)&priv->_agn.statistics);
-			iwl_sensitivity_calibration(priv,
-					(void *)&priv->_agn.statistics);
-		}
+		iwl_chain_noise_calibration(priv);
+		iwl_sensitivity_calibration(priv);
 	}
 
 	mutex_unlock(&priv->mutex);
 }
 
+static void iwlagn_prepare_restart(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+	bool bt_full_concurrent;
+	u8 bt_ci_compliance;
+	u8 bt_load;
+	u8 bt_status;
+
+	lockdep_assert_held(&priv->mutex);
+
+	for_each_context(priv, ctx)
+		ctx->vif = NULL;
+	priv->is_open = 0;
+
+	/*
+	 * __iwl_down() will clear the BT status variables,
+	 * which is correct, but when we restart we really
+	 * want to keep them so restore them afterwards.
+	 *
+	 * The restart process will later pick them up and
+	 * re-configure the hw when we reconfigure the BT
+	 * command.
+	 */
+	bt_full_concurrent = priv->bt_full_concurrent;
+	bt_ci_compliance = priv->bt_ci_compliance;
+	bt_load = priv->bt_traffic_load;
+	bt_status = priv->bt_status;
+
+	__iwl_down(priv);
+
+	priv->bt_full_concurrent = bt_full_concurrent;
+	priv->bt_ci_compliance = bt_ci_compliance;
+	priv->bt_traffic_load = bt_load;
+	priv->bt_status = bt_status;
+}
+
 static void iwl_bg_restart(struct work_struct *data)
 {
 	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -2877,50 +2457,13 @@ static void iwl_bg_restart(struct work_struct *data)
 		return;
 
 	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
-		struct iwl_rxon_context *ctx;
-		bool bt_full_concurrent;
-		u8 bt_ci_compliance;
-		u8 bt_load;
-		u8 bt_status;
-
 		mutex_lock(&priv->mutex);
-		for_each_context(priv, ctx)
-			ctx->vif = NULL;
-		priv->is_open = 0;
-
-		/*
-		 * __iwl_down() will clear the BT status variables,
-		 * which is correct, but when we restart we really
-		 * want to keep them so restore them afterwards.
-		 *
-		 * The restart process will later pick them up and
-		 * re-configure the hw when we reconfigure the BT
-		 * command.
-		 */
-		bt_full_concurrent = priv->bt_full_concurrent;
-		bt_ci_compliance = priv->bt_ci_compliance;
-		bt_load = priv->bt_traffic_load;
-		bt_status = priv->bt_status;
-
-		__iwl_down(priv);
-
-		priv->bt_full_concurrent = bt_full_concurrent;
-		priv->bt_ci_compliance = bt_ci_compliance;
-		priv->bt_traffic_load = bt_load;
-		priv->bt_status = bt_status;
-
+		iwlagn_prepare_restart(priv);
 		mutex_unlock(&priv->mutex);
 		iwl_cancel_deferred_work(priv);
 		ieee80211_restart_hw(priv->hw);
 	} else {
-		iwl_down(priv);
-
-		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-			return;
-
-		mutex_lock(&priv->mutex);
-		__iwl_up(priv);
-		mutex_unlock(&priv->mutex);
+		WARN_ON(1);
 	}
 }
 
@@ -3031,8 +2574,6 @@ unlock:
  *
  *****************************************************************************/
 
-#define UCODE_READY_TIMEOUT	(4 * HZ)
-
 /*
  * Not a mac80211 entry point function, but it fits in with all the
  * other mac80211 functions grouped here.
@@ -3055,14 +2596,16 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
 
 	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 
-	if (!priv->cfg->base_params->broken_powersave)
-		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
 
 	if (priv->cfg->sku & IWL_SKU_N)
 		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
 			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;
 
+	if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
 	hw->sta_data_size = sizeof(struct iwl_station_priv);
 	hw->vif_data_size = sizeof(struct iwl_vif_priv);
 
@@ -3112,7 +2655,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
 }
 
 
-int iwlagn_mac_start(struct ieee80211_hw *hw)
+static int iwlagn_mac_start(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = hw->priv;
 	int ret;
@@ -3123,37 +2666,23 @@ int iwlagn_mac_start(struct ieee80211_hw *hw)
 	mutex_lock(&priv->mutex);
 	ret = __iwl_up(priv);
 	mutex_unlock(&priv->mutex);
-
 	if (ret)
 		return ret;
 
-	if (iwl_is_rfkill(priv))
-		goto out;
-
 	IWL_DEBUG_INFO(priv, "Start UP work done.\n");
 
-	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
-	 * mac80211 will not be run successfully. */
-	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
-			test_bit(STATUS_READY, &priv->status),
-			UCODE_READY_TIMEOUT);
-	if (!ret) {
-		if (!test_bit(STATUS_READY, &priv->status)) {
-			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
-				jiffies_to_msecs(UCODE_READY_TIMEOUT));
-			return -ETIMEDOUT;
-		}
-	}
+	/* Now we should be done, and the READY bit should be set. */
+	if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
+		ret = -EIO;
 
 	iwlagn_led_enable(priv);
 
-out:
 	priv->is_open = 1;
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 	return 0;
 }
 
-void iwlagn_mac_stop(struct ieee80211_hw *hw)
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = hw->priv;
 
@@ -3176,7 +2705,7 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
3176 IWL_DEBUG_MAC80211(priv, "leave\n"); 2705 IWL_DEBUG_MAC80211(priv, "leave\n");
3177} 2706}
3178 2707
3179void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2708static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3180{ 2709{
3181 struct iwl_priv *priv = hw->priv; 2710 struct iwl_priv *priv = hw->priv;
3182 2711
@@ -3191,11 +2720,11 @@ void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3191 IWL_DEBUG_MACDUMP(priv, "leave\n"); 2720 IWL_DEBUG_MACDUMP(priv, "leave\n");
3192} 2721}
3193 2722
3194void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 2723static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
3195 struct ieee80211_vif *vif, 2724 struct ieee80211_vif *vif,
3196 struct ieee80211_key_conf *keyconf, 2725 struct ieee80211_key_conf *keyconf,
3197 struct ieee80211_sta *sta, 2726 struct ieee80211_sta *sta,
3198 u32 iv32, u16 *phase1key) 2727 u32 iv32, u16 *phase1key)
3199{ 2728{
3200 struct iwl_priv *priv = hw->priv; 2729 struct iwl_priv *priv = hw->priv;
3201 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 2730 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -3208,9 +2737,10 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
3208 IWL_DEBUG_MAC80211(priv, "leave\n"); 2737 IWL_DEBUG_MAC80211(priv, "leave\n");
3209} 2738}
3210 2739
3211int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2740static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3212 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 2741 struct ieee80211_vif *vif,
3213 struct ieee80211_key_conf *key) 2742 struct ieee80211_sta *sta,
2743 struct ieee80211_key_conf *key)
3214{ 2744{
3215 struct iwl_priv *priv = hw->priv; 2745 struct iwl_priv *priv = hw->priv;
3216 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 2746 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -3221,7 +2751,7 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3221 2751
3222 IWL_DEBUG_MAC80211(priv, "enter\n"); 2752 IWL_DEBUG_MAC80211(priv, "enter\n");
3223 2753
3224 if (priv->cfg->mod_params->sw_crypto) { 2754 if (iwlagn_mod_params.sw_crypto) {
3225 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); 2755 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
3226 return -EOPNOTSUPP; 2756 return -EOPNOTSUPP;
3227 } 2757 }
@@ -3285,11 +2815,11 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3285 return ret; 2815 return ret;
3286} 2816}
3287 2817
3288int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 2818static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3289 struct ieee80211_vif *vif, 2819 struct ieee80211_vif *vif,
3290 enum ieee80211_ampdu_mlme_action action, 2820 enum ieee80211_ampdu_mlme_action action,
3291 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 2821 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
3292 u8 buf_size) 2822 u8 buf_size)
3293{ 2823{
3294 struct iwl_priv *priv = hw->priv; 2824 struct iwl_priv *priv = hw->priv;
3295 int ret = -EINVAL; 2825 int ret = -EINVAL;
@@ -3348,6 +2878,10 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3348 } 2878 }
3349 break; 2879 break;
3350 case IEEE80211_AMPDU_TX_OPERATIONAL: 2880 case IEEE80211_AMPDU_TX_OPERATIONAL:
2881 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2882
2883 iwlagn_txq_agg_queue_setup(priv, sta, tid, buf_size);
2884
3351 /* 2885 /*
3352 * If the limit is 0, then it wasn't initialised yet, 2886 * If the limit is 0, then it wasn't initialised yet,
3353 * use the default. We can do that since we take the 2887 * use the default. We can do that since we take the
@@ -3392,9 +2926,9 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3392 return ret; 2926 return ret;
3393} 2927}
3394 2928
3395int iwlagn_mac_sta_add(struct ieee80211_hw *hw, 2929static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3396 struct ieee80211_vif *vif, 2930 struct ieee80211_vif *vif,
3397 struct ieee80211_sta *sta) 2931 struct ieee80211_sta *sta)
3398{ 2932{
3399 struct iwl_priv *priv = hw->priv; 2933 struct iwl_priv *priv = hw->priv;
3400 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 2934 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -3435,8 +2969,8 @@ int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3435 return 0; 2969 return 0;
3436} 2970}
3437 2971
3438void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, 2972static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3439 struct ieee80211_channel_switch *ch_switch) 2973 struct ieee80211_channel_switch *ch_switch)
3440{ 2974{
3441 struct iwl_priv *priv = hw->priv; 2975 struct iwl_priv *priv = hw->priv;
3442 const struct iwl_channel_info *ch_info; 2976 const struct iwl_channel_info *ch_info;
@@ -3457,21 +2991,22 @@ void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3457 2991
3458 IWL_DEBUG_MAC80211(priv, "enter\n"); 2992 IWL_DEBUG_MAC80211(priv, "enter\n");
3459 2993
2994 mutex_lock(&priv->mutex);
2995
3460 if (iwl_is_rfkill(priv)) 2996 if (iwl_is_rfkill(priv))
3461 goto out_exit; 2997 goto out;
3462 2998
3463 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 2999 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
3464 test_bit(STATUS_SCANNING, &priv->status)) 3000 test_bit(STATUS_SCANNING, &priv->status))
3465 goto out_exit; 3001 goto out;
3466 3002
3467 if (!iwl_is_associated_ctx(ctx)) 3003 if (!iwl_is_associated_ctx(ctx))
3468 goto out_exit; 3004 goto out;
3469 3005
3470 /* channel switch in progress */ 3006 /* channel switch in progress */
3471 if (priv->switch_rxon.switch_in_progress == true) 3007 if (priv->switch_rxon.switch_in_progress == true)
3472 goto out_exit; 3008 goto out;
3473 3009
3474 mutex_lock(&priv->mutex);
3475 if (priv->cfg->ops->lib->set_channel_switch) { 3010 if (priv->cfg->ops->lib->set_channel_switch) {
3476 3011
3477 ch = channel->hw_value; 3012 ch = channel->hw_value;
@@ -3527,16 +3062,15 @@ void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3527 } 3062 }
3528out: 3063out:
3529 mutex_unlock(&priv->mutex); 3064 mutex_unlock(&priv->mutex);
3530out_exit:
3531 if (!priv->switch_rxon.switch_in_progress) 3065 if (!priv->switch_rxon.switch_in_progress)
3532 ieee80211_chswitch_done(ctx->vif, false); 3066 ieee80211_chswitch_done(ctx->vif, false);
3533 IWL_DEBUG_MAC80211(priv, "leave\n"); 3067 IWL_DEBUG_MAC80211(priv, "leave\n");
3534} 3068}
3535 3069
3536void iwlagn_configure_filter(struct ieee80211_hw *hw, 3070static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3537 unsigned int changed_flags, 3071 unsigned int changed_flags,
3538 unsigned int *total_flags, 3072 unsigned int *total_flags,
3539 u64 multicast) 3073 u64 multicast)
3540{ 3074{
3541 struct iwl_priv *priv = hw->priv; 3075 struct iwl_priv *priv = hw->priv;
3542 __le32 filter_or = 0, filter_nand = 0; 3076 __le32 filter_or = 0, filter_nand = 0;
@@ -3583,7 +3117,7 @@ void iwlagn_configure_filter(struct ieee80211_hw *hw,
3583 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 3117 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3584} 3118}
3585 3119
3586void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) 3120static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
3587{ 3121{
3588 struct iwl_priv *priv = hw->priv; 3122 struct iwl_priv *priv = hw->priv;
3589 3123
@@ -3729,8 +3263,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3729 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); 3263 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
3730 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); 3264 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
3731 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); 3265 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3732 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
3733 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
3734 INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done); 3266 INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
3735 3267
3736 iwl_setup_scan_deferred_work(priv); 3268 iwl_setup_scan_deferred_work(priv);
@@ -3750,12 +3282,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3750 priv->watchdog.data = (unsigned long)priv; 3282 priv->watchdog.data = (unsigned long)priv;
3751 priv->watchdog.function = iwl_bg_watchdog; 3283 priv->watchdog.function = iwl_bg_watchdog;
3752 3284
3753 if (!priv->cfg->base_params->use_isr_legacy) 3285 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3754 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3286 iwl_irq_tasklet, (unsigned long)priv);
3755 iwl_irq_tasklet, (unsigned long)priv);
3756 else
3757 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3758 iwl_irq_tasklet_legacy, (unsigned long)priv);
3759} 3287}
3760 3288
3761static void iwl_cancel_deferred_work(struct iwl_priv *priv) 3289static void iwl_cancel_deferred_work(struct iwl_priv *priv)
@@ -3763,8 +3291,6 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3763 if (priv->cfg->ops->lib->cancel_deferred_work) 3291 if (priv->cfg->ops->lib->cancel_deferred_work)
3764 priv->cfg->ops->lib->cancel_deferred_work(priv); 3292 priv->cfg->ops->lib->cancel_deferred_work(priv);
3765 3293
3766 cancel_delayed_work_sync(&priv->init_alive_start);
3767 cancel_delayed_work(&priv->alive_start);
3768 cancel_work_sync(&priv->run_time_calib_work); 3294 cancel_work_sync(&priv->run_time_calib_work);
3769 cancel_work_sync(&priv->beacon_update); 3295 cancel_work_sync(&priv->beacon_update);
3770 3296
@@ -3805,10 +3331,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
3805 spin_lock_init(&priv->sta_lock); 3331 spin_lock_init(&priv->sta_lock);
3806 spin_lock_init(&priv->hcmd_lock); 3332 spin_lock_init(&priv->hcmd_lock);
3807 3333
3808 INIT_LIST_HEAD(&priv->free_frames);
3809
3810 mutex_init(&priv->mutex); 3334 mutex_init(&priv->mutex);
3811 mutex_init(&priv->sync_cmd_mutex);
3812 3335
3813 priv->ieee_channels = NULL; 3336 priv->ieee_channels = NULL;
3814 priv->ieee_rates = NULL; 3337 priv->ieee_rates = NULL;
@@ -3845,12 +3368,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
3845 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; 3368 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
3846 } 3369 }
3847 3370
3848 /* Set the tx_power_user_lmt to the lowest power level
3849 * this value will get overwritten by channel max power avg
3850 * from eeprom */
3851 priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
3852 priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN;
3853
3854 ret = iwl_init_channel_map(priv); 3371 ret = iwl_init_channel_map(priv);
3855 if (ret) { 3372 if (ret) {
3856 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); 3373 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
@@ -3905,28 +3422,30 @@ struct ieee80211_ops iwlagn_hw_ops = {
 	.cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
 	.offchannel_tx = iwl_mac_offchannel_tx,
 	.offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
+	CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
 };
 
-static void iwl_hw_detect(struct iwl_priv *priv)
+static u32 iwl_hw_detect(struct iwl_priv *priv)
 {
-	priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
-	priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
-	priv->rev_id = priv->pci_dev->revision;
-	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
+	u8 rev_id;
+
+	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
+	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
+	return iwl_read32(priv, CSR_HW_REV);
 }
 
 static int iwl_set_hw_params(struct iwl_priv *priv)
 {
 	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
 	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-	if (priv->cfg->mod_params->amsdu_size_8K)
+	if (iwlagn_mod_params.amsdu_size_8K)
 		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
 	else
 		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
 
 	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
 
-	if (priv->cfg->mod_params->disable_11n)
+	if (iwlagn_mod_params.disable_11n)
 		priv->cfg->sku &= ~IWL_SKU_N;
 
 	/* Device-specific setup */
@@ -3955,6 +3474,28 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
3955 7, 6, 5, 4, 3474 7, 6, 5, 4,
3956}; 3475};
3957 3476
3477/* This function both allocates and initializes hw and priv. */
3478static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
3479{
3480 struct iwl_priv *priv;
3481 /* mac80211 allocates memory for this device instance, including
3482 * space for this driver's private structure */
3483 struct ieee80211_hw *hw;
3484
3485 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
3486 if (hw == NULL) {
3487 pr_err("%s: Can not allocate network device\n",
3488 cfg->name);
3489 goto out;
3490 }
3491
3492 priv = hw->priv;
3493 priv->hw = hw;
3494
3495out:
3496 return hw;
3497}
3498
3958static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3499static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3959{ 3500{
3960 int err = 0, i; 3501 int err = 0, i;
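As background (not part of the patch), iwl_alloc_all() above follows the usual mac80211 allocation pattern. A minimal, generic sketch of that pattern is shown here; names prefixed my_ are illustrative and not symbols from this driver:

#include <net/mac80211.h>

/* Sketch only: the ieee80211_alloc_hw()/ieee80211_free_hw() pairing that
 * iwl_alloc_all() relies on. */
struct my_priv {
	struct ieee80211_hw *hw;	/* back-pointer, as in struct iwl_priv */
};

static struct ieee80211_hw *my_alloc(const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	struct my_priv *priv;

	/* mac80211 allocates hw plus sizeof(struct my_priv) of drv_priv */
	hw = ieee80211_alloc_hw(sizeof(struct my_priv), ops);
	if (!hw)
		return NULL;

	priv = hw->priv;	/* driver-private area embedded in hw */
	priv->hw = hw;
	return hw;
}

static void my_free(struct ieee80211_hw *hw)
{
	ieee80211_free_hw(hw);	/* releases hw and the embedded private area */
}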
@@ -3963,19 +3504,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3963 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 3504 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3964 unsigned long flags; 3505 unsigned long flags;
3965 u16 pci_cmd, num_mac; 3506 u16 pci_cmd, num_mac;
3507 u32 hw_rev;
3966 3508
3967 /************************ 3509 /************************
3968 * 1. Allocating HW data 3510 * 1. Allocating HW data
3969 ************************/ 3511 ************************/
3970 3512
3971 /* Disabling hardware scan means that mac80211 will perform scans
3972 * "the hard way", rather than using device's scan. */
3973 if (cfg->mod_params->disable_hw_scan) {
3974 dev_printk(KERN_DEBUG, &(pdev->dev),
3975 "sw scan support is deprecated\n");
3976 iwlagn_hw_ops.hw_scan = NULL;
3977 }
3978
3979 hw = iwl_alloc_all(cfg); 3513 hw = iwl_alloc_all(cfg);
3980 if (!hw) { 3514 if (!hw) {
3981 err = -ENOMEM; 3515 err = -ENOMEM;
@@ -3984,6 +3518,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3984 priv = hw->priv; 3518 priv = hw->priv;
3985 /* At this point both hw and priv are allocated. */ 3519 /* At this point both hw and priv are allocated. */
3986 3520
3521 priv->ucode_type = UCODE_SUBTYPE_NONE_LOADED;
3522
3987 /* 3523 /*
3988 * The default context is always valid, 3524 * The default context is always valid,
3989 * more may be discovered when firmware 3525 * more may be discovered when firmware
@@ -4116,16 +3652,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4116 */ 3652 */
4117 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3653 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4118 3654
4119 iwl_hw_detect(priv); 3655 hw_rev = iwl_hw_detect(priv);
4120 IWL_INFO(priv, "Detected %s, REV=0x%X\n", 3656 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
4121 priv->cfg->name, priv->hw_rev); 3657 priv->cfg->name, hw_rev);
4122 3658
4123 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3659 /* We disable the RETRY_TIMEOUT register (0x41) to keep
4124 * PCI Tx retries from interfering with C3 CPU state */ 3660 * PCI Tx retries from interfering with C3 CPU state */
4125 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3661 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4126 3662
4127 iwl_prepare_card_hw(priv); 3663 if (iwl_prepare_card_hw(priv)) {
4128 if (!priv->hw_ready) {
4129 IWL_WARN(priv, "Failed, HW not ready\n"); 3664 IWL_WARN(priv, "Failed, HW not ready\n");
4130 goto out_iounmap; 3665 goto out_iounmap;
4131 } 3666 }
@@ -4134,7 +3669,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4134 * 4. Read EEPROM 3669 * 4. Read EEPROM
4135 *****************/ 3670 *****************/
4136 /* Read the EEPROM */ 3671 /* Read the EEPROM */
4137 err = iwl_eeprom_init(priv); 3672 err = iwl_eeprom_init(priv, hw_rev);
4138 if (err) { 3673 if (err) {
4139 IWL_ERR(priv, "Unable to init EEPROM\n"); 3674 IWL_ERR(priv, "Unable to init EEPROM\n");
4140 goto out_iounmap; 3675 goto out_iounmap;
@@ -4186,10 +3721,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4186 3721
4187 pci_enable_msi(priv->pci_dev); 3722 pci_enable_msi(priv->pci_dev);
4188 3723
4189 if (priv->cfg->ops->lib->isr_ops.alloc) 3724 iwl_alloc_isr_ict(priv);
4190 priv->cfg->ops->lib->isr_ops.alloc(priv);
4191 3725
4192 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr, 3726 err = request_irq(priv->pci_dev->irq, iwl_isr_ict,
4193 IRQF_SHARED, DRV_NAME, priv); 3727 IRQF_SHARED, DRV_NAME, priv);
4194 if (err) { 3728 if (err) {
4195 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 3729 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4198,6 +3732,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4198 3732
4199 iwl_setup_deferred_work(priv); 3733 iwl_setup_deferred_work(priv);
4200 iwl_setup_rx_handlers(priv); 3734 iwl_setup_rx_handlers(priv);
3735 iwl_testmode_init(priv);
4201 3736
4202 /********************************************* 3737 /*********************************************
4203 * 8. Enable interrupts and read RFKILL state 3738 * 8. Enable interrupts and read RFKILL state
@@ -4236,8 +3771,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4236 destroy_workqueue(priv->workqueue); 3771 destroy_workqueue(priv->workqueue);
4237 priv->workqueue = NULL; 3772 priv->workqueue = NULL;
4238 free_irq(priv->pci_dev->irq, priv); 3773 free_irq(priv->pci_dev->irq, priv);
4239 if (priv->cfg->ops->lib->isr_ops.free) 3774 iwl_free_isr_ict(priv);
4240 priv->cfg->ops->lib->isr_ops.free(priv);
4241 out_disable_msi: 3775 out_disable_msi:
4242 pci_disable_msi(priv->pci_dev); 3776 pci_disable_msi(priv->pci_dev);
4243 iwl_uninit_drv(priv); 3777 iwl_uninit_drv(priv);
@@ -4283,17 +3817,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4283 if (priv->mac80211_registered) { 3817 if (priv->mac80211_registered) {
4284 ieee80211_unregister_hw(priv->hw); 3818 ieee80211_unregister_hw(priv->hw);
4285 priv->mac80211_registered = 0; 3819 priv->mac80211_registered = 0;
4286 } else {
4287 iwl_down(priv);
4288 } 3820 }
4289 3821
4290 /* 3822 /* Reset to low power before unloading driver. */
4291 * Make sure device is reset to low power before unloading driver.
4292 * This may be redundant with iwl_down(), but there are paths to
4293 * run iwl_down() without calling apm_ops.stop(), and there are
4294 * paths to avoid running iwl_down() at all before leaving driver.
4295 * This (inexpensive) call *makes sure* device is reset.
4296 */
4297 iwl_apm_stop(priv); 3823 iwl_apm_stop(priv);
4298 3824
4299 iwl_tt_exit(priv); 3825 iwl_tt_exit(priv);
@@ -4335,8 +3861,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4335 3861
4336 iwl_uninit_drv(priv); 3862 iwl_uninit_drv(priv);
4337 3863
4338 if (priv->cfg->ops->lib->isr_ops.free) 3864 iwl_free_isr_ict(priv);
4339 priv->cfg->ops->lib->isr_ops.free(priv);
4340 3865
4341 dev_kfree_skb(priv->beacon_skb); 3866 dev_kfree_skb(priv->beacon_skb);
4342 3867
@@ -4521,21 +4046,21 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4521 {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)}, 4046 {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
4522 {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)}, 4047 {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
4523 4048
4524/* 200 Series */ 4049/* 105 Series */
4525 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)}, 4050 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
4526 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)}, 4051 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
4527 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)}, 4052 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
4528 {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)}, 4053 {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
4529 {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)}, 4054 {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
4530 {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)}, 4055 {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
4531 4056
4532/* 230 Series */ 4057/* 135 Series */
4533 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)}, 4058 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
4534 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)}, 4059 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
4535 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)}, 4060 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
4536 {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)}, 4061 {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
4537 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)}, 4062 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
4538 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)}, 4063 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
4539 4064
4540 {0} 4065 {0}
4541}; 4066};
@@ -4585,43 +4110,21 @@ module_exit(iwl_exit);
4585module_init(iwl_init); 4110module_init(iwl_init);
4586 4111
4587#ifdef CONFIG_IWLWIFI_DEBUG 4112#ifdef CONFIG_IWLWIFI_DEBUG
4588module_param_named(debug50, iwl_debug_level, uint, S_IRUGO);
4589MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)");
4590module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); 4113module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4591MODULE_PARM_DESC(debug, "debug output mask"); 4114MODULE_PARM_DESC(debug, "debug output mask");
4592#endif 4115#endif
4593 4116
4594module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
4595MODULE_PARM_DESC(swcrypto50,
4596 "using crypto in software (default 0 [hardware]) (deprecated)");
4597module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO); 4117module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
4598MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 4118MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
4599module_param_named(queues_num50,
4600 iwlagn_mod_params.num_of_queues, int, S_IRUGO);
4601MODULE_PARM_DESC(queues_num50,
4602 "number of hw queues in 50xx series (deprecated)");
4603module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO); 4119module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
4604MODULE_PARM_DESC(queues_num, "number of hw queues."); 4120MODULE_PARM_DESC(queues_num, "number of hw queues.");
4605module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
4606MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
4607module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO); 4121module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
4608MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); 4122MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
4609module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
4610 int, S_IRUGO);
4611MODULE_PARM_DESC(amsdu_size_8K50,
4612 "enable 8K amsdu size in 50XX series (deprecated)");
4613module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K, 4123module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
4614 int, S_IRUGO); 4124 int, S_IRUGO);
4615MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 4125MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
4616module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
4617MODULE_PARM_DESC(fw_restart50,
4618 "restart firmware in case of error (deprecated)");
4619module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO); 4126module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
4620MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 4127MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4621module_param_named(
4622 disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
4623MODULE_PARM_DESC(disable_hw_scan,
4624 "disable hardware scanning (default 0) (deprecated)");
4625 4128
4626module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int, 4129module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
4627 S_IRUGO); 4130 S_IRUGO);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 20f8e4188994..fe33fe8aa418 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,6 @@
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67 67
68/* configuration for the _agn devices */ 68/* configuration for the _agn devices */
69extern struct iwl_cfg iwl4965_agn_cfg;
70extern struct iwl_cfg iwl5300_agn_cfg; 69extern struct iwl_cfg iwl5300_agn_cfg;
71extern struct iwl_cfg iwl5100_agn_cfg; 70extern struct iwl_cfg iwl5100_agn_cfg;
72extern struct iwl_cfg iwl5350_agn_cfg; 71extern struct iwl_cfg iwl5350_agn_cfg;
@@ -103,10 +102,10 @@ extern struct iwl_cfg iwl2030_2bg_cfg;
103extern struct iwl_cfg iwl6035_2agn_cfg; 102extern struct iwl_cfg iwl6035_2agn_cfg;
104extern struct iwl_cfg iwl6035_2abg_cfg; 103extern struct iwl_cfg iwl6035_2abg_cfg;
105extern struct iwl_cfg iwl6035_2bg_cfg; 104extern struct iwl_cfg iwl6035_2bg_cfg;
106extern struct iwl_cfg iwl200_bg_cfg; 105extern struct iwl_cfg iwl105_bg_cfg;
107extern struct iwl_cfg iwl200_bgn_cfg; 106extern struct iwl_cfg iwl105_bgn_cfg;
108extern struct iwl_cfg iwl230_bg_cfg; 107extern struct iwl_cfg iwl135_bg_cfg;
109extern struct iwl_cfg iwl230_bgn_cfg; 108extern struct iwl_cfg iwl135_bgn_cfg;
110 109
111extern struct iwl_mod_params iwlagn_mod_params; 110extern struct iwl_mod_params iwlagn_mod_params;
112extern struct iwl_hcmd_ops iwlagn_hcmd; 111extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -114,7 +113,6 @@ extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
114extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; 113extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
115 114
116extern struct ieee80211_ops iwlagn_hw_ops; 115extern struct ieee80211_ops iwlagn_hw_ops;
117extern struct ieee80211_ops iwl4965_hw_ops;
118 116
119int iwl_reset_ict(struct iwl_priv *priv); 117int iwl_reset_ict(struct iwl_priv *priv);
120void iwl_disable_ict(struct iwl_priv *priv); 118void iwl_disable_ict(struct iwl_priv *priv);
@@ -122,21 +120,25 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv);
122void iwl_free_isr_ict(struct iwl_priv *priv); 120void iwl_free_isr_ict(struct iwl_priv *priv);
123irqreturn_t iwl_isr_ict(int irq, void *data); 121irqreturn_t iwl_isr_ict(int irq, void *data);
124 122
123/* call this function to flush any scheduled tasklet */
124static inline void iwl_synchronize_irq(struct iwl_priv *priv)
125{
126 /* wait to make sure we flush any pending tasklets */
127 synchronize_irq(priv->pci_dev->irq);
128 tasklet_kill(&priv->irq_tasklet);
129}
130
131int iwl_prepare_card_hw(struct iwl_priv *priv);
132
133int iwlagn_start_device(struct iwl_priv *priv);
134void iwlagn_stop_device(struct iwl_priv *priv);
135
125/* tx queue */ 136/* tx queue */
126void iwlagn_set_wr_ptrs(struct iwl_priv *priv, 137void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
127 int txq_id, u32 index); 138 int txq_id, u32 index);
128void iwlagn_tx_queue_set_status(struct iwl_priv *priv, 139void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
129 struct iwl_tx_queue *txq, 140 struct iwl_tx_queue *txq,
130 int tx_fifo_id, int scd_retry); 141 int tx_fifo_id, int scd_retry);
131void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
132 struct iwl_tx_queue *txq,
133 u16 byte_cnt);
134void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
135 struct iwl_tx_queue *txq);
136int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
137 int tx_fifo, int sta_id, int tid, u16 ssn_idx);
138int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
139 u16 ssn_idx, u8 tx_fifo);
140void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask); 142void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
141void iwl_free_tfds_in_queue(struct iwl_priv *priv, 143void iwl_free_tfds_in_queue(struct iwl_priv *priv,
142 int sta_id, int tid, int freed); 144 int sta_id, int tid, int freed);
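A brief usage note (not part of the patch) on the iwl_synchronize_irq() helper added above: it is meant for the shutdown path, after interrupts have been disabled, so that neither the ISR nor the tasklet can still be running when resources are torn down. A rough sketch of the expected ordering, using the driver's existing iwl_disable_interrupts() and the kernel's free_irq():

/* Sketch only: ordering around iwl_synchronize_irq() on the down path. */
static void example_irq_teardown(struct iwl_priv *priv)
{
	iwl_disable_interrupts(priv);	/* stop scheduling new work */
	iwl_synchronize_irq(priv);	/* flush the ISR, then kill the tasklet */
	free_irq(priv->pci_dev->irq, priv);
}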
@@ -151,16 +153,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
151 u32 changes); 153 u32 changes);
152 154
153/* uCode */ 155/* uCode */
154int iwlagn_load_ucode(struct iwl_priv *priv);
155void iwlagn_rx_calib_result(struct iwl_priv *priv, 156void iwlagn_rx_calib_result(struct iwl_priv *priv,
156 struct iwl_rx_mem_buffer *rxb); 157 struct iwl_rx_mem_buffer *rxb);
157void iwlagn_rx_calib_complete(struct iwl_priv *priv, 158int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
158 struct iwl_rx_mem_buffer *rxb);
159void iwlagn_init_alive_start(struct iwl_priv *priv);
160int iwlagn_alive_notify(struct iwl_priv *priv);
161int iwl_verify_ucode(struct iwl_priv *priv);
162void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
163void iwlagn_send_prio_tbl(struct iwl_priv *priv); 159void iwlagn_send_prio_tbl(struct iwl_priv *priv);
160int iwlagn_run_init_ucode(struct iwl_priv *priv);
161int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
162 struct fw_img *image,
163 int subtype, int alternate_subtype);
164 164
165/* lib */ 165/* lib */
166void iwl_check_abort_status(struct iwl_priv *priv, 166void iwl_check_abort_status(struct iwl_priv *priv,
@@ -179,8 +179,6 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv);
179int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv); 179int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
180int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 180int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
181void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 181void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
182void iwl_dump_csr(struct iwl_priv *priv);
183int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
184 182
185/* rx */ 183/* rx */
186void iwlagn_rx_queue_restock(struct iwl_priv *priv); 184void iwlagn_rx_queue_restock(struct iwl_priv *priv);
@@ -206,6 +204,9 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
206 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 204 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
207int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, 205int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
208 struct ieee80211_sta *sta, u16 tid); 206 struct ieee80211_sta *sta, u16 tid);
207void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
208 struct ieee80211_sta *sta,
209 int tid, int frame_limit);
209int iwlagn_txq_check_empty(struct iwl_priv *priv, 210int iwlagn_txq_check_empty(struct iwl_priv *priv,
210 int sta_id, u8 tid, int txq_id); 211 int sta_id, u8 tid, int txq_id);
211void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, 212void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
@@ -225,6 +226,7 @@ static inline u32 iwl_tx_status_to_mac80211(u32 status)
225 case TX_STATUS_DIRECT_DONE: 226 case TX_STATUS_DIRECT_DONE:
226 return IEEE80211_TX_STAT_ACK; 227 return IEEE80211_TX_STAT_ACK;
227 case TX_STATUS_FAIL_DEST_PS: 228 case TX_STATUS_FAIL_DEST_PS:
229 case TX_STATUS_FAIL_PASSIVE_NO_RX:
228 return IEEE80211_TX_STAT_TX_FILTERED; 230 return IEEE80211_TX_STAT_TX_FILTERED;
229 default: 231 default:
230 return 0; 232 return 0;
@@ -249,8 +251,6 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
249 struct ieee80211_vif *vif, bool add); 251 struct ieee80211_vif *vif, bool add);
250 252
251/* hcmd */ 253/* hcmd */
252int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
253 struct iwl_rxon_context *ctx);
254int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); 254int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
255int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 255int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
256 256
@@ -311,7 +311,7 @@ static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
311 311
312static inline u8 iwl_hw_get_rate(__le32 rate_n_flags) 312static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
313{ 313{
314 return le32_to_cpu(rate_n_flags) & 0xFF; 314 return le32_to_cpu(rate_n_flags) & RATE_MCS_RATE_MSK;
315} 315}
316 316
317static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) 317static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
@@ -322,50 +322,39 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
322/* eeprom */ 322/* eeprom */
323void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv); 323void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv);
324void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); 324void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
325int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
326void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
327 325
328/* notification wait support */ 326/* notification wait support */
329void __acquires(wait_entry) 327void __acquires(wait_entry)
330iwlagn_init_notification_wait(struct iwl_priv *priv, 328iwlagn_init_notification_wait(struct iwl_priv *priv,
331 struct iwl_notification_wait *wait_entry, 329 struct iwl_notification_wait *wait_entry,
330 u8 cmd,
332 void (*fn)(struct iwl_priv *priv, 331 void (*fn)(struct iwl_priv *priv,
333 struct iwl_rx_packet *pkt), 332 struct iwl_rx_packet *pkt,
334 u8 cmd); 333 void *data),
335signed long __releases(wait_entry) 334 void *fn_data);
335int __must_check __releases(wait_entry)
336iwlagn_wait_notification(struct iwl_priv *priv, 336iwlagn_wait_notification(struct iwl_priv *priv,
337 struct iwl_notification_wait *wait_entry, 337 struct iwl_notification_wait *wait_entry,
338 unsigned long timeout); 338 unsigned long timeout);
339void __releases(wait_entry) 339void __releases(wait_entry)
340iwlagn_remove_notification(struct iwl_priv *priv, 340iwlagn_remove_notification(struct iwl_priv *priv,
341 struct iwl_notification_wait *wait_entry); 341 struct iwl_notification_wait *wait_entry);
342 342extern int iwlagn_init_alive_start(struct iwl_priv *priv);
343/* mac80211 handlers (for 4965) */ 343extern int iwl_alive_start(struct iwl_priv *priv);
344void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 344/* svtool */
345int iwlagn_mac_start(struct ieee80211_hw *hw); 345#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
346void iwlagn_mac_stop(struct ieee80211_hw *hw); 346extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len);
347void iwlagn_configure_filter(struct ieee80211_hw *hw, 347extern void iwl_testmode_init(struct iwl_priv *priv);
348 unsigned int changed_flags, 348#else
349 unsigned int *total_flags, 349static inline
350 u64 multicast); 350int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
351int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 351{
352 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 352 return -ENOSYS;
353 struct ieee80211_key_conf *key); 353}
354void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 354static inline
355 struct ieee80211_vif *vif, 355void iwl_testmode_init(struct iwl_priv *priv)
356 struct ieee80211_key_conf *keyconf, 356{
357 struct ieee80211_sta *sta, 357}
358 u32 iv32, u16 *phase1key); 358#endif
359int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
360 struct ieee80211_vif *vif,
361 enum ieee80211_ampdu_mlme_action action,
362 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
363 u8 buf_size);
364int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
365 struct ieee80211_vif *vif,
366 struct ieee80211_sta *sta);
367void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
368 struct ieee80211_channel_switch *ch_switch);
369void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
370 359
371#endif /* __iwl_agn_h__ */ 360#endif /* __iwl_agn_h__ */
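As a usage note (not part of the patch), the reworked notification-wait prototypes above are typically driven in three steps: register the wait with a command id, a callback and a data cookie, trigger the firmware, then block on the (now __must_check) wait. A rough sketch, where example_trigger_fw() stands in for whatever host command actually provokes the notification:

/* Sketch only: driving the new notification-wait API. */
static void example_on_notif(struct iwl_priv *priv,
			     struct iwl_rx_packet *pkt, void *data)
{
	bool *seen = data;	/* the fn_data cookie passed below */

	*seen = true;
}

static int example_wait_for_notif(struct iwl_priv *priv, u8 cmd)
{
	struct iwl_notification_wait wait_entry;
	bool seen = false;
	int ret;

	iwlagn_init_notification_wait(priv, &wait_entry, cmd,
				      example_on_notif, &seen);

	ret = example_trigger_fw(priv);		/* hypothetical trigger */
	if (ret) {
		iwlagn_remove_notification(priv, &wait_entry);
		return ret;
	}

	/* __must_check: a timeout or error here has to be propagated */
	return iwlagn_wait_notification(priv, &wait_entry, 2 * HZ);
}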
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index ca42ffa63ed7..5fdad6532118 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -103,9 +103,7 @@ enum {
103 REPLY_WEPKEY = 0x20, 103 REPLY_WEPKEY = 0x20,
104 104
105 /* RX, TX, LEDs */ 105 /* RX, TX, LEDs */
106 REPLY_3945_RX = 0x1b, /* 3945 only */
107 REPLY_TX = 0x1c, 106 REPLY_TX = 0x1c,
108 REPLY_RATE_SCALE = 0x47, /* 3945 only */
109 REPLY_LEDS_CMD = 0x48, 107 REPLY_LEDS_CMD = 0x48,
110 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ 108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
111 109
@@ -229,7 +227,7 @@ struct iwl_cmd_header {
229 * There is one exception: uCode sets bit 15 when it originates 227 * There is one exception: uCode sets bit 15 when it originates
230 * the response/notification, i.e. when the response/notification 228 * the response/notification, i.e. when the response/notification
231 * is not a direct response to a command sent by the driver. For 229 * is not a direct response to a command sent by the driver. For
232 * example, uCode issues REPLY_3945_RX when it sends a received frame 230 * example, uCode issues REPLY_RX when it sends a received frame
233 * to the driver; it is not a direct response to any driver command. 231 * to the driver; it is not a direct response to any driver command.
234 * 232 *
235 * The Linux driver uses the following format: 233 * The Linux driver uses the following format:
@@ -249,36 +247,6 @@ struct iwl_cmd_header {
249 247
250 248
251/** 249/**
252 * struct iwl3945_tx_power
253 *
254 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
255 *
256 * Each entry contains two values:
257 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
258 * linear value that multiplies the output of the digital signal processor,
259 * before being sent to the analog radio.
260 * 2) Radio gain. This sets the analog gain of the radio Tx path.
261 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
262 *
263 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
264 */
265struct iwl3945_tx_power {
266 u8 tx_gain; /* gain for analog radio */
267 u8 dsp_atten; /* gain for DSP */
268} __packed;
269
270/**
271 * struct iwl3945_power_per_rate
272 *
273 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
274 */
275struct iwl3945_power_per_rate {
276 u8 rate; /* plcp */
277 struct iwl3945_tx_power tpc;
278 u8 reserved;
279} __packed;
280
281/**
282 * iwlagn rate_n_flags bit fields 250 * iwlagn rate_n_flags bit fields
283 * 251 *
284 * rate_n_flags format is used in following iwlagn commands: 252 * rate_n_flags format is used in following iwlagn commands:
@@ -324,6 +292,8 @@ struct iwl3945_power_per_rate {
324#define RATE_MCS_SPATIAL_MSK 0x18 292#define RATE_MCS_SPATIAL_MSK 0x18
325#define RATE_MCS_HT_DUP_POS 5 293#define RATE_MCS_HT_DUP_POS 5
326#define RATE_MCS_HT_DUP_MSK 0x20 294#define RATE_MCS_HT_DUP_MSK 0x20
295/* Both legacy and HT use bits 7:0 as the CCK/OFDM rate or HT MCS */
296#define RATE_MCS_RATE_MSK 0xff
327 297
328/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */ 298/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
329#define RATE_MCS_FLAGS_POS 8 299#define RATE_MCS_FLAGS_POS 8
@@ -375,30 +345,6 @@ struct iwl3945_power_per_rate {
375#define IWL_PWR_CCK_ENTRIES 2 345#define IWL_PWR_CCK_ENTRIES 2
376 346
377/** 347/**
378 * union iwl4965_tx_power_dual_stream
379 *
380 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
381 * Use __le32 version (struct tx_power_dual_stream) when building command.
382 *
383 * Driver provides radio gain and DSP attenuation settings to device in pairs,
384 * one value for each transmitter chain. The first value is for transmitter A,
385 * second for transmitter B.
386 *
387 * For SISO bit rates, both values in a pair should be identical.
388 * For MIMO rates, one value may be different from the other,
389 * in order to balance the Tx output between the two transmitters.
390 *
391 * See more details in doc for TXPOWER in iwl-4965-hw.h.
392 */
393union iwl4965_tx_power_dual_stream {
394 struct {
395 u8 radio_tx_gain[2];
396 u8 dsp_predis_atten[2];
397 } s;
398 u32 dw;
399};
400
401/**
402 * struct tx_power_dual_stream 348 * struct tx_power_dual_stream
403 * 349 *
404 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 350 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
@@ -410,15 +356,6 @@ struct tx_power_dual_stream {
410} __packed; 356} __packed;
411 357
412/** 358/**
413 * struct iwl4965_tx_power_db
414 *
415 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
416 */
417struct iwl4965_tx_power_db {
418 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
419} __packed;
420
421/**
422 * Command REPLY_TX_POWER_DBM_CMD = 0x98 359 * Command REPLY_TX_POWER_DBM_CMD = 0x98
423 * struct iwlagn_tx_power_dbm_cmd 360 * struct iwlagn_tx_power_dbm_cmd
424 */ 361 */
@@ -449,55 +386,18 @@ struct iwl_tx_ant_config_cmd {
449 *****************************************************************************/ 386 *****************************************************************************/
450 387
451#define UCODE_VALID_OK cpu_to_le32(0x1) 388#define UCODE_VALID_OK cpu_to_le32(0x1)
452#define INITIALIZE_SUBTYPE (9)
453 389
454/* 390enum iwlagn_ucode_subtype {
455 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) 391 UCODE_SUBTYPE_REGULAR = 0,
456 * 392 UCODE_SUBTYPE_REGULAR_NEW = 1,
457 * uCode issues this "initialize alive" notification once the initialization 393 UCODE_SUBTYPE_INIT = 9,
458 * uCode image has completed its work, and is ready to load the runtime image.
459 * This is the *first* "alive" notification that the driver will receive after
460 * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
461 *
462 * See comments documenting "BSM" (bootstrap state machine).
463 *
464 * For 4965, this notification contains important calibration data for
465 * calculating txpower settings:
466 *
467 * 1) Power supply voltage indication. The voltage sensor outputs higher
468 * values for lower voltage, and vice verse.
469 *
470 * 2) Temperature measurement parameters, for each of two channel widths
471 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
472 * is done via one of the receiver chains, and channel width influences
473 * the results.
474 *
475 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
476 * for each of 5 frequency ranges.
477 */
478struct iwl_init_alive_resp {
479 u8 ucode_minor;
480 u8 ucode_major;
481 __le16 reserved1;
482 u8 sw_rev[8];
483 u8 ver_type;
484 u8 ver_subtype; /* "9" for initialize alive */
485 __le16 reserved2;
486 __le32 log_event_table_ptr;
487 __le32 error_event_table_ptr;
488 __le32 timestamp;
489 __le32 is_valid;
490
491 /* calibration values from "initialize" uCode */
492 __le32 voltage; /* signed, higher value is lower voltage */
493 __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */
494 __le32 therm_r2[2]; /* signed */
495 __le32 therm_r3[2]; /* signed */
496 __le32 therm_r4[2]; /* signed */
497 __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
498 * 2 Tx chains */
499} __packed;
500 394
395 /*
396 * Not a valid subtype, the ucode has just a u8, so
397 * we can use something > 0xff for this value.
398 */
399 UCODE_SUBTYPE_NONE_LOADED = 0x100,
400};
501 401
502/** 402/**
503 * REPLY_ALIVE = 0x1 (response only, not a command) 403 * REPLY_ALIVE = 0x1 (response only, not a command)
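A small aside (not part of the patch) on why UCODE_SUBTYPE_NONE_LOADED is 0x100: the ucode image only carries a u8 subtype, so any value above 0xff can never match a real image and works as a "nothing loaded yet" marker. A trivial illustration:

/* Sketch only: priv->ucode_type starts out as UCODE_SUBTYPE_NONE_LOADED
 * (see the probe path in this patch), so this test stays false until a
 * real image has been loaded. */
static bool example_image_loaded(struct iwl_priv *priv, u8 subtype)
{
	return priv->ucode_type == subtype;
}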
@@ -533,49 +433,61 @@ struct iwl_init_alive_resp {
533 * 433 *
534 * 2) error_event_table_ptr indicates base of the error log. This contains 434 * 2) error_event_table_ptr indicates base of the error log. This contains
535 * information about any uCode error that occurs. For agn, the format 435 * information about any uCode error that occurs. For agn, the format
536 * of the error log is: 436 * of the error log is defined by struct iwl_error_event_table.
537 *
538 * __le32 valid; (nonzero) valid, (0) log is empty
539 * __le32 error_id; type of error
540 * __le32 pc; program counter
541 * __le32 blink1; branch link
542 * __le32 blink2; branch link
543 * __le32 ilink1; interrupt link
544 * __le32 ilink2; interrupt link
545 * __le32 data1; error-specific data
546 * __le32 data2; error-specific data
547 * __le32 line; source code line of error
548 * __le32 bcon_time; beacon timer
549 * __le32 tsf_low; network timestamp function timer
550 * __le32 tsf_hi; network timestamp function timer
551 * __le32 gp1; GP1 timer register
552 * __le32 gp2; GP2 timer register
553 * __le32 gp3; GP3 timer register
554 * __le32 ucode_ver; uCode version
555 * __le32 hw_ver; HW Silicon version
556 * __le32 brd_ver; HW board version
557 * __le32 log_pc; log program counter
558 * __le32 frame_ptr; frame pointer
559 * __le32 stack_ptr; stack pointer
560 * __le32 hcmd; last host command
561 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
562 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
563 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
564 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
565 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
566 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
567 * __le32 wait_event; wait event() caller address
568 * __le32 l2p_control; L2pControlField
569 * __le32 l2p_duration; L2pDurationField
570 * __le32 l2p_mhvalid; L2pMhValidBits
571 * __le32 l2p_addr_match; L2pAddrMatchStat
572 * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
573 * __le32 u_timestamp;	indicates the date and time of compilation
574 * __le32 reserved;
575 * 437 *
576 * The Linux driver can print both logs to the system log when a uCode error 438 * The Linux driver can print both logs to the system log when a uCode error
577 * occurs. 439 * occurs.
578 */ 440 */
441
442/*
443 * Note: This structure is read from the device with IO accesses,
444 * and the reading already does the endian conversion. As it is
445 * read with u32-sized accesses, any members with a different size
446 * need to be ordered correctly though!
447 */
448struct iwl_error_event_table {
449 u32 valid; /* (nonzero) valid, (0) log is empty */
450 u32 error_id; /* type of error */
451 u32 pc; /* program counter */
452 u32 blink1; /* branch link */
453 u32 blink2; /* branch link */
454 u32 ilink1; /* interrupt link */
455 u32 ilink2; /* interrupt link */
456 u32 data1; /* error-specific data */
457 u32 data2; /* error-specific data */
458 u32 line; /* source code line of error */
459 u32 bcon_time; /* beacon timer */
460 u32 tsf_low; /* network timestamp function timer */
461 u32 tsf_hi; /* network timestamp function timer */
462 u32 gp1; /* GP1 timer register */
463 u32 gp2; /* GP2 timer register */
464 u32 gp3; /* GP3 timer register */
465 u32 ucode_ver; /* uCode version */
466 u32 hw_ver; /* HW Silicon version */
467 u32 brd_ver; /* HW board version */
468 u32 log_pc; /* log program counter */
469 u32 frame_ptr; /* frame pointer */
470 u32 stack_ptr; /* stack pointer */
471 u32 hcmd; /* last host command header */
472#if 0
473 /* no need to read the remainder, we don't use the values */
474 u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */
475 u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */
476 u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */
477 u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */
478 u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */
479 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
480 u32 wait_event; /* wait event() caller address */
481 u32 l2p_control; /* L2pControlField */
482 u32 l2p_duration; /* L2pDurationField */
483 u32 l2p_mhvalid; /* L2pMhValidBits */
484 u32 l2p_addr_match; /* L2pAddrMatchStat */
485 u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */
486	u32 u_timestamp;	/* indicates the date and time of compilation */
487 u32 flow_handler; /* FH read/write pointers, RX credit */
488#endif
489} __packed;
490
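A minimal sketch of the read pattern the note above describes: the table is copied out of device memory one u32 at a time, which is why the struct keeps plain u32 members in device order. The accessor name iwl_read_targ_mem() is an assumption used only to illustrate the loop, not a verified API:

static void iwl_copy_error_table(struct iwl_priv *priv, u32 base,
				 struct iwl_error_event_table *table)
{
	u32 *dst = (u32 *)table;
	unsigned int i;

	/* u32-sized reads; the accessor already returns host byte order */
	for (i = 0; i < sizeof(*table) / sizeof(u32); i++)
		dst[i] = iwl_read_targ_mem(priv, base + i * sizeof(u32));
}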
579struct iwl_alive_resp { 491struct iwl_alive_resp {
580 u8 ucode_minor; 492 u8 ucode_minor;
581 u8 ucode_major; 493 u8 ucode_major;
@@ -722,46 +634,6 @@ enum {
722 * regardless of whether RXON_FILTER_ASSOC_MSK is set. 634 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
723 */ 635 */
724 636
725struct iwl3945_rxon_cmd {
726 u8 node_addr[6];
727 __le16 reserved1;
728 u8 bssid_addr[6];
729 __le16 reserved2;
730 u8 wlap_bssid_addr[6];
731 __le16 reserved3;
732 u8 dev_type;
733 u8 air_propagation;
734 __le16 reserved4;
735 u8 ofdm_basic_rates;
736 u8 cck_basic_rates;
737 __le16 assoc_id;
738 __le32 flags;
739 __le32 filter_flags;
740 __le16 channel;
741 __le16 reserved5;
742} __packed;
743
744struct iwl4965_rxon_cmd {
745 u8 node_addr[6];
746 __le16 reserved1;
747 u8 bssid_addr[6];
748 __le16 reserved2;
749 u8 wlap_bssid_addr[6];
750 __le16 reserved3;
751 u8 dev_type;
752 u8 air_propagation;
753 __le16 rx_chain;
754 u8 ofdm_basic_rates;
755 u8 cck_basic_rates;
756 __le16 assoc_id;
757 __le32 flags;
758 __le32 filter_flags;
759 __le16 channel;
760 u8 ofdm_ht_single_stream_basic_rates;
761 u8 ofdm_ht_dual_stream_basic_rates;
762} __packed;
763
764/* 5000 HW just extend this command */
765struct iwl_rxon_cmd { 637struct iwl_rxon_cmd {
766 u8 node_addr[6]; 638 u8 node_addr[6];
767 __le16 reserved1; 639 __le16 reserved1;
@@ -789,26 +661,7 @@ struct iwl_rxon_cmd {
789/* 661/*
790 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 662 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
791 */ 663 */
792struct iwl3945_rxon_assoc_cmd { 664struct iwl_rxon_assoc_cmd {
793 __le32 flags;
794 __le32 filter_flags;
795 u8 ofdm_basic_rates;
796 u8 cck_basic_rates;
797 __le16 reserved;
798} __packed;
799
800struct iwl4965_rxon_assoc_cmd {
801 __le32 flags;
802 __le32 filter_flags;
803 u8 ofdm_basic_rates;
804 u8 cck_basic_rates;
805 u8 ofdm_ht_single_stream_basic_rates;
806 u8 ofdm_ht_dual_stream_basic_rates;
807 __le16 rx_chain_select_flags;
808 __le16 reserved;
809} __packed;
810
811struct iwl5000_rxon_assoc_cmd {
812 __le32 flags; 665 __le32 flags;
813 __le32 filter_flags; 666 __le32 filter_flags;
814 u8 ofdm_basic_rates; 667 u8 ofdm_basic_rates;
@@ -843,26 +696,6 @@ struct iwl_rxon_time_cmd {
843/* 696/*
844 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 697 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
845 */ 698 */
846struct iwl3945_channel_switch_cmd {
847 u8 band;
848 u8 expect_beacon;
849 __le16 channel;
850 __le32 rxon_flags;
851 __le32 rxon_filter_flags;
852 __le32 switch_time;
853 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
854} __packed;
855
856struct iwl4965_channel_switch_cmd {
857 u8 band;
858 u8 expect_beacon;
859 __le16 channel;
860 __le32 rxon_flags;
861 __le32 rxon_filter_flags;
862 __le32 switch_time;
863 struct iwl4965_tx_power_db tx_power;
864} __packed;
865
866/** 699/**
867 * struct iwl5000_channel_switch_cmd 700 * struct iwl5000_channel_switch_cmd
868 * @band: 0- 5.2GHz, 1- 2.4GHz 701 * @band: 0- 5.2GHz, 1- 2.4GHz
@@ -976,15 +809,10 @@ struct iwl_qosparam_cmd {
976#define IWL_AP_ID 0 809#define IWL_AP_ID 0
977#define IWL_AP_ID_PAN 1 810#define IWL_AP_ID_PAN 1
978#define IWL_STA_ID 2 811#define IWL_STA_ID 2
979#define IWL3945_BROADCAST_ID 24
980#define IWL3945_STATION_COUNT 25
981#define IWL4965_BROADCAST_ID 31
982#define IWL4965_STATION_COUNT 32
983#define IWLAGN_PAN_BCAST_ID 14 812#define IWLAGN_PAN_BCAST_ID 14
984#define IWLAGN_BROADCAST_ID 15 813#define IWLAGN_BROADCAST_ID 15
985#define IWLAGN_STATION_COUNT 16 814#define IWLAGN_STATION_COUNT 16
986 815
987#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
988#define IWL_INVALID_STATION 255 816#define IWL_INVALID_STATION 255
989 817
990#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 818#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
@@ -1032,16 +860,6 @@ struct iwl_qosparam_cmd {
1032 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 860 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1033#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 861#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
1034 862
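Illustrative use of the macro above (values arbitrary): the station index occupies bits 7:4 and the TID bits 3:0.

u8 ra_tid = BUILD_RAxTID(5, 3);	/* (5 << 4) + 3 == 0x53 */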
1035struct iwl4965_keyinfo {
1036 __le16 key_flags;
1037 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
1038 u8 reserved1;
1039 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
1040 u8 key_offset;
1041 u8 reserved2;
1042 u8 key[16]; /* 16-byte unicast decryption key */
1043} __packed;
1044
1045/* agn */ 863/* agn */
1046struct iwl_keyinfo { 864struct iwl_keyinfo {
1047 __le16 key_flags; 865 __le16 key_flags;
@@ -1083,7 +901,6 @@ struct sta_id_modify {
1083 * with info on security keys, aggregation parameters, and Tx rates for 901 * with info on security keys, aggregation parameters, and Tx rates for
1084 * initial Tx attempt and any retries (agn devices uses 902 * initial Tx attempt and any retries (agn devices uses
1085 * REPLY_TX_LINK_QUALITY_CMD, 903 * REPLY_TX_LINK_QUALITY_CMD,
1086 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
1087 * 904 *
1088 * REPLY_ADD_STA sets up the table entry for one station, either creating 905 * REPLY_ADD_STA sets up the table entry for one station, either creating
1089 * a new entry, or modifying a pre-existing one. 906 * a new entry, or modifying a pre-existing one.
@@ -1103,72 +920,6 @@ struct sta_id_modify {
1103 * entries for all STAs in network, starting with index IWL_STA_ID. 920 * entries for all STAs in network, starting with index IWL_STA_ID.
1104 */ 921 */
1105 922
1106struct iwl3945_addsta_cmd {
1107 u8 mode; /* 1: modify existing, 0: add new station */
1108 u8 reserved[3];
1109 struct sta_id_modify sta;
1110 struct iwl4965_keyinfo key;
1111 __le32 station_flags; /* STA_FLG_* */
1112 __le32 station_flags_msk; /* STA_FLG_* */
1113
1114 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1115 * corresponding to bit (e.g. bit 5 controls TID 5).
1116 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1117 __le16 tid_disable_tx;
1118
1119 __le16 rate_n_flags;
1120
1121 /* TID for which to add block-ack support.
1122 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1123 u8 add_immediate_ba_tid;
1124
1125 /* TID for which to remove block-ack support.
1126 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1127 u8 remove_immediate_ba_tid;
1128
1129 /* Starting Sequence Number for added block-ack support.
1130 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1131 __le16 add_immediate_ba_ssn;
1132} __packed;
1133
1134struct iwl4965_addsta_cmd {
1135 u8 mode; /* 1: modify existing, 0: add new station */
1136 u8 reserved[3];
1137 struct sta_id_modify sta;
1138 struct iwl4965_keyinfo key;
1139 __le32 station_flags; /* STA_FLG_* */
1140 __le32 station_flags_msk; /* STA_FLG_* */
1141
1142 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
1143 * corresponding to bit (e.g. bit 5 controls TID 5).
1144 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1145 __le16 tid_disable_tx;
1146
1147 __le16 reserved1;
1148
1149 /* TID for which to add block-ack support.
1150 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1151 u8 add_immediate_ba_tid;
1152
1153 /* TID for which to remove block-ack support.
1154 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
1155 u8 remove_immediate_ba_tid;
1156
1157 /* Starting Sequence Number for added block-ack support.
1158 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1159 __le16 add_immediate_ba_ssn;
1160
1161 /*
1162 * Number of packets OK to transmit to station even though
1163 * it is asleep -- used to synchronise PS-poll and u-APSD
1164 * responses while ucode keeps track of STA sleep state.
1165 */
1166 __le16 sleep_tx_count;
1167
1168 __le16 reserved2;
1169} __packed;
1170
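A sketch of the tid_disable_tx usage described in the comments above (assumes modify_mask lives in the embedded struct sta_id_modify; illustrative, not a verified driver helper):

static void iwl_sta_disable_tid_tx(struct iwl_addsta_cmd *cmd, u8 tid)
{
	cmd->mode = 1;				/* modify an existing entry */
	cmd->sta.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd->tid_disable_tx |= cpu_to_le16(BIT(tid));
}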
1171/* agn */
1172struct iwl_addsta_cmd { 923struct iwl_addsta_cmd {
1173 u8 mode; /* 1: modify existing, 0: add new station */ 924 u8 mode; /* 1: modify existing, 0: add new station */
1174 u8 reserved[3]; 925 u8 reserved[3];
@@ -1337,62 +1088,6 @@ struct iwl_wep_cmd {
1337#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) 1088#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1338 1089
1339 1090
1340struct iwl3945_rx_frame_stats {
1341 u8 phy_count;
1342 u8 id;
1343 u8 rssi;
1344 u8 agc;
1345 __le16 sig_avg;
1346 __le16 noise_diff;
1347 u8 payload[0];
1348} __packed;
1349
1350struct iwl3945_rx_frame_hdr {
1351 __le16 channel;
1352 __le16 phy_flags;
1353 u8 reserved1;
1354 u8 rate;
1355 __le16 len;
1356 u8 payload[0];
1357} __packed;
1358
1359struct iwl3945_rx_frame_end {
1360 __le32 status;
1361 __le64 timestamp;
1362 __le32 beacon_timestamp;
1363} __packed;
1364
1365/*
1366 * REPLY_3945_RX = 0x1b (response only, not a command)
1367 *
1368 * NOTE: DO NOT dereference from casts to this structure
1369 * It is provided only for calculating minimum data set size.
1370 * The actual offsets of the hdr and end are dynamic based on
1371 * stats.phy_count
1372 */
1373struct iwl3945_rx_frame {
1374 struct iwl3945_rx_frame_stats stats;
1375 struct iwl3945_rx_frame_hdr hdr;
1376 struct iwl3945_rx_frame_end end;
1377} __packed;
1378
1379#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
1380
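As the note above warns, hdr and end must be located at run time rather than through the struct layout; a sketch of the offset arithmetic (helper names are illustrative only):

static inline struct iwl3945_rx_frame_hdr *
iwl3945_rx_frame_hdr_ptr(struct iwl3945_rx_frame *frame)
{
	/* phy_count bytes of PHY data follow the stats block */
	return (void *)(frame->stats.payload + frame->stats.phy_count);
}

static inline struct iwl3945_rx_frame_end *
iwl3945_rx_frame_end_ptr(struct iwl3945_rx_frame *frame)
{
	struct iwl3945_rx_frame_hdr *hdr = iwl3945_rx_frame_hdr_ptr(frame);

	return (void *)(hdr->payload + le16_to_cpu(hdr->len));
}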
1381/* Fixed (non-configurable) rx data from phy */
1382
1383#define IWL49_RX_RES_PHY_CNT 14
1384#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1385#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1386#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1387#define IWL49_AGC_DB_POS (7)
1388struct iwl4965_rx_non_cfg_phy {
1389 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1390 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1391 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1392 u8 pad[0];
1393} __packed;
1394
1395
1396#define IWLAGN_RX_RES_PHY_CNT 8 1091#define IWLAGN_RX_RES_PHY_CNT 8
1397#define IWLAGN_RX_RES_AGC_IDX 1 1092#define IWLAGN_RX_RES_AGC_IDX 1
1398#define IWLAGN_RX_RES_RSSI_AB_IDX 2 1093#define IWLAGN_RX_RES_RSSI_AB_IDX 2
@@ -1576,80 +1271,6 @@ struct iwl_rx_mpdu_res_start {
1576 * REPLY_TX = 0x1c (command) 1271 * REPLY_TX = 0x1c (command)
1577 */ 1272 */
1578 1273
1579struct iwl3945_tx_cmd {
1580 /*
1581 * MPDU byte count:
1582 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
1583 * + 8 byte IV for CCM or TKIP (not used for WEP)
1584 * + Data payload
1585 * + 8-byte MIC (not used for CCM/WEP)
1586 * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
1587 *	MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
1588 * Range: 14-2342 bytes.
1589 */
1590 __le16 len;
1591
1592 /*
1593 * MPDU or MSDU byte count for next frame.
1594 * Used for fragmentation and bursting, but not 11n aggregation.
1595 * Same as "len", but for next frame. Set to 0 if not applicable.
1596 */
1597 __le16 next_frame_len;
1598
1599 __le32 tx_flags; /* TX_CMD_FLG_* */
1600
1601 u8 rate;
1602
1603 /* Index of recipient station in uCode's station table */
1604 u8 sta_id;
1605 u8 tid_tspec;
1606 u8 sec_ctl;
1607 u8 key[16];
1608 union {
1609 u8 byte[8];
1610 __le16 word[4];
1611 __le32 dw[2];
1612 } tkip_mic;
1613 __le32 next_frame_info;
1614 union {
1615 __le32 life_time;
1616 __le32 attempt;
1617 } stop_time;
1618 u8 supp_rates[2];
1619 u8 rts_retry_limit; /*byte 50 */
1620 u8 data_retry_limit; /*byte 51 */
1621 union {
1622 __le16 pm_frame_timeout;
1623 __le16 attempt_duration;
1624 } timeout;
1625
1626 /*
1627 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
1628 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
1629 */
1630 __le16 driver_txop;
1631
1632 /*
1633 * MAC header goes here, followed by 2 bytes padding if MAC header
1634 * length is 26 or 30 bytes, followed by payload data
1635 */
1636 u8 payload[0];
1637 struct ieee80211_hdr hdr[0];
1638} __packed;
1639
1640/*
1641 * REPLY_TX = 0x1c (response)
1642 */
1643struct iwl3945_tx_resp {
1644 u8 failure_rts;
1645 u8 failure_frame;
1646 u8 bt_kill_count;
1647 u8 rate;
1648 __le32 wireless_media_time;
1649 __le32 status; /* TX status */
1650} __packed;
1651
1652
1653/* 1274/*
1654 * 4965 uCode updates these Tx attempt count values in host DRAM. 1275 * 4965 uCode updates these Tx attempt count values in host DRAM.
1655 * Used for managing Tx retries when expecting block-acks. 1276 * Used for managing Tx retries when expecting block-acks.
@@ -1740,54 +1361,6 @@ struct iwl_tx_cmd {
1740 struct ieee80211_hdr hdr[0]; 1361 struct ieee80211_hdr hdr[0];
1741} __packed; 1362} __packed;
1742 1363
1743/* TX command response is sent after *3945* transmission attempts.
1744 *
1745 * NOTES:
1746 *
1747 * TX_STATUS_FAIL_NEXT_FRAG
1748 *
1749 * If the fragment flag in the MAC header for the frame being transmitted
1750 * is set and there is insufficient time to transmit the next frame, the
1751 * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
1752 *
1753 * TX_STATUS_FIFO_UNDERRUN
1754 *
1755 * Indicates the host did not provide bytes to the FIFO fast enough while
1756 * a TX was in progress.
1757 *
1758 * TX_STATUS_FAIL_MGMNT_ABORT
1759 *
1760 * This status is only possible if the ABORT ON MGMT RX parameter was
1761 * set to true with the TX command.
1762 *
1763 * If the MSB of the status parameter is set then an abort sequence is
1764 * required. This sequence consists of the host activating the TX Abort
1765 * control line, and then waiting for the TX Abort command response. This
1766 * indicates that the device is no longer in a transmit state, and that the
1767 * command FIFO has been cleared. The host must then deactivate the TX Abort
1768 * control line. Receiving is still allowed in this case.
1769 */
1770enum {
1771 TX_3945_STATUS_SUCCESS = 0x01,
1772 TX_3945_STATUS_DIRECT_DONE = 0x02,
1773 TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
1774 TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
1775 TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1776 TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
1777 TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
1778 TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1779 TX_3945_STATUS_FAIL_DEST_PS = 0x88,
1780 TX_3945_STATUS_FAIL_ABORTED = 0x89,
1781 TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
1782 TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
1783 TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1784 TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
1785 TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
1786 TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1787 TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
1788 TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1789};
1790
1791/* 1364/*
1792 * TX command response is sent after *agn* transmission attempts. 1365 * TX command response is sent after *agn* transmission attempts.
1793 * 1366 *
@@ -1905,43 +1478,6 @@ struct agg_tx_status {
1905 __le16 sequence; 1478 __le16 sequence;
1906} __packed; 1479} __packed;
1907 1480
1908struct iwl4965_tx_resp {
1909 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1910 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1911 u8 failure_rts; /* # failures due to unsuccessful RTS */
1912 u8 failure_frame; /* # failures due to no ACK (unused for agg) */
1913
1914 /* For non-agg: Rate at which frame was successful.
1915 * For agg: Rate at which all frames were transmitted. */
1916 __le32 rate_n_flags; /* RATE_MCS_* */
1917
1918 /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
1919 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1920 __le16 wireless_media_time; /* uSecs */
1921
1922 __le16 reserved;
1923 __le32 pa_power1; /* RF power amplifier measurement (not used) */
1924 __le32 pa_power2;
1925
1926 /*
1927 * For non-agg: frame status TX_STATUS_*
1928 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
1929 * fields follow this one, up to frame_count.
1930 * Bit fields:
1931 * 11- 0: AGG_TX_STATE_* status code
1932 * 15-12: Retry count for 1st frame in aggregation (retries
1933 * occur if tx failed for this frame when it was a
1934 * member of a previous aggregation block). If rate
1935 * scaling is used, retry count indicates the rate
1936 * table entry used for all frames in the new agg.
1937 * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
1938 */
1939 union {
1940 __le32 status;
1941 struct agg_tx_status agg_status[0]; /* for each agg frame */
1942 } u;
1943} __packed;
1944
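A sketch decoding the per-frame status word laid out in the comment above (hypothetical helper):

static inline void iwl_agg_status_decode(u32 status, u16 *state,
					 u8 *retries, u16 *seq)
{
	*state   = status & 0x0fff;		/* bits 11-0: AGG_TX_STATE_* */
	*retries = (status >> 12) & 0xf;	/* bits 15-12: retry count */
	*seq     = status >> 16;		/* bits 31-16: Tx cmd sequence */
}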
1945/* 1481/*
1946 * definitions for initial rate index field 1482 * definitions for initial rate index field
1947 * bits [3:0] initial rate index 1483 * bits [3:0] initial rate index
@@ -2030,51 +1566,7 @@ struct iwl_compressed_ba_resp {
2030/* 1566/*
2031 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) 1567 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
2032 * 1568 *
2033 * See details under "TXPOWER" in iwl-4965-hw.h.
2034 */
2035
2036struct iwl3945_txpowertable_cmd {
2037 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
2038 u8 reserved;
2039 __le16 channel;
2040 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
2041} __packed;
2042
2043struct iwl4965_txpowertable_cmd {
2044 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
2045 u8 reserved;
2046 __le16 channel;
2047 struct iwl4965_tx_power_db tx_power;
2048} __packed;
2049
2050
2051/**
2052 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
2053 *
2054 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
2055 *
2056 * NOTE: The table of rates passed to the uCode via the
2057 * RATE_SCALE command sets up the corresponding order of
2058 * rates used for all related commands, including rate
2059 * masks, etc.
2060 *
2061 * For example, if you set 9MB (PLCP 0x0f) as the first
2062 * rate in the rate table, the bit mask for that rate
2063 * when passed through ofdm_basic_rates on the REPLY_RXON
2064 * command would be bit 0 (1 << 0)
2065 */ 1569 */
2066struct iwl3945_rate_scaling_info {
2067 __le16 rate_n_flags;
2068 u8 try_cnt;
2069 u8 next_rate_index;
2070} __packed;
2071
2072struct iwl3945_rate_scaling_cmd {
2073 u8 table_id;
2074 u8 reserved[3];
2075 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
2076} __packed;
2077
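A sketch of the index-to-bit mapping described above (illustrative helper; assumes the rate of interest sits at the given table index):

static inline u8 iwl3945_basic_rate_bit(unsigned int table_index)
{
	/* e.g. index 0 (9 Mbps, PLCP 0x0f) -> bit 0 in ofdm_basic_rates */
	return BIT(table_index);
}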
2078 1570
2079/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ 1571/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
2080#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) 1572#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
@@ -2130,7 +1622,7 @@ struct iwl_link_qual_general_params {
2130#define LINK_QUAL_AGG_DISABLE_START_MAX (255) 1622#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
2131#define LINK_QUAL_AGG_DISABLE_START_MIN (0) 1623#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
2132 1624
2133#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31) 1625#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
2134#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) 1626#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
2135#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) 1627#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
2136 1628
@@ -2696,14 +2188,6 @@ struct iwl_spectrum_notification {
2696#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8)) 2188#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8))
2697#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9)) 2189#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9))
2698 2190
2699struct iwl3945_powertable_cmd {
2700 __le16 flags;
2701 u8 reserved[2];
2702 __le32 rx_data_timeout;
2703 __le32 tx_data_timeout;
2704 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
2705} __packed;
2706
2707struct iwl_powertable_cmd { 2191struct iwl_powertable_cmd {
2708 __le16 flags; 2192 __le16 flags;
2709 u8 keep_alive_seconds; /* 3945 reserved */ 2193 u8 keep_alive_seconds; /* 3945 reserved */
@@ -2806,25 +2290,6 @@ struct iwl_ct_kill_throttling_config {
2806 * active_dwell < max_out_time 2290 * active_dwell < max_out_time
2807 */ 2291 */
2808 2292
2809/* FIXME: rename to AP1, remove tpc */
2810struct iwl3945_scan_channel {
2811 /*
2812 * type is defined as:
2813 * 0:0 1 = active, 0 = passive
2814 * 1:4 SSID direct bit map; if a bit is set, then corresponding
2815 * SSID IE is transmitted in probe request.
2816 * 5:7 reserved
2817 */
2818 u8 type;
2819 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
2820 struct iwl3945_tx_power tpc;
2821 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2822 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2823} __packed;
2824
2825/* set number of direct probes u8 type */
2826#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2827
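A worked expansion of the macro above, for illustration:

/*
 * IWL39_SCAN_PROBE_MASK(3) == BIT(3) | (BIT(3) - BIT(1))
 *                          == 0x8 | 0x6
 *                          == 0xE   (SSID direct bits 1..3 set)
 */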
2828struct iwl_scan_channel { 2293struct iwl_scan_channel {
2829 /* 2294 /*
2830 * type is defined as: 2295 * type is defined as:
@@ -2920,50 +2385,6 @@ struct iwl_ssid_ie {
2920 * struct iwl_scan_channel. 2385 * struct iwl_scan_channel.
2921 */ 2386 */
2922 2387
2923struct iwl3945_scan_cmd {
2924 __le16 len;
2925 u8 reserved0;
2926 u8 channel_count; /* # channels in channel list */
2927 __le16 quiet_time; /* dwell only this # millisecs on quiet channel
2928 * (only for active scan) */
2929 __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
2930 __le16 good_CRC_th; /* passive -> active promotion threshold */
2931 __le16 reserved1;
2932 __le32 max_out_time; /* max usec to be away from associated (service)
2933 * channel */
2934 __le32 suspend_time; /* pause scan this long (in "extended beacon
2935 * format") when returning to service channel:
2936 * 3945; 31:24 # beacons, 19:0 additional usec,
2937 * 4965; 31:22 # beacons, 21:0 additional usec.
2938 */
2939 __le32 flags; /* RXON_FLG_* */
2940 __le32 filter_flags; /* RXON_FILTER_* */
2941
2942 /* For active scans (set to all-0s for passive scans).
2943 * Does not include payload. Must specify Tx rate; no rate scaling. */
2944 struct iwl3945_tx_cmd tx_cmd;
2945
2946 /* For directed active scans (set to all-0s otherwise) */
2947 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2948
2949 /*
2950 * Probe request frame, followed by channel list.
2951 *
2952 * Size of probe request frame is specified by byte count in tx_cmd.
2953 * Channel list follows immediately after probe request frame.
2954 * Number of channels in list is specified by channel_count.
2955 * Each channel in list is of type:
2956 *
2957 * struct iwl3945_scan_channel channels[0];
2958 *
2959 * NOTE: Only one band of channels can be scanned per pass. You
2960 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2961 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
2962 * before requesting another scan.
2963 */
2964 u8 data[0];
2965} __packed;
2966
2967enum iwl_scan_flags { 2388enum iwl_scan_flags {
2968 /* BIT(0) currently unused */ 2389 /* BIT(0) currently unused */
2969 IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1), 2390 IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1),
@@ -3090,20 +2511,6 @@ enum iwl_ibss_manager {
3090 * BEACON_NOTIFICATION = 0x90 (notification only, not a command) 2511 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
3091 */ 2512 */
3092 2513
3093struct iwl3945_beacon_notif {
3094 struct iwl3945_tx_resp beacon_notify_hdr;
3095 __le32 low_tsf;
3096 __le32 high_tsf;
3097 __le32 ibss_mgr_status;
3098} __packed;
3099
3100struct iwl4965_beacon_notif {
3101 struct iwl4965_tx_resp beacon_notify_hdr;
3102 __le32 low_tsf;
3103 __le32 high_tsf;
3104 __le32 ibss_mgr_status;
3105} __packed;
3106
3107struct iwlagn_beacon_notif { 2514struct iwlagn_beacon_notif {
3108 struct iwlagn_tx_resp beacon_notify_hdr; 2515 struct iwlagn_tx_resp beacon_notify_hdr;
3109 __le32 low_tsf; 2516 __le32 low_tsf;
@@ -3115,14 +2522,6 @@ struct iwlagn_beacon_notif {
3115 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2522 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
3116 */ 2523 */
3117 2524
3118struct iwl3945_tx_beacon_cmd {
3119 struct iwl3945_tx_cmd tx;
3120 __le16 tim_idx;
3121 u8 tim_size;
3122 u8 reserved1;
3123 struct ieee80211_hdr frame[0]; /* beacon frame */
3124} __packed;
3125
3126struct iwl_tx_beacon_cmd { 2525struct iwl_tx_beacon_cmd {
3127 struct iwl_tx_cmd tx; 2526 struct iwl_tx_cmd tx;
3128 __le16 tim_idx; 2527 __le16 tim_idx;
@@ -3159,53 +2558,6 @@ struct rate_histogram {
3159 2558
3160/* statistics command response */ 2559/* statistics command response */
3161 2560
3162struct iwl39_statistics_rx_phy {
3163 __le32 ina_cnt;
3164 __le32 fina_cnt;
3165 __le32 plcp_err;
3166 __le32 crc32_err;
3167 __le32 overrun_err;
3168 __le32 early_overrun_err;
3169 __le32 crc32_good;
3170 __le32 false_alarm_cnt;
3171 __le32 fina_sync_err_cnt;
3172 __le32 sfd_timeout;
3173 __le32 fina_timeout;
3174 __le32 unresponded_rts;
3175 __le32 rxe_frame_limit_overrun;
3176 __le32 sent_ack_cnt;
3177 __le32 sent_cts_cnt;
3178} __packed;
3179
3180struct iwl39_statistics_rx_non_phy {
3181 __le32 bogus_cts; /* CTS received when not expecting CTS */
3182 __le32 bogus_ack; /* ACK received when not expecting ACK */
3183 __le32 non_bssid_frames; /* number of frames with BSSID that
3184 * doesn't belong to the STA BSSID */
3185 __le32 filtered_frames; /* count frames that were dumped in the
3186 * filtering process */
3187 __le32 non_channel_beacons; /* beacons with our bss id but not on
3188 * our serving channel */
3189} __packed;
3190
3191struct iwl39_statistics_rx {
3192 struct iwl39_statistics_rx_phy ofdm;
3193 struct iwl39_statistics_rx_phy cck;
3194 struct iwl39_statistics_rx_non_phy general;
3195} __packed;
3196
3197struct iwl39_statistics_tx {
3198 __le32 preamble_cnt;
3199 __le32 rx_detected_cnt;
3200 __le32 bt_prio_defer_cnt;
3201 __le32 bt_prio_kill_cnt;
3202 __le32 few_bytes_cnt;
3203 __le32 cts_timeout;
3204 __le32 ack_timeout;
3205 __le32 expected_ack_cnt;
3206 __le32 actual_ack_cnt;
3207} __packed;
3208
3209struct statistics_dbg { 2561struct statistics_dbg {
3210 __le32 burst_check; 2562 __le32 burst_check;
3211 __le32 burst_count; 2563 __le32 burst_count;
@@ -3213,23 +2565,6 @@ struct statistics_dbg {
3213 __le32 reserved[3]; 2565 __le32 reserved[3];
3214} __packed; 2566} __packed;
3215 2567
3216struct iwl39_statistics_div {
3217 __le32 tx_on_a;
3218 __le32 tx_on_b;
3219 __le32 exec_time;
3220 __le32 probe_time;
3221} __packed;
3222
3223struct iwl39_statistics_general {
3224 __le32 temperature;
3225 struct statistics_dbg dbg;
3226 __le32 sleep_time;
3227 __le32 slots_out;
3228 __le32 slots_idle;
3229 __le32 ttl_timestamp;
3230 struct iwl39_statistics_div div;
3231} __packed;
3232
3233struct statistics_rx_phy { 2568struct statistics_rx_phy {
3234 __le32 ina_cnt; 2569 __le32 ina_cnt;
3235 __le32 fina_cnt; 2570 __le32 fina_cnt;
@@ -3471,13 +2806,6 @@ struct iwl_statistics_cmd {
3471#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) 2806#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3472#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) 2807#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3473 2808
3474struct iwl3945_notif_statistics {
3475 __le32 flag;
3476 struct iwl39_statistics_rx rx;
3477 struct iwl39_statistics_tx tx;
3478 struct iwl39_statistics_general general;
3479} __packed;
3480
3481struct iwl_notif_statistics { 2809struct iwl_notif_statistics {
3482 __le32 flag; 2810 __le32 flag;
3483 struct statistics_rx rx; 2811 struct statistics_rx rx;
@@ -4451,10 +3779,6 @@ struct iwl_rx_packet {
4451 __le32 len_n_flags; 3779 __le32 len_n_flags;
4452 struct iwl_cmd_header hdr; 3780 struct iwl_cmd_header hdr;
4453 union { 3781 union {
4454 struct iwl3945_rx_frame rx_frame;
4455 struct iwl3945_tx_resp tx_resp;
4456 struct iwl3945_beacon_notif beacon_status;
4457
4458 struct iwl_alive_resp alive_frame; 3782 struct iwl_alive_resp alive_frame;
4459 struct iwl_spectrum_notification spectrum_notif; 3783 struct iwl_spectrum_notification spectrum_notif;
4460 struct iwl_csa_notification csa_notif; 3784 struct iwl_csa_notification csa_notif;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index bafbe57c9602..4653deada05b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -41,6 +41,7 @@
41#include "iwl-power.h" 41#include "iwl-power.h"
42#include "iwl-sta.h" 42#include "iwl-sta.h"
43#include "iwl-helpers.h" 43#include "iwl-helpers.h"
44#include "iwl-agn.h"
44 45
45 46
46/* 47/*
@@ -67,30 +68,6 @@ u32 iwl_debug_level;
67 68
68const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 69const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
69 70
70
71/* This function both allocates and initializes hw and priv. */
72struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
73{
74 struct iwl_priv *priv;
75 /* mac80211 allocates memory for this device instance, including
76 * space for this driver's private structure */
77 struct ieee80211_hw *hw;
78
79 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
80 cfg->ops->ieee80211_ops);
81 if (hw == NULL) {
82 pr_err("%s: Can not allocate network device\n",
83 cfg->name);
84 goto out;
85 }
86
87 priv = hw->priv;
88 priv->hw = hw;
89
90out:
91 return hw;
92}
93
94#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 71#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
95#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 72#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
96static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, 73static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
@@ -118,7 +95,7 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
118 max_bit_rate = MAX_BIT_RATE_40_MHZ; 95 max_bit_rate = MAX_BIT_RATE_40_MHZ;
119 } 96 }
120 97
121 if (priv->cfg->mod_params->amsdu_size_8K) 98 if (iwlagn_mod_params.amsdu_size_8K)
122 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 99 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
123 100
124 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 101 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
@@ -159,6 +136,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
159 struct ieee80211_channel *geo_ch; 136 struct ieee80211_channel *geo_ch;
160 struct ieee80211_rate *rates; 137 struct ieee80211_rate *rates;
161 int i = 0; 138 int i = 0;
139 s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
162 140
163 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || 141 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
164 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { 142 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
@@ -232,8 +210,8 @@ int iwlcore_init_geos(struct iwl_priv *priv)
232 210
233 geo_ch->flags |= ch->ht40_extension_channel; 211 geo_ch->flags |= ch->ht40_extension_channel;
234 212
235 if (ch->max_power_avg > priv->tx_power_device_lmt) 213 if (ch->max_power_avg > max_tx_power)
236 priv->tx_power_device_lmt = ch->max_power_avg; 214 max_tx_power = ch->max_power_avg;
237 } else { 215 } else {
238 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 216 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
239 } 217 }
@@ -246,6 +224,10 @@ int iwlcore_init_geos(struct iwl_priv *priv)
246 geo_ch->flags); 224 geo_ch->flags);
247 } 225 }
248 226
227 priv->tx_power_device_lmt = max_tx_power;
228 priv->tx_power_user_lmt = max_tx_power;
229 priv->tx_power_next = max_tx_power;
230
249 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && 231 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
250 priv->cfg->sku & IWL_SKU_A) { 232 priv->cfg->sku & IWL_SKU_A) {
251 IWL_INFO(priv, "Incorrectly detected BG card as ABG. " 233 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
@@ -434,72 +416,72 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
434int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 416int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
435{ 417{
436 struct iwl_rxon_cmd *rxon = &ctx->staging; 418 struct iwl_rxon_cmd *rxon = &ctx->staging;
437 bool error = false; 419 u32 errors = 0;
438 420
439 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { 421 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
440 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { 422 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
441 IWL_WARN(priv, "check 2.4G: wrong narrow\n"); 423 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
442 error = true; 424 errors |= BIT(0);
443 } 425 }
444 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { 426 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
445 IWL_WARN(priv, "check 2.4G: wrong radar\n"); 427 IWL_WARN(priv, "check 2.4G: wrong radar\n");
446 error = true; 428 errors |= BIT(1);
447 } 429 }
448 } else { 430 } else {
449 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { 431 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
450 IWL_WARN(priv, "check 5.2G: not short slot!\n"); 432 IWL_WARN(priv, "check 5.2G: not short slot!\n");
451 error = true; 433 errors |= BIT(2);
452 } 434 }
453 if (rxon->flags & RXON_FLG_CCK_MSK) { 435 if (rxon->flags & RXON_FLG_CCK_MSK) {
454 IWL_WARN(priv, "check 5.2G: CCK!\n"); 436 IWL_WARN(priv, "check 5.2G: CCK!\n");
455 error = true; 437 errors |= BIT(3);
456 } 438 }
457 } 439 }
458 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { 440 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
459 IWL_WARN(priv, "mac/bssid mcast!\n"); 441 IWL_WARN(priv, "mac/bssid mcast!\n");
460 error = true; 442 errors |= BIT(4);
461 } 443 }
462 444
463 /* make sure basic rates 6Mbps and 1Mbps are supported */ 445 /* make sure basic rates 6Mbps and 1Mbps are supported */
464 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && 446 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
465 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { 447 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
466 IWL_WARN(priv, "neither 1 nor 6 are basic\n"); 448 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
467 error = true; 449 errors |= BIT(5);
468 } 450 }
469 451
470 if (le16_to_cpu(rxon->assoc_id) > 2007) { 452 if (le16_to_cpu(rxon->assoc_id) > 2007) {
471 IWL_WARN(priv, "aid > 2007\n"); 453 IWL_WARN(priv, "aid > 2007\n");
472 error = true; 454 errors |= BIT(6);
473 } 455 }
474 456
475 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) 457 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
476 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { 458 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
477 IWL_WARN(priv, "CCK and short slot\n"); 459 IWL_WARN(priv, "CCK and short slot\n");
478 error = true; 460 errors |= BIT(7);
479 } 461 }
480 462
481 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) 463 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
482 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { 464 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
483 IWL_WARN(priv, "CCK and auto detect"); 465 IWL_WARN(priv, "CCK and auto detect");
484 error = true; 466 errors |= BIT(8);
485 } 467 }
486 468
487 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | 469 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
488 RXON_FLG_TGG_PROTECT_MSK)) == 470 RXON_FLG_TGG_PROTECT_MSK)) ==
489 RXON_FLG_TGG_PROTECT_MSK) { 471 RXON_FLG_TGG_PROTECT_MSK) {
490 IWL_WARN(priv, "TGg but no auto-detect\n"); 472 IWL_WARN(priv, "TGg but no auto-detect\n");
491 error = true; 473 errors |= BIT(9);
492 } 474 }
493 475
494 if (error) 476 if (rxon->channel == 0) {
495 IWL_WARN(priv, "Tuning to channel %d\n", 477 IWL_WARN(priv, "zero channel is invalid\n");
496 le16_to_cpu(rxon->channel)); 478 errors |= BIT(10);
497
498 if (error) {
499 IWL_ERR(priv, "Invalid RXON\n");
500 return -EINVAL;
501 } 479 }
502 return 0; 480
481 WARN(errors, "Invalid RXON (%#x), channel %d",
482 errors, le16_to_cpu(rxon->channel));
483
484 return errors ? -EINVAL : 0;
503} 485}
504 486
505/** 487/**
@@ -890,10 +872,21 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
890 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 872 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
891} 873}
892#endif 874#endif
893/** 875
894 * iwl_irq_handle_error - called for HW or SW error interrupt from card 876static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
895 */ 877{
896void iwl_irq_handle_error(struct iwl_priv *priv) 878 unsigned long flags;
879 struct iwl_notification_wait *wait_entry;
880
881 spin_lock_irqsave(&priv->_agn.notif_wait_lock, flags);
882 list_for_each_entry(wait_entry, &priv->_agn.notif_waits, list)
883 wait_entry->aborted = true;
884 spin_unlock_irqrestore(&priv->_agn.notif_wait_lock, flags);
885
886 wake_up_all(&priv->_agn.notif_waitq);
887}
888
889void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
897{ 890{
898 unsigned int reload_msec; 891 unsigned int reload_msec;
899 unsigned long reload_jiffies; 892 unsigned long reload_jiffies;
@@ -904,18 +897,64 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
904 /* Cancel currently queued command. */ 897 /* Cancel currently queued command. */
905 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 898 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
906 899
900 iwlagn_abort_notification_waits(priv);
901
902 /* Keep the restart process from trying to send host
903 * commands by clearing the ready bit */
904 clear_bit(STATUS_READY, &priv->status);
905
906 wake_up_interruptible(&priv->wait_command_queue);
907
908 if (!ondemand) {
909 /*
910	 * If the firmware keeps reloading, it indicates something is
911	 * seriously wrong and the firmware is having problems recovering
912	 * from it. Instead of retrying, which will fill the syslog
913	 * and hang the system, just stop it.
914 */
915 reload_jiffies = jiffies;
916 reload_msec = jiffies_to_msecs((long) reload_jiffies -
917 (long) priv->reload_jiffies);
918 priv->reload_jiffies = reload_jiffies;
919 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
920 priv->reload_count++;
921 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
922 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
923 return;
924 }
925 } else
926 priv->reload_count = 0;
927 }
928
929 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
930 if (iwlagn_mod_params.restart_fw) {
931 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
932 "Restarting adapter due to uCode error.\n");
933 queue_work(priv->workqueue, &priv->restart);
934 } else
935 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
936 "Detected FW error, but not restarting\n");
937 }
938}
939
940/**
941 * iwl_irq_handle_error - called for HW or SW error interrupt from card
942 */
943void iwl_irq_handle_error(struct iwl_priv *priv)
944{
907 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 945 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
908 if (priv->cfg->internal_wimax_coex && 946 if (priv->cfg->internal_wimax_coex &&
909 (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) & 947 (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
910 APMS_CLK_VAL_MRB_FUNC_MODE) || 948 APMS_CLK_VAL_MRB_FUNC_MODE) ||
911 (iwl_read_prph(priv, APMG_PS_CTRL_REG) & 949 (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
912 APMG_PS_CTRL_VAL_RESET_REQ))) { 950 APMG_PS_CTRL_VAL_RESET_REQ))) {
913 wake_up_interruptible(&priv->wait_command_queue);
914 /* 951 /*
915 *Keep the restart process from trying to send host 952 * Keep the restart process from trying to send host
916 * commands by clearing the INIT status bit 953 * commands by clearing the ready bit.
917 */ 954 */
918 clear_bit(STATUS_READY, &priv->status); 955 clear_bit(STATUS_READY, &priv->status);
956 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
957 wake_up_interruptible(&priv->wait_command_queue);
919 IWL_ERR(priv, "RF is used by WiMAX\n"); 958 IWL_ERR(priv, "RF is used by WiMAX\n");
920 return; 959 return;
921 } 960 }
@@ -923,50 +962,17 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
923 IWL_ERR(priv, "Loaded firmware version: %s\n", 962 IWL_ERR(priv, "Loaded firmware version: %s\n",
924 priv->hw->wiphy->fw_version); 963 priv->hw->wiphy->fw_version);
925 964
926 priv->cfg->ops->lib->dump_nic_error_log(priv); 965 iwl_dump_nic_error_log(priv);
927 if (priv->cfg->ops->lib->dump_csr) 966 iwl_dump_csr(priv);
928 priv->cfg->ops->lib->dump_csr(priv); 967 iwl_dump_fh(priv, NULL, false);
929 if (priv->cfg->ops->lib->dump_fh) 968 iwl_dump_nic_event_log(priv, false, NULL, false);
930 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
931 priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
932#ifdef CONFIG_IWLWIFI_DEBUG 969#ifdef CONFIG_IWLWIFI_DEBUG
933 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) 970 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
934 iwl_print_rx_config_cmd(priv, 971 iwl_print_rx_config_cmd(priv,
935 &priv->contexts[IWL_RXON_CTX_BSS]); 972 &priv->contexts[IWL_RXON_CTX_BSS]);
936#endif 973#endif
937 974
938 wake_up_interruptible(&priv->wait_command_queue); 975 iwlagn_fw_error(priv, false);
939
940 /* Keep the restart process from trying to send host
941 * commands by clearing the INIT status bit */
942 clear_bit(STATUS_READY, &priv->status);
943
944 /*
945	 * If the firmware keeps reloading, it indicates something is
946	 * seriously wrong and the firmware is having problems recovering
947	 * from it. Instead of retrying, which will fill the syslog
948	 * and hang the system, just stop it.
949 */
950 reload_jiffies = jiffies;
951 reload_msec = jiffies_to_msecs((long) reload_jiffies -
952 (long) priv->reload_jiffies);
953 priv->reload_jiffies = reload_jiffies;
954 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
955 priv->reload_count++;
956 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
957 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
958 return;
959 }
960 } else
961 priv->reload_count = 0;
962
963 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
964 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
965 "Restarting adapter due to uCode error.\n");
966
967 if (priv->cfg->mod_params->restart_fw)
968 queue_work(priv->workqueue, &priv->restart);
969 }
970} 976}
971 977
972static int iwl_apm_stop_master(struct iwl_priv *priv) 978static int iwl_apm_stop_master(struct iwl_priv *priv)
@@ -990,6 +996,8 @@ void iwl_apm_stop(struct iwl_priv *priv)
990{ 996{
991 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n"); 997 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
992 998
999 clear_bit(STATUS_DEVICE_ENABLED, &priv->status);
1000
993 /* Stop device's DMA activity */ 1001 /* Stop device's DMA activity */
994 iwl_apm_stop_master(priv); 1002 iwl_apm_stop_master(priv);
995 1003
@@ -1040,7 +1048,6 @@ int iwl_apm_init(struct iwl_priv *priv)
1040 /* 1048 /*
1041 * Enable HAP INTA (interrupt from management bus) to 1049 * Enable HAP INTA (interrupt from management bus) to
1042 * wake device's PCI Express link L1a -> L0s 1050 * wake device's PCI Express link L1a -> L0s
1043 * NOTE: This is no-op for 3945 (non-existent bit)
1044 */ 1051 */
1045 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1052 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1046 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 1053 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
@@ -1053,20 +1060,18 @@ int iwl_apm_init(struct iwl_priv *priv)
1053 * If not (unlikely), enable L0S, so there is at least some 1060 * If not (unlikely), enable L0S, so there is at least some
1054 * power savings, even without L1. 1061 * power savings, even without L1.
1055 */ 1062 */
1056 if (priv->cfg->base_params->set_l0s) { 1063 lctl = iwl_pcie_link_ctl(priv);
1057 lctl = iwl_pcie_link_ctl(priv); 1064 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1058 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == 1065 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1059 PCI_CFG_LINK_CTRL_VAL_L1_EN) { 1066 /* L1-ASPM enabled; disable(!) L0S */
1060 /* L1-ASPM enabled; disable(!) L0S */ 1067 iwl_set_bit(priv, CSR_GIO_REG,
1061 iwl_set_bit(priv, CSR_GIO_REG, 1068 CSR_GIO_REG_VAL_L0S_ENABLED);
1062 CSR_GIO_REG_VAL_L0S_ENABLED); 1069 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1063 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n"); 1070 } else {
1064 } else { 1071 /* L1-ASPM disabled; enable(!) L0S */
1065 /* L1-ASPM disabled; enable(!) L0S */ 1072 iwl_clear_bit(priv, CSR_GIO_REG,
1066 iwl_clear_bit(priv, CSR_GIO_REG, 1073 CSR_GIO_REG_VAL_L0S_ENABLED);
1067 CSR_GIO_REG_VAL_L0S_ENABLED); 1074 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1068 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1069 }
1070 } 1075 }
1071 1076
1072 /* Configure analog phase-lock-loop before activating to D0A */ 1077 /* Configure analog phase-lock-loop before activating to D0A */
@@ -1094,27 +1099,21 @@ int iwl_apm_init(struct iwl_priv *priv)
1094 } 1099 }
1095 1100
1096 /* 1101 /*
1097 * Enable DMA and BSM (if used) clocks, wait for them to stabilize. 1102 * Enable DMA clock and wait for it to stabilize.
1098 * BSM (Bootstrap State Machine) is only in 3945 and 4965;
1099 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1100 * and don't need BSM to restore data after power-saving sleep.
1101 * 1103 *
1102 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits 1104 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1103 * do not disable clocks. This preserves any hardware bits already 1105 * do not disable clocks. This preserves any hardware bits already
1104 * set by default in "CLK_CTRL_REG" after reset. 1106 * set by default in "CLK_CTRL_REG" after reset.
1105 */ 1107 */
1106 if (priv->cfg->base_params->use_bsm) 1108 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
1107 iwl_write_prph(priv, APMG_CLK_EN_REG,
1108 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1109 else
1110 iwl_write_prph(priv, APMG_CLK_EN_REG,
1111 APMG_CLK_VAL_DMA_CLK_RQT);
1112 udelay(20); 1109 udelay(20);
1113 1110
1114 /* Disable L1-Active */ 1111 /* Disable L1-Active */
1115 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 1112 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1116 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1113 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1117 1114
1115 set_bit(STATUS_DEVICE_ENABLED, &priv->status);
1116
1118out: 1117out:
1119 return ret; 1118 return ret;
1120} 1119}
@@ -1430,7 +1429,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1430 1429
1431 iwl_teardown_interface(priv, vif, false); 1430 iwl_teardown_interface(priv, vif, false);
1432 1431
1433 memset(priv->bssid, 0, ETH_ALEN);
1434 mutex_unlock(&priv->mutex); 1432 mutex_unlock(&priv->mutex);
1435 1433
1436 IWL_DEBUG_MAC80211(priv, "leave\n"); 1434 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1750,21 +1748,13 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
1750 * detect failure), then the fw_restart module parameter 1748 * detect failure), then the fw_restart module parameter
1751 * needs to be checked before performing firmware reload 1749 * needs to be checked before performing firmware reload
1752 */ 1750 */
1753 if (!external && !priv->cfg->mod_params->restart_fw) { 1751 if (!external && !iwlagn_mod_params.restart_fw) {
1754 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on " 1752 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1755 "module parameter setting\n"); 1753 "module parameter setting\n");
1756 break; 1754 break;
1757 } 1755 }
1758 IWL_ERR(priv, "On demand firmware reload\n"); 1756 IWL_ERR(priv, "On demand firmware reload\n");
1759 /* Set the FW error flag -- cleared on iwl_down */ 1757 iwlagn_fw_error(priv, true);
1760 set_bit(STATUS_FW_ERROR, &priv->status);
1761 wake_up_interruptible(&priv->wait_command_queue);
1762 /*
1763 * Keep the restart process from trying to send host
1764 * commands by clearing the INIT status bit
1765 */
1766 clear_bit(STATUS_READY, &priv->status);
1767 queue_work(priv->workqueue, &priv->restart);
1768 break; 1758 break;
1769 } 1759 }
1770 return 0; 1760 return 0;
@@ -1775,6 +1765,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1775{ 1765{
1776 struct iwl_priv *priv = hw->priv; 1766 struct iwl_priv *priv = hw->priv;
1777 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1767 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1768 struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1778 struct iwl_rxon_context *tmp; 1769 struct iwl_rxon_context *tmp;
1779 u32 interface_modes; 1770 u32 interface_modes;
1780 int err; 1771 int err;
@@ -1783,6 +1774,15 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1783 1774
1784 mutex_lock(&priv->mutex); 1775 mutex_lock(&priv->mutex);
1785 1776
1777 if (!ctx->vif || !iwl_is_ready_rf(priv)) {
1778 /*
1779 * Huh? But wait ... this can maybe happen when
1780 * we're in the middle of a firmware restart!
1781 */
1782 err = -EBUSY;
1783 goto out;
1784 }
1785
1786 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; 1786 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1787 1787
1788 if (!(interface_modes & BIT(newtype))) { 1788 if (!(interface_modes & BIT(newtype))) {
@@ -1790,6 +1790,19 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1790 goto out; 1790 goto out;
1791 } 1791 }
1792 1792
1793 /*
1794 * Refuse a change that should be done by moving from the PAN
1795 * context to the BSS context instead, if the BSS context is
1796 * available and can support the new interface type.
1797 */
1798 if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
1799 (bss_ctx->interface_modes & BIT(newtype) ||
1800 bss_ctx->exclusive_interface_modes & BIT(newtype))) {
1801 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
1802 err = -EBUSY;
1803 goto out;
1804 }
1805
1793 if (ctx->exclusive_interface_modes & BIT(newtype)) { 1806 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1794 for_each_context(priv, tmp) { 1807 for_each_context(priv, tmp) {
1795 if (ctx == tmp) 1808 if (ctx == tmp)
@@ -1810,6 +1823,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1810 /* success */ 1823 /* success */
1811 iwl_teardown_interface(priv, vif, true); 1824 iwl_teardown_interface(priv, vif, true);
1812 vif->type = newtype; 1825 vif->type = newtype;
1826 vif->p2p = newp2p;
1813 err = iwl_setup_interface(priv, ctx); 1827 err = iwl_setup_interface(priv, ctx);
1814 WARN_ON(err); 1828 WARN_ON(err);
1815 /* 1829 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index b316d833d9a2..5b5b0cce4a54 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -73,7 +73,7 @@ struct iwl_cmd;
73 73
74 74
75#define IWLWIFI_VERSION "in-tree:" 75#define IWLWIFI_VERSION "in-tree:"
76#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" 76#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
77#define DRV_AUTHOR "<ilw@linux.intel.com>" 77#define DRV_AUTHOR "<ilw@linux.intel.com>"
78 78
79#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 79#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -90,7 +90,6 @@ struct iwl_cmd;
90#define IWL_CMD(x) case x: return #x 90#define IWL_CMD(x) case x: return #x
91 91
92struct iwl_hcmd_ops { 92struct iwl_hcmd_ops {
93 int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
94 int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 93 int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
95 void (*set_rxon_chain)(struct iwl_priv *priv, 94 void (*set_rxon_chain)(struct iwl_priv *priv,
96 struct iwl_rxon_context *ctx); 95 struct iwl_rxon_context *ctx);
@@ -100,7 +99,6 @@ struct iwl_hcmd_ops {
100}; 99};
101 100
102struct iwl_hcmd_utils_ops { 101struct iwl_hcmd_utils_ops {
103 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
104 u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data); 102 u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data);
105 void (*gain_computation)(struct iwl_priv *priv, 103 void (*gain_computation)(struct iwl_priv *priv,
106 u32 *average_noise, 104 u32 *average_noise,
@@ -122,46 +120,14 @@ struct iwl_apm_ops {
122 void (*config)(struct iwl_priv *priv); 120 void (*config)(struct iwl_priv *priv);
123}; 121};
124 122
125struct iwl_isr_ops {
126 irqreturn_t (*isr) (int irq, void *data);
127 void (*free)(struct iwl_priv *priv);
128 int (*alloc)(struct iwl_priv *priv);
129 int (*reset)(struct iwl_priv *priv);
130 void (*disable)(struct iwl_priv *priv);
131};
132
133struct iwl_debugfs_ops {
134 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
135 size_t count, loff_t *ppos);
136 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
137 size_t count, loff_t *ppos);
138 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
139 size_t count, loff_t *ppos);
140 ssize_t (*bt_stats_read)(struct file *file, char __user *user_buf,
141 size_t count, loff_t *ppos);
142 ssize_t (*reply_tx_error)(struct file *file, char __user *user_buf,
143 size_t count, loff_t *ppos);
144};
145
146struct iwl_temp_ops { 123struct iwl_temp_ops {
147 void (*temperature)(struct iwl_priv *priv); 124 void (*temperature)(struct iwl_priv *priv);
148}; 125};
149 126
150struct iwl_tt_ops {
151 bool (*lower_power_detection)(struct iwl_priv *priv);
152 u8 (*tt_power_mode)(struct iwl_priv *priv);
153 bool (*ct_kill_check)(struct iwl_priv *priv);
154};
155
156struct iwl_lib_ops { 127struct iwl_lib_ops {
157 /* set hw dependent parameters */ 128 /* set hw dependent parameters */
158 int (*set_hw_params)(struct iwl_priv *priv); 129 int (*set_hw_params)(struct iwl_priv *priv);
159 /* Handling TX */ 130 /* Handling TX */
160 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
161 struct iwl_tx_queue *txq,
162 u16 byte_cnt);
163 void (*txq_inval_byte_cnt_tbl)(struct iwl_priv *priv,
164 struct iwl_tx_queue *txq);
165 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask); 131 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
166 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv, 132 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
167 struct iwl_tx_queue *txq, 133 struct iwl_tx_queue *txq,
@@ -171,30 +137,14 @@ struct iwl_lib_ops {
171 struct iwl_tx_queue *txq); 137 struct iwl_tx_queue *txq);
172 int (*txq_init)(struct iwl_priv *priv, 138 int (*txq_init)(struct iwl_priv *priv,
173 struct iwl_tx_queue *txq); 139 struct iwl_tx_queue *txq);
174 /* aggregations */
175 int (*txq_agg_enable)(struct iwl_priv *priv, int txq_id, int tx_fifo,
176 int sta_id, int tid, u16 ssn_idx);
177 int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, u16 ssn_idx,
178 u8 tx_fifo);
179 /* setup Rx handler */ 140 /* setup Rx handler */
180 void (*rx_handler_setup)(struct iwl_priv *priv); 141 void (*rx_handler_setup)(struct iwl_priv *priv);
181 /* setup deferred work */ 142 /* setup deferred work */
182 void (*setup_deferred_work)(struct iwl_priv *priv); 143 void (*setup_deferred_work)(struct iwl_priv *priv);
183 /* cancel deferred work */ 144 /* cancel deferred work */
184 void (*cancel_deferred_work)(struct iwl_priv *priv); 145 void (*cancel_deferred_work)(struct iwl_priv *priv);
185 /* alive notification after init uCode load */
186 void (*init_alive_start)(struct iwl_priv *priv);
187 /* alive notification */
188 int (*alive_notify)(struct iwl_priv *priv);
189 /* check validity of rtc data address */ 146 /* check validity of rtc data address */
190 int (*is_valid_rtc_data_addr)(u32 addr); 147 int (*is_valid_rtc_data_addr)(u32 addr);
191 /* 1st ucode load */
192 int (*load_ucode)(struct iwl_priv *priv);
193 int (*dump_nic_event_log)(struct iwl_priv *priv,
194 bool full_log, char **buf, bool display);
195 void (*dump_nic_error_log)(struct iwl_priv *priv);
196 void (*dump_csr)(struct iwl_priv *priv);
197 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
198 int (*set_channel_switch)(struct iwl_priv *priv, 148 int (*set_channel_switch)(struct iwl_priv *priv,
199 struct ieee80211_channel_switch *ch_switch); 149 struct ieee80211_channel_switch *ch_switch);
200 /* power management */ 150 /* power management */
@@ -204,9 +154,6 @@ struct iwl_lib_ops {
204 int (*send_tx_power) (struct iwl_priv *priv); 154 int (*send_tx_power) (struct iwl_priv *priv);
205 void (*update_chain_flags)(struct iwl_priv *priv); 155 void (*update_chain_flags)(struct iwl_priv *priv);
206 156
207 /* isr */
208 struct iwl_isr_ops isr_ops;
209
210 /* eeprom operations (as defined in iwl-eeprom.h) */ 157 /* eeprom operations (as defined in iwl-eeprom.h) */
211 struct iwl_eeprom_ops eeprom_ops; 158 struct iwl_eeprom_ops eeprom_ops;
212 159
@@ -216,14 +163,6 @@ struct iwl_lib_ops {
216 int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control); 163 int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
217 void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control); 164 void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
218 165
219 struct iwl_debugfs_ops debugfs_ops;
220
221 /* thermal throttling */
222 struct iwl_tt_ops tt_ops;
223};
224
225struct iwl_led_ops {
226 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
227}; 166};
228 167
229/* NIC specific ops */ 168/* NIC specific ops */
@@ -231,28 +170,15 @@ struct iwl_nic_ops {
231 void (*additional_nic_config)(struct iwl_priv *priv); 170 void (*additional_nic_config)(struct iwl_priv *priv);
232}; 171};
233 172
234struct iwl_legacy_ops {
235 void (*post_associate)(struct iwl_priv *priv);
236 void (*config_ap)(struct iwl_priv *priv);
237 /* station management */
238 int (*update_bcast_stations)(struct iwl_priv *priv);
239 int (*manage_ibss_station)(struct iwl_priv *priv,
240 struct ieee80211_vif *vif, bool add);
241};
242
243struct iwl_ops { 173struct iwl_ops {
244 const struct iwl_lib_ops *lib; 174 const struct iwl_lib_ops *lib;
245 const struct iwl_hcmd_ops *hcmd; 175 const struct iwl_hcmd_ops *hcmd;
246 const struct iwl_hcmd_utils_ops *utils; 176 const struct iwl_hcmd_utils_ops *utils;
247 const struct iwl_led_ops *led;
248 const struct iwl_nic_ops *nic; 177 const struct iwl_nic_ops *nic;
249 const struct iwl_legacy_ops *legacy;
250 const struct ieee80211_ops *ieee80211_ops;
251}; 178};
252 179
253struct iwl_mod_params { 180struct iwl_mod_params {
254 int sw_crypto; /* def: 0 = using hardware encryption */ 181 int sw_crypto; /* def: 0 = using hardware encryption */
255 int disable_hw_scan; /* def: 0 = use h/w scan */
256 int num_of_queues; /* def: HW dependent */ 182 int num_of_queues; /* def: HW dependent */
257 int disable_11n; /* def: 0 = 11n capabilities enabled */ 183 int disable_11n; /* def: 0 = 11n capabilities enabled */
258 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 184 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
@@ -278,16 +204,7 @@ struct iwl_mod_params {
278 * @wd_timeout: TX queues watchdog timeout 204 * @wd_timeout: TX queues watchdog timeout
279 * @temperature_kelvin: temperature report by uCode in kelvin 205 * @temperature_kelvin: temperature report by uCode in kelvin
280 * @max_event_log_size: size of the event log buffer for ucode event logging 206 * @max_event_log_size: size of the event log buffer for ucode event logging
281 * @tx_power_by_driver: tx power calibration performed by driver
282 * instead of uCode
283 * @ucode_tracing: support ucode continuous tracing
284 * @sensitivity_calib_by_driver: driver has the capability to perform
285 * sensitivity calibration operation
286 * @chain_noise_calib_by_driver: driver has the capability to perform
287 * chain noise calibration operation
288 * @shadow_reg_enable: HW shadow register bit 207 * @shadow_reg_enable: HW shadow register bit
289 * @no_agg_framecnt_info: uCode do not provide aggregation frame count
290 * information
291 */ 208 */
292struct iwl_base_params { 209struct iwl_base_params {
293 int eeprom_size; 210 int eeprom_size;
@@ -295,14 +212,10 @@ struct iwl_base_params {
295 int num_of_ampdu_queues;/* def: HW dependent */ 212 int num_of_ampdu_queues;/* def: HW dependent */
296 /* for iwl_apm_init() */ 213 /* for iwl_apm_init() */
297 u32 pll_cfg_val; 214 u32 pll_cfg_val;
298 bool set_l0s;
299 bool use_bsm;
300 215
301 bool use_isr_legacy;
302 const u16 max_ll_items; 216 const u16 max_ll_items;
303 const bool shadow_ram_support; 217 const bool shadow_ram_support;
304 u16 led_compensation; 218 u16 led_compensation;
305 const bool broken_powersave;
306 int chain_noise_num_beacons; 219 int chain_noise_num_beacons;
307 bool adv_thermal_throttle; 220 bool adv_thermal_throttle;
308 bool support_ct_kill_exit; 221 bool support_ct_kill_exit;
@@ -312,18 +225,12 @@ struct iwl_base_params {
312 unsigned int wd_timeout; 225 unsigned int wd_timeout;
313 bool temperature_kelvin; 226 bool temperature_kelvin;
314 u32 max_event_log_size; 227 u32 max_event_log_size;
315 const bool tx_power_by_driver;
316 const bool ucode_tracing;
317 const bool sensitivity_calib_by_driver;
318 const bool chain_noise_calib_by_driver;
319 const bool shadow_reg_enable; 228 const bool shadow_reg_enable;
320 const bool no_agg_framecnt_info;
321}; 229};
322/* 230/*
323 * @advanced_bt_coexist: support advanced bt coexist 231 * @advanced_bt_coexist: support advanced bt coexist
324 * @bt_init_traffic_load: specify initial bt traffic load 232 * @bt_init_traffic_load: specify initial bt traffic load
325 * @bt_prio_boost: default bt priority boost value 233 * @bt_prio_boost: default bt priority boost value
326 * @bt_statistics: use BT version of statistics notification
327 * @agg_time_limit: maximum number of uSec in aggregation 234 * @agg_time_limit: maximum number of uSec in aggregation
328 * @ampdu_factor: Maximum A-MPDU length factor 235 * @ampdu_factor: Maximum A-MPDU length factor
329 * @ampdu_density: Minimum A-MPDU spacing 236 * @ampdu_density: Minimum A-MPDU spacing
@@ -333,7 +240,6 @@ struct iwl_bt_params {
333 bool advanced_bt_coexist; 240 bool advanced_bt_coexist;
334 u8 bt_init_traffic_load; 241 u8 bt_init_traffic_load;
335 u8 bt_prio_boost; 242 u8 bt_prio_boost;
336 const bool bt_statistics;
337 u16 agg_time_limit; 243 u16 agg_time_limit;
338 u8 ampdu_factor; 244 u8 ampdu_factor;
339 u8 ampdu_density; 245 u8 ampdu_density;
@@ -364,6 +270,7 @@ struct iwl_ht_params {
364 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 270 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
365 * @internal_wimax_coex: internal wifi/wimax combo device 271 * @internal_wimax_coex: internal wifi/wimax combo device
366 * @iq_invert: I/Q inversion 272 * @iq_invert: I/Q inversion
273 * @disable_otp_refresh: disable OTP refresh current limit
367 * 274 *
368 * We enable the driver to be backward compatible wrt API version. The 275 * We enable the driver to be backward compatible wrt API version. The
369 * driver specifies which APIs it supports (with @ucode_api_max being the 276 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -398,8 +305,6 @@ struct iwl_cfg {
398 u16 eeprom_ver; 305 u16 eeprom_ver;
399 u16 eeprom_calib_ver; 306 u16 eeprom_calib_ver;
400 const struct iwl_ops *ops; 307 const struct iwl_ops *ops;
401 /* module based parameters which can be set from modprobe cmd */
402 const struct iwl_mod_params *mod_params;
403 /* params not likely to change within a device family */ 308 /* params not likely to change within a device family */
404 struct iwl_base_params *base_params; 309 struct iwl_base_params *base_params;
405 /* params likely to change within a device family */ 310 /* params likely to change within a device family */
@@ -414,13 +319,13 @@ struct iwl_cfg {
414 const bool rx_with_siso_diversity; 319 const bool rx_with_siso_diversity;
415 const bool internal_wimax_coex; 320 const bool internal_wimax_coex;
416 const bool iq_invert; 321 const bool iq_invert;
322 const bool disable_otp_refresh;
417}; 323};
418 324
419/*************************** 325/***************************
420 * L i b * 326 * L i b *
421 ***************************/ 327 ***************************/
422 328
423struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg);
424int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 329int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
425 const struct ieee80211_tx_queue_params *params); 330 const struct ieee80211_tx_queue_params *params);
426int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw); 331int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
@@ -625,6 +530,8 @@ extern const struct dev_pm_ops iwl_pm_ops;
625void iwl_dump_nic_error_log(struct iwl_priv *priv); 530void iwl_dump_nic_error_log(struct iwl_priv *priv);
626int iwl_dump_nic_event_log(struct iwl_priv *priv, 531int iwl_dump_nic_event_log(struct iwl_priv *priv,
627 bool full_log, char **buf, bool display); 532 bool full_log, char **buf, bool display);
533void iwl_dump_csr(struct iwl_priv *priv);
534int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
628#ifdef CONFIG_IWLWIFI_DEBUG 535#ifdef CONFIG_IWLWIFI_DEBUG
629void iwl_print_rx_config_cmd(struct iwl_priv *priv, 536void iwl_print_rx_config_cmd(struct iwl_priv *priv,
630 struct iwl_rxon_context *ctx); 537 struct iwl_rxon_context *ctx);
@@ -662,6 +569,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
662#define STATUS_SCAN_HW 15 569#define STATUS_SCAN_HW 15
663#define STATUS_POWER_PMI 16 570#define STATUS_POWER_PMI 16
664#define STATUS_FW_ERROR 17 571#define STATUS_FW_ERROR 17
572#define STATUS_DEVICE_ENABLED 18
665 573
666 574
667static inline int iwl_is_ready(struct iwl_priv *priv) 575static inline int iwl_is_ready(struct iwl_priv *priv)
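The new STATUS_DEVICE_ENABLED bit would presumably be tested the same way as the existing STATUS_* bits; a minimal sketch, assuming priv->status is the usual unsigned-long bitmap used by helpers such as iwl_is_ready() (the helper name below is hypothetical, not part of this patch):

/* Hypothetical helper mirroring the existing STATUS_* accessors; assumes
 * priv->status is an unsigned long bitmap as the other helpers do. */
static inline int iwl_is_device_enabled(struct iwl_priv *priv)
{
	return test_bit(STATUS_DEVICE_ENABLED, &priv->status);
}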
@@ -714,11 +622,6 @@ void iwl_apm_stop(struct iwl_priv *priv);
714int iwl_apm_init(struct iwl_priv *priv); 622int iwl_apm_init(struct iwl_priv *priv);
715 623
716int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 624int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
717static inline int iwl_send_rxon_assoc(struct iwl_priv *priv,
718 struct iwl_rxon_context *ctx)
719{
720 return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
721}
722static inline int iwlcore_commit_rxon(struct iwl_priv *priv, 625static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
723 struct iwl_rxon_context *ctx) 626 struct iwl_rxon_context *ctx)
724{ 627{
@@ -736,12 +639,10 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
736 priv->cfg->bt_params->advanced_bt_coexist; 639 priv->cfg->bt_params->advanced_bt_coexist;
737} 640}
738 641
739static inline bool iwl_bt_statistics(struct iwl_priv *priv)
740{
741 return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
742}
743
744extern bool bt_coex_active; 642extern bool bt_coex_active;
745extern bool bt_siso_mode; 643extern bool bt_siso_mode;
746 644
645
646void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
647
747#endif /* __iwl_core_h__ */ 648#endif /* __iwl_core_h__ */
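With the dump_nic_event_log/dump_csr/dump_fh hooks dropped from iwl_lib_ops and re-declared above as plain functions, callers stop going through the per-device ops table. A minimal sketch of that migration, assuming only the prototypes shown in this hunk (the wrapper name is hypothetical; the real call-site change appears in the iwl-debugfs.c hunks further down):

/* Hypothetical wrapper illustrating the call-site change; only
 * iwl_dump_nic_event_log() and simple_read_from_buffer() are assumed. */
static ssize_t example_event_log_read(struct iwl_priv *priv,
				      char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	char *buf = NULL;
	int pos;
	ssize_t ret;

	/* old: pos = priv->cfg->ops->lib->dump_nic_event_log(priv, true, &buf, true); */
	pos = iwl_dump_nic_event_log(priv, true, &buf, true);
	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}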
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index f52bc040bcbf..5ab90ba7a024 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -155,18 +155,10 @@
155#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) 155#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
156 156
157/* Bits for CSR_HW_IF_CONFIG_REG */ 157/* Bits for CSR_HW_IF_CONFIG_REG */
158#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
159#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) 158#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
160#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) 159#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
161#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) 160#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
162 161
163#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
164#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
165#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
166#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
167#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
168#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
169
170#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 162#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
171#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 163#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
172#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ 164#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
@@ -186,7 +178,7 @@
186#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ 178#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
187#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ 179#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
188#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ 180#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
189#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ 181#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
190#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ 182#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
191#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ 183#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
192 184
@@ -202,29 +194,17 @@
202/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ 194/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
203#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ 195#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
204#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ 196#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
205#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
206#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ 197#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
207#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ 198#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
208#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
209#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ 199#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
210#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ 200#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
211 201
212#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ 202#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
213 CSR39_FH_INT_BIT_RX_CHNL2 | \ 203 CSR_FH_INT_BIT_RX_CHNL1 | \
214 CSR_FH_INT_BIT_RX_CHNL1 | \ 204 CSR_FH_INT_BIT_RX_CHNL0)
215 CSR_FH_INT_BIT_RX_CHNL0)
216
217
218#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
219 CSR_FH_INT_BIT_TX_CHNL1 | \
220 CSR_FH_INT_BIT_TX_CHNL0)
221
222#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
223 CSR_FH_INT_BIT_RX_CHNL1 | \
224 CSR_FH_INT_BIT_RX_CHNL0)
225 205
226#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ 206#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
227 CSR_FH_INT_BIT_TX_CHNL0) 207 CSR_FH_INT_BIT_TX_CHNL0)
228 208
229/* GPIO */ 209/* GPIO */
230#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) 210#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
@@ -268,7 +248,7 @@
268 * Indicates MAC (ucode processor, etc.) is powered up and can run. 248 * Indicates MAC (ucode processor, etc.) is powered up and can run.
269 * Internal resources are accessible. 249 * Internal resources are accessible.
270 * NOTE: This does not indicate that the processor is actually running. 250 * NOTE: This does not indicate that the processor is actually running.
271 * NOTE: This does not indicate that 4965 or 3945 has completed 251 * NOTE: This does not indicate that the device has completed
272 * init or post-power-down restore of internal SRAM memory. 252 * init or post-power-down restore of internal SRAM memory.
273 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that 253 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
274 * SRAM is restored and uCode is in normal operation mode. 254 * SRAM is restored and uCode is in normal operation mode.
@@ -291,8 +271,6 @@
291 271
292/* HW REV */ 272/* HW REV */
293#define CSR_HW_REV_TYPE_MSK (0x00001F0) 273#define CSR_HW_REV_TYPE_MSK (0x00001F0)
294#define CSR_HW_REV_TYPE_3945 (0x00000D0)
295#define CSR_HW_REV_TYPE_4965 (0x0000000)
296#define CSR_HW_REV_TYPE_5300 (0x0000020) 274#define CSR_HW_REV_TYPE_5300 (0x0000020)
297#define CSR_HW_REV_TYPE_5350 (0x0000030) 275#define CSR_HW_REV_TYPE_5350 (0x0000030)
298#define CSR_HW_REV_TYPE_5100 (0x0000050) 276#define CSR_HW_REV_TYPE_5100 (0x0000050)
@@ -363,7 +341,7 @@
363 * 0: MAC_SLEEP 341 * 0: MAC_SLEEP
364 * uCode sets this when preparing a power-saving power-down. 342 * uCode sets this when preparing a power-saving power-down.
365 * uCode resets this when power-up is complete and SRAM is sane. 343 * uCode resets this when power-up is complete and SRAM is sane.
366 * NOTE: 3945/4965 saves internal SRAM data to host when powering down, 344 * NOTE: the device saves internal SRAM data to host when powering down,
367 * and must restore this data after powering back up. 345 * and must restore this data after powering back up.
368 * MAC_SLEEP is the best indication that restore is complete. 346 * MAC_SLEEP is the best indication that restore is complete.
369 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and 347 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
@@ -394,7 +372,6 @@
394#define CSR_LED_REG_TRUN_OFF (0x38) 372#define CSR_LED_REG_TRUN_OFF (0x38)
395 373
396/* ANA_PLL */ 374/* ANA_PLL */
397#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
398#define CSR50_ANA_PLL_CFG_VAL (0x00880300) 375#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
399 376
400/* HPET MEM debug */ 377/* HPET MEM debug */
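With the CSR39_*/CSR49_* variants folded into a single CSR_FH_INT_RX_MASK/CSR_FH_INT_TX_MASK pair, a flow-handler interrupt check no longer needs per-device masks. A minimal sketch under that assumption (the handler name and the inta_fh parameter are illustrative; the real ISR is not part of this hunk):

/* Illustrative fragment only: inta_fh is assumed to hold the value the
 * driver's ISR read from the FH interrupt register. */
static void example_check_fh_int(struct iwl_priv *priv, u32 inta_fh)
{
	if (inta_fh & CSR_FH_INT_BIT_ERR)
		IWL_ERR(priv, "flow handler error, inta_fh 0x%08x\n", inta_fh);

	if (inta_fh & CSR_FH_INT_RX_MASK)
		/* Rx DMA completed: high-priority Rx or channel 0/1 */;

	if (inta_fh & CSR_FH_INT_TX_MASK)
		/* Tx DMA completed on channel 0/1 */;
}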
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index ebdea3be3ef9..2824ccbcc1fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -146,7 +146,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
146#define IWL_DL_RX (1 << 24) 146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25) 147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26) 148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */ 149/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28) 150#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29) 151#define IWL_DL_STATS (1 << 29)
@@ -174,7 +173,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) 173 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a) 174#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a) 175#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) 176#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ 177#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) 178 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
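Since IWL_DL_IO and IWL_DEBUG_IO are removed while the remaining wrappers keep the (priv, format, args...) shape, former IWL_DEBUG_IO call sites have to move to another debug class. A small usage sketch under that assumption (the function and values below are made up for illustration):

/* Illustrative only; the function and the values are made up. */
static void example_debug_usage(struct iwl_priv *priv)
{
	int rate_idx = 4;
	int txpower = 14;

	IWL_DEBUG_RATE(priv, "chose rate index %d\n", rate_idx);
	IWL_DEBUG_TXPOWER(priv, "requested tx power %d dBm\n", txpower);
}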
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 8842411f1cf3..0e6a04b739ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -39,6 +39,7 @@
39#include "iwl-debug.h" 39#include "iwl-debug.h"
40#include "iwl-core.h" 40#include "iwl-core.h"
41#include "iwl-io.h" 41#include "iwl-io.h"
42#include "iwl-agn.h"
42 43
43/* create and remove of files */ 44/* create and remove of files */
44#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -226,10 +227,10 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
226 /* default is to dump the entire data segment */ 227 /* default is to dump the entire data segment */
227 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { 228 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
228 priv->dbgfs_sram_offset = 0x800000; 229 priv->dbgfs_sram_offset = 0x800000;
229 if (priv->ucode_type == UCODE_INIT) 230 if (priv->ucode_type == UCODE_SUBTYPE_INIT)
230 priv->dbgfs_sram_len = priv->ucode_init_data.len; 231 priv->dbgfs_sram_len = priv->ucode_init.data.len;
231 else 232 else
232 priv->dbgfs_sram_len = priv->ucode_data.len; 233 priv->dbgfs_sram_len = priv->ucode_rt.data.len;
233 } 234 }
234 len = priv->dbgfs_sram_len; 235 len = priv->dbgfs_sram_len;
235 236
@@ -437,8 +438,7 @@ static ssize_t iwl_dbgfs_log_event_read(struct file *file,
437 int pos = 0; 438 int pos = 0;
438 ssize_t ret = -ENOMEM; 439 ssize_t ret = -ENOMEM;
439 440
440 ret = pos = priv->cfg->ops->lib->dump_nic_event_log( 441 ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
441 priv, true, &buf, true);
442 if (buf) { 442 if (buf) {
443 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 443 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
444 kfree(buf); 444 kfree(buf);
@@ -462,8 +462,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
462 if (sscanf(buf, "%d", &event_log_flag) != 1) 462 if (sscanf(buf, "%d", &event_log_flag) != 1)
463 return -EFAULT; 463 return -EFAULT;
464 if (event_log_flag == 1) 464 if (event_log_flag == 1)
465 priv->cfg->ops->lib->dump_nic_event_log(priv, true, 465 iwl_dump_nic_event_log(priv, true, NULL, false);
466 NULL, false);
467 466
468 return count; 467 return count;
469} 468}
@@ -1039,13 +1038,463 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1039 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1038 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1040} 1039}
1041 1040
1041static const char *fmt_value = " %-30s %10u\n";
1042static const char *fmt_hex = " %-30s 0x%02X\n";
1043static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
1044static const char *fmt_header =
1045 "%-32s current cumulative delta max\n";
1046
1047static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
1048{
1049 int p = 0;
1050 u32 flag;
1051
1052 flag = le32_to_cpu(priv->statistics.flag);
1053
1054 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
1055 if (flag & UCODE_STATISTICS_CLEAR_MSK)
1056 p += scnprintf(buf + p, bufsz - p,
1057 "\tStatistics have been cleared\n");
1058 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
1059 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
1060 ? "2.4 GHz" : "5.2 GHz");
1061 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
1062 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
1063 ? "enabled" : "disabled");
1064
1065 return p;
1066}
1067
1042static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, 1068static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1043 char __user *user_buf, 1069 char __user *user_buf,
1044 size_t count, loff_t *ppos) 1070 size_t count, loff_t *ppos)
1045{ 1071{
1046 struct iwl_priv *priv = file->private_data; 1072 struct iwl_priv *priv = file->private_data;
1047 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file, 1073 int pos = 0;
1048 user_buf, count, ppos); 1074 char *buf;
1075 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
1076 sizeof(struct statistics_rx_non_phy) * 40 +
1077 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
1078 ssize_t ret;
1079 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
1080 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
1081 struct statistics_rx_non_phy *general, *accum_general;
1082 struct statistics_rx_non_phy *delta_general, *max_general;
1083 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
1084
1085 if (!iwl_is_alive(priv))
1086 return -EAGAIN;
1087
1088 buf = kzalloc(bufsz, GFP_KERNEL);
1089 if (!buf) {
1090 IWL_ERR(priv, "Can not allocate Buffer\n");
1091 return -ENOMEM;
1092 }
1093
1094 /*
1095 * the statistics displayed here are based on the last
1096 * statistics notification from the uCode and might not
1097 * reflect the current uCode activity
1098 */
1099 ofdm = &priv->statistics.rx_ofdm;
1100 cck = &priv->statistics.rx_cck;
1101 general = &priv->statistics.rx_non_phy;
1102 ht = &priv->statistics.rx_ofdm_ht;
1103 accum_ofdm = &priv->accum_stats.rx_ofdm;
1104 accum_cck = &priv->accum_stats.rx_cck;
1105 accum_general = &priv->accum_stats.rx_non_phy;
1106 accum_ht = &priv->accum_stats.rx_ofdm_ht;
1107 delta_ofdm = &priv->delta_stats.rx_ofdm;
1108 delta_cck = &priv->delta_stats.rx_cck;
1109 delta_general = &priv->delta_stats.rx_non_phy;
1110 delta_ht = &priv->delta_stats.rx_ofdm_ht;
1111 max_ofdm = &priv->max_delta_stats.rx_ofdm;
1112 max_cck = &priv->max_delta_stats.rx_cck;
1113 max_general = &priv->max_delta_stats.rx_non_phy;
1114 max_ht = &priv->max_delta_stats.rx_ofdm_ht;
1115
1116 pos += iwl_statistics_flag(priv, buf, bufsz);
1117 pos += scnprintf(buf + pos, bufsz - pos,
1118 fmt_header, "Statistics_Rx - OFDM:");
1119 pos += scnprintf(buf + pos, bufsz - pos,
1120 fmt_table, "ina_cnt:",
1121 le32_to_cpu(ofdm->ina_cnt),
1122 accum_ofdm->ina_cnt,
1123 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
1124 pos += scnprintf(buf + pos, bufsz - pos,
1125 fmt_table, "fina_cnt:",
1126 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
1127 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
1128 pos += scnprintf(buf + pos, bufsz - pos,
1129 fmt_table, "plcp_err:",
1130 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
1131 delta_ofdm->plcp_err, max_ofdm->plcp_err);
1132 pos += scnprintf(buf + pos, bufsz - pos,
1133 fmt_table, "crc32_err:",
1134 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
1135 delta_ofdm->crc32_err, max_ofdm->crc32_err);
1136 pos += scnprintf(buf + pos, bufsz - pos,
1137 fmt_table, "overrun_err:",
1138 le32_to_cpu(ofdm->overrun_err),
1139 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
1140 max_ofdm->overrun_err);
1141 pos += scnprintf(buf + pos, bufsz - pos,
1142 fmt_table, "early_overrun_err:",
1143 le32_to_cpu(ofdm->early_overrun_err),
1144 accum_ofdm->early_overrun_err,
1145 delta_ofdm->early_overrun_err,
1146 max_ofdm->early_overrun_err);
1147 pos += scnprintf(buf + pos, bufsz - pos,
1148 fmt_table, "crc32_good:",
1149 le32_to_cpu(ofdm->crc32_good),
1150 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
1151 max_ofdm->crc32_good);
1152 pos += scnprintf(buf + pos, bufsz - pos,
1153 fmt_table, "false_alarm_cnt:",
1154 le32_to_cpu(ofdm->false_alarm_cnt),
1155 accum_ofdm->false_alarm_cnt,
1156 delta_ofdm->false_alarm_cnt,
1157 max_ofdm->false_alarm_cnt);
1158 pos += scnprintf(buf + pos, bufsz - pos,
1159 fmt_table, "fina_sync_err_cnt:",
1160 le32_to_cpu(ofdm->fina_sync_err_cnt),
1161 accum_ofdm->fina_sync_err_cnt,
1162 delta_ofdm->fina_sync_err_cnt,
1163 max_ofdm->fina_sync_err_cnt);
1164 pos += scnprintf(buf + pos, bufsz - pos,
1165 fmt_table, "sfd_timeout:",
1166 le32_to_cpu(ofdm->sfd_timeout),
1167 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
1168 max_ofdm->sfd_timeout);
1169 pos += scnprintf(buf + pos, bufsz - pos,
1170 fmt_table, "fina_timeout:",
1171 le32_to_cpu(ofdm->fina_timeout),
1172 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
1173 max_ofdm->fina_timeout);
1174 pos += scnprintf(buf + pos, bufsz - pos,
1175 fmt_table, "unresponded_rts:",
1176 le32_to_cpu(ofdm->unresponded_rts),
1177 accum_ofdm->unresponded_rts,
1178 delta_ofdm->unresponded_rts,
1179 max_ofdm->unresponded_rts);
1180 pos += scnprintf(buf + pos, bufsz - pos,
1181 fmt_table, "rxe_frame_lmt_ovrun:",
1182 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1183 accum_ofdm->rxe_frame_limit_overrun,
1184 delta_ofdm->rxe_frame_limit_overrun,
1185 max_ofdm->rxe_frame_limit_overrun);
1186 pos += scnprintf(buf + pos, bufsz - pos,
1187 fmt_table, "sent_ack_cnt:",
1188 le32_to_cpu(ofdm->sent_ack_cnt),
1189 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
1190 max_ofdm->sent_ack_cnt);
1191 pos += scnprintf(buf + pos, bufsz - pos,
1192 fmt_table, "sent_cts_cnt:",
1193 le32_to_cpu(ofdm->sent_cts_cnt),
1194 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
1195 max_ofdm->sent_cts_cnt);
1196 pos += scnprintf(buf + pos, bufsz - pos,
1197 fmt_table, "sent_ba_rsp_cnt:",
1198 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1199 accum_ofdm->sent_ba_rsp_cnt,
1200 delta_ofdm->sent_ba_rsp_cnt,
1201 max_ofdm->sent_ba_rsp_cnt);
1202 pos += scnprintf(buf + pos, bufsz - pos,
1203 fmt_table, "dsp_self_kill:",
1204 le32_to_cpu(ofdm->dsp_self_kill),
1205 accum_ofdm->dsp_self_kill,
1206 delta_ofdm->dsp_self_kill,
1207 max_ofdm->dsp_self_kill);
1208 pos += scnprintf(buf + pos, bufsz - pos,
1209 fmt_table, "mh_format_err:",
1210 le32_to_cpu(ofdm->mh_format_err),
1211 accum_ofdm->mh_format_err,
1212 delta_ofdm->mh_format_err,
1213 max_ofdm->mh_format_err);
1214 pos += scnprintf(buf + pos, bufsz - pos,
1215 fmt_table, "re_acq_main_rssi_sum:",
1216 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1217 accum_ofdm->re_acq_main_rssi_sum,
1218 delta_ofdm->re_acq_main_rssi_sum,
1219 max_ofdm->re_acq_main_rssi_sum);
1220
1221 pos += scnprintf(buf + pos, bufsz - pos,
1222 fmt_header, "Statistics_Rx - CCK:");
1223 pos += scnprintf(buf + pos, bufsz - pos,
1224 fmt_table, "ina_cnt:",
1225 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
1226 delta_cck->ina_cnt, max_cck->ina_cnt);
1227 pos += scnprintf(buf + pos, bufsz - pos,
1228 fmt_table, "fina_cnt:",
1229 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
1230 delta_cck->fina_cnt, max_cck->fina_cnt);
1231 pos += scnprintf(buf + pos, bufsz - pos,
1232 fmt_table, "plcp_err:",
1233 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
1234 delta_cck->plcp_err, max_cck->plcp_err);
1235 pos += scnprintf(buf + pos, bufsz - pos,
1236 fmt_table, "crc32_err:",
1237 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
1238 delta_cck->crc32_err, max_cck->crc32_err);
1239 pos += scnprintf(buf + pos, bufsz - pos,
1240 fmt_table, "overrun_err:",
1241 le32_to_cpu(cck->overrun_err),
1242 accum_cck->overrun_err, delta_cck->overrun_err,
1243 max_cck->overrun_err);
1244 pos += scnprintf(buf + pos, bufsz - pos,
1245 fmt_table, "early_overrun_err:",
1246 le32_to_cpu(cck->early_overrun_err),
1247 accum_cck->early_overrun_err,
1248 delta_cck->early_overrun_err,
1249 max_cck->early_overrun_err);
1250 pos += scnprintf(buf + pos, bufsz - pos,
1251 fmt_table, "crc32_good:",
1252 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
1253 delta_cck->crc32_good, max_cck->crc32_good);
1254 pos += scnprintf(buf + pos, bufsz - pos,
1255 fmt_table, "false_alarm_cnt:",
1256 le32_to_cpu(cck->false_alarm_cnt),
1257 accum_cck->false_alarm_cnt,
1258 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
1259 pos += scnprintf(buf + pos, bufsz - pos,
1260 fmt_table, "fina_sync_err_cnt:",
1261 le32_to_cpu(cck->fina_sync_err_cnt),
1262 accum_cck->fina_sync_err_cnt,
1263 delta_cck->fina_sync_err_cnt,
1264 max_cck->fina_sync_err_cnt);
1265 pos += scnprintf(buf + pos, bufsz - pos,
1266 fmt_table, "sfd_timeout:",
1267 le32_to_cpu(cck->sfd_timeout),
1268 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
1269 max_cck->sfd_timeout);
1270 pos += scnprintf(buf + pos, bufsz - pos,
1271 fmt_table, "fina_timeout:",
1272 le32_to_cpu(cck->fina_timeout),
1273 accum_cck->fina_timeout, delta_cck->fina_timeout,
1274 max_cck->fina_timeout);
1275 pos += scnprintf(buf + pos, bufsz - pos,
1276 fmt_table, "unresponded_rts:",
1277 le32_to_cpu(cck->unresponded_rts),
1278 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
1279 max_cck->unresponded_rts);
1280 pos += scnprintf(buf + pos, bufsz - pos,
1281 fmt_table, "rxe_frame_lmt_ovrun:",
1282 le32_to_cpu(cck->rxe_frame_limit_overrun),
1283 accum_cck->rxe_frame_limit_overrun,
1284 delta_cck->rxe_frame_limit_overrun,
1285 max_cck->rxe_frame_limit_overrun);
1286 pos += scnprintf(buf + pos, bufsz - pos,
1287 fmt_table, "sent_ack_cnt:",
1288 le32_to_cpu(cck->sent_ack_cnt),
1289 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
1290 max_cck->sent_ack_cnt);
1291 pos += scnprintf(buf + pos, bufsz - pos,
1292 fmt_table, "sent_cts_cnt:",
1293 le32_to_cpu(cck->sent_cts_cnt),
1294 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
1295 max_cck->sent_cts_cnt);
1296 pos += scnprintf(buf + pos, bufsz - pos,
1297 fmt_table, "sent_ba_rsp_cnt:",
1298 le32_to_cpu(cck->sent_ba_rsp_cnt),
1299 accum_cck->sent_ba_rsp_cnt,
1300 delta_cck->sent_ba_rsp_cnt,
1301 max_cck->sent_ba_rsp_cnt);
1302 pos += scnprintf(buf + pos, bufsz - pos,
1303 fmt_table, "dsp_self_kill:",
1304 le32_to_cpu(cck->dsp_self_kill),
1305 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
1306 max_cck->dsp_self_kill);
1307 pos += scnprintf(buf + pos, bufsz - pos,
1308 fmt_table, "mh_format_err:",
1309 le32_to_cpu(cck->mh_format_err),
1310 accum_cck->mh_format_err, delta_cck->mh_format_err,
1311 max_cck->mh_format_err);
1312 pos += scnprintf(buf + pos, bufsz - pos,
1313 fmt_table, "re_acq_main_rssi_sum:",
1314 le32_to_cpu(cck->re_acq_main_rssi_sum),
1315 accum_cck->re_acq_main_rssi_sum,
1316 delta_cck->re_acq_main_rssi_sum,
1317 max_cck->re_acq_main_rssi_sum);
1318
1319 pos += scnprintf(buf + pos, bufsz - pos,
1320 fmt_header, "Statistics_Rx - GENERAL:");
1321 pos += scnprintf(buf + pos, bufsz - pos,
1322 fmt_table, "bogus_cts:",
1323 le32_to_cpu(general->bogus_cts),
1324 accum_general->bogus_cts, delta_general->bogus_cts,
1325 max_general->bogus_cts);
1326 pos += scnprintf(buf + pos, bufsz - pos,
1327 fmt_table, "bogus_ack:",
1328 le32_to_cpu(general->bogus_ack),
1329 accum_general->bogus_ack, delta_general->bogus_ack,
1330 max_general->bogus_ack);
1331 pos += scnprintf(buf + pos, bufsz - pos,
1332 fmt_table, "non_bssid_frames:",
1333 le32_to_cpu(general->non_bssid_frames),
1334 accum_general->non_bssid_frames,
1335 delta_general->non_bssid_frames,
1336 max_general->non_bssid_frames);
1337 pos += scnprintf(buf + pos, bufsz - pos,
1338 fmt_table, "filtered_frames:",
1339 le32_to_cpu(general->filtered_frames),
1340 accum_general->filtered_frames,
1341 delta_general->filtered_frames,
1342 max_general->filtered_frames);
1343 pos += scnprintf(buf + pos, bufsz - pos,
1344 fmt_table, "non_channel_beacons:",
1345 le32_to_cpu(general->non_channel_beacons),
1346 accum_general->non_channel_beacons,
1347 delta_general->non_channel_beacons,
1348 max_general->non_channel_beacons);
1349 pos += scnprintf(buf + pos, bufsz - pos,
1350 fmt_table, "channel_beacons:",
1351 le32_to_cpu(general->channel_beacons),
1352 accum_general->channel_beacons,
1353 delta_general->channel_beacons,
1354 max_general->channel_beacons);
1355 pos += scnprintf(buf + pos, bufsz - pos,
1356 fmt_table, "num_missed_bcon:",
1357 le32_to_cpu(general->num_missed_bcon),
1358 accum_general->num_missed_bcon,
1359 delta_general->num_missed_bcon,
1360 max_general->num_missed_bcon);
1361 pos += scnprintf(buf + pos, bufsz - pos,
1362 fmt_table, "adc_rx_saturation_time:",
1363 le32_to_cpu(general->adc_rx_saturation_time),
1364 accum_general->adc_rx_saturation_time,
1365 delta_general->adc_rx_saturation_time,
1366 max_general->adc_rx_saturation_time);
1367 pos += scnprintf(buf + pos, bufsz - pos,
1368 fmt_table, "ina_detect_search_tm:",
1369 le32_to_cpu(general->ina_detection_search_time),
1370 accum_general->ina_detection_search_time,
1371 delta_general->ina_detection_search_time,
1372 max_general->ina_detection_search_time);
1373 pos += scnprintf(buf + pos, bufsz - pos,
1374 fmt_table, "beacon_silence_rssi_a:",
1375 le32_to_cpu(general->beacon_silence_rssi_a),
1376 accum_general->beacon_silence_rssi_a,
1377 delta_general->beacon_silence_rssi_a,
1378 max_general->beacon_silence_rssi_a);
1379 pos += scnprintf(buf + pos, bufsz - pos,
1380 fmt_table, "beacon_silence_rssi_b:",
1381 le32_to_cpu(general->beacon_silence_rssi_b),
1382 accum_general->beacon_silence_rssi_b,
1383 delta_general->beacon_silence_rssi_b,
1384 max_general->beacon_silence_rssi_b);
1385 pos += scnprintf(buf + pos, bufsz - pos,
1386 fmt_table, "beacon_silence_rssi_c:",
1387 le32_to_cpu(general->beacon_silence_rssi_c),
1388 accum_general->beacon_silence_rssi_c,
1389 delta_general->beacon_silence_rssi_c,
1390 max_general->beacon_silence_rssi_c);
1391 pos += scnprintf(buf + pos, bufsz - pos,
1392 fmt_table, "interference_data_flag:",
1393 le32_to_cpu(general->interference_data_flag),
1394 accum_general->interference_data_flag,
1395 delta_general->interference_data_flag,
1396 max_general->interference_data_flag);
1397 pos += scnprintf(buf + pos, bufsz - pos,
1398 fmt_table, "channel_load:",
1399 le32_to_cpu(general->channel_load),
1400 accum_general->channel_load,
1401 delta_general->channel_load,
1402 max_general->channel_load);
1403 pos += scnprintf(buf + pos, bufsz - pos,
1404 fmt_table, "dsp_false_alarms:",
1405 le32_to_cpu(general->dsp_false_alarms),
1406 accum_general->dsp_false_alarms,
1407 delta_general->dsp_false_alarms,
1408 max_general->dsp_false_alarms);
1409 pos += scnprintf(buf + pos, bufsz - pos,
1410 fmt_table, "beacon_rssi_a:",
1411 le32_to_cpu(general->beacon_rssi_a),
1412 accum_general->beacon_rssi_a,
1413 delta_general->beacon_rssi_a,
1414 max_general->beacon_rssi_a);
1415 pos += scnprintf(buf + pos, bufsz - pos,
1416 fmt_table, "beacon_rssi_b:",
1417 le32_to_cpu(general->beacon_rssi_b),
1418 accum_general->beacon_rssi_b,
1419 delta_general->beacon_rssi_b,
1420 max_general->beacon_rssi_b);
1421 pos += scnprintf(buf + pos, bufsz - pos,
1422 fmt_table, "beacon_rssi_c:",
1423 le32_to_cpu(general->beacon_rssi_c),
1424 accum_general->beacon_rssi_c,
1425 delta_general->beacon_rssi_c,
1426 max_general->beacon_rssi_c);
1427 pos += scnprintf(buf + pos, bufsz - pos,
1428 fmt_table, "beacon_energy_a:",
1429 le32_to_cpu(general->beacon_energy_a),
1430 accum_general->beacon_energy_a,
1431 delta_general->beacon_energy_a,
1432 max_general->beacon_energy_a);
1433 pos += scnprintf(buf + pos, bufsz - pos,
1434 fmt_table, "beacon_energy_b:",
1435 le32_to_cpu(general->beacon_energy_b),
1436 accum_general->beacon_energy_b,
1437 delta_general->beacon_energy_b,
1438 max_general->beacon_energy_b);
1439 pos += scnprintf(buf + pos, bufsz - pos,
1440 fmt_table, "beacon_energy_c:",
1441 le32_to_cpu(general->beacon_energy_c),
1442 accum_general->beacon_energy_c,
1443 delta_general->beacon_energy_c,
1444 max_general->beacon_energy_c);
1445
1446 pos += scnprintf(buf + pos, bufsz - pos,
1447 fmt_header, "Statistics_Rx - OFDM_HT:");
1448 pos += scnprintf(buf + pos, bufsz - pos,
1449 fmt_table, "plcp_err:",
1450 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
1451 delta_ht->plcp_err, max_ht->plcp_err);
1452 pos += scnprintf(buf + pos, bufsz - pos,
1453 fmt_table, "overrun_err:",
1454 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
1455 delta_ht->overrun_err, max_ht->overrun_err);
1456 pos += scnprintf(buf + pos, bufsz - pos,
1457 fmt_table, "early_overrun_err:",
1458 le32_to_cpu(ht->early_overrun_err),
1459 accum_ht->early_overrun_err,
1460 delta_ht->early_overrun_err,
1461 max_ht->early_overrun_err);
1462 pos += scnprintf(buf + pos, bufsz - pos,
1463 fmt_table, "crc32_good:",
1464 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
1465 delta_ht->crc32_good, max_ht->crc32_good);
1466 pos += scnprintf(buf + pos, bufsz - pos,
1467 fmt_table, "crc32_err:",
1468 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
1469 delta_ht->crc32_err, max_ht->crc32_err);
1470 pos += scnprintf(buf + pos, bufsz - pos,
1471 fmt_table, "mh_format_err:",
1472 le32_to_cpu(ht->mh_format_err),
1473 accum_ht->mh_format_err,
1474 delta_ht->mh_format_err, max_ht->mh_format_err);
1475 pos += scnprintf(buf + pos, bufsz - pos,
1476 fmt_table, "agg_crc32_good:",
1477 le32_to_cpu(ht->agg_crc32_good),
1478 accum_ht->agg_crc32_good,
1479 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
1480 pos += scnprintf(buf + pos, bufsz - pos,
1481 fmt_table, "agg_mpdu_cnt:",
1482 le32_to_cpu(ht->agg_mpdu_cnt),
1483 accum_ht->agg_mpdu_cnt,
1484 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
1485 pos += scnprintf(buf + pos, bufsz - pos,
1486 fmt_table, "agg_cnt:",
1487 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
1488 delta_ht->agg_cnt, max_ht->agg_cnt);
1489 pos += scnprintf(buf + pos, bufsz - pos,
1490 fmt_table, "unsupport_mcs:",
1491 le32_to_cpu(ht->unsupport_mcs),
1492 accum_ht->unsupport_mcs,
1493 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
1494
1495 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1496 kfree(buf);
1497 return ret;
1049} 1498}
1050 1499
1051static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, 1500static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
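The rx/tx/general statistics readers that replace the old debugfs_ops indirection all share the same shape; a condensed sketch of that shape, reduced to a single field (any name not already shown in these hunks is illustrative):

/* Condensed, hypothetical reader following the same pattern as the
 * handlers in these hunks; only one statistics field is printed. */
static ssize_t example_stats_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0, bufsz = 4096;
	char *buf;
	ssize_t ret;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += iwl_statistics_flag(priv, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
			 le32_to_cpu(priv->statistics.rx_ofdm.plcp_err),
			 priv->accum_stats.rx_ofdm.plcp_err,
			 priv->delta_stats.rx_ofdm.plcp_err,
			 priv->max_delta_stats.rx_ofdm.plcp_err);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}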
@@ -1053,8 +1502,190 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1053 size_t count, loff_t *ppos) 1502 size_t count, loff_t *ppos)
1054{ 1503{
1055 struct iwl_priv *priv = file->private_data; 1504 struct iwl_priv *priv = file->private_data;
1056 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file, 1505 int pos = 0;
1057 user_buf, count, ppos); 1506 char *buf;
1507 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
1508 ssize_t ret;
1509 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
1510
1511 if (!iwl_is_alive(priv))
1512 return -EAGAIN;
1513
1514 buf = kzalloc(bufsz, GFP_KERNEL);
1515 if (!buf) {
1516 IWL_ERR(priv, "Can not allocate Buffer\n");
1517 return -ENOMEM;
1518 }
1519
1520 /* the statistics displayed here are based on the last
1521 * statistics notification from the uCode and might not
1522 * reflect the current uCode activity
1523 */
1524 tx = &priv->statistics.tx;
1525 accum_tx = &priv->accum_stats.tx;
1526 delta_tx = &priv->delta_stats.tx;
1527 max_tx = &priv->max_delta_stats.tx;
1528
1529 pos += iwl_statistics_flag(priv, buf, bufsz);
1530 pos += scnprintf(buf + pos, bufsz - pos,
1531 fmt_header, "Statistics_Tx:");
1532 pos += scnprintf(buf + pos, bufsz - pos,
1533 fmt_table, "preamble:",
1534 le32_to_cpu(tx->preamble_cnt),
1535 accum_tx->preamble_cnt,
1536 delta_tx->preamble_cnt, max_tx->preamble_cnt);
1537 pos += scnprintf(buf + pos, bufsz - pos,
1538 fmt_table, "rx_detected_cnt:",
1539 le32_to_cpu(tx->rx_detected_cnt),
1540 accum_tx->rx_detected_cnt,
1541 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
1542 pos += scnprintf(buf + pos, bufsz - pos,
1543 fmt_table, "bt_prio_defer_cnt:",
1544 le32_to_cpu(tx->bt_prio_defer_cnt),
1545 accum_tx->bt_prio_defer_cnt,
1546 delta_tx->bt_prio_defer_cnt,
1547 max_tx->bt_prio_defer_cnt);
1548 pos += scnprintf(buf + pos, bufsz - pos,
1549 fmt_table, "bt_prio_kill_cnt:",
1550 le32_to_cpu(tx->bt_prio_kill_cnt),
1551 accum_tx->bt_prio_kill_cnt,
1552 delta_tx->bt_prio_kill_cnt,
1553 max_tx->bt_prio_kill_cnt);
1554 pos += scnprintf(buf + pos, bufsz - pos,
1555 fmt_table, "few_bytes_cnt:",
1556 le32_to_cpu(tx->few_bytes_cnt),
1557 accum_tx->few_bytes_cnt,
1558 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
1559 pos += scnprintf(buf + pos, bufsz - pos,
1560 fmt_table, "cts_timeout:",
1561 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
1562 delta_tx->cts_timeout, max_tx->cts_timeout);
1563 pos += scnprintf(buf + pos, bufsz - pos,
1564 fmt_table, "ack_timeout:",
1565 le32_to_cpu(tx->ack_timeout),
1566 accum_tx->ack_timeout,
1567 delta_tx->ack_timeout, max_tx->ack_timeout);
1568 pos += scnprintf(buf + pos, bufsz - pos,
1569 fmt_table, "expected_ack_cnt:",
1570 le32_to_cpu(tx->expected_ack_cnt),
1571 accum_tx->expected_ack_cnt,
1572 delta_tx->expected_ack_cnt,
1573 max_tx->expected_ack_cnt);
1574 pos += scnprintf(buf + pos, bufsz - pos,
1575 fmt_table, "actual_ack_cnt:",
1576 le32_to_cpu(tx->actual_ack_cnt),
1577 accum_tx->actual_ack_cnt,
1578 delta_tx->actual_ack_cnt,
1579 max_tx->actual_ack_cnt);
1580 pos += scnprintf(buf + pos, bufsz - pos,
1581 fmt_table, "dump_msdu_cnt:",
1582 le32_to_cpu(tx->dump_msdu_cnt),
1583 accum_tx->dump_msdu_cnt,
1584 delta_tx->dump_msdu_cnt,
1585 max_tx->dump_msdu_cnt);
1586 pos += scnprintf(buf + pos, bufsz - pos,
1587 fmt_table, "abort_nxt_frame_mismatch:",
1588 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1589 accum_tx->burst_abort_next_frame_mismatch_cnt,
1590 delta_tx->burst_abort_next_frame_mismatch_cnt,
1591 max_tx->burst_abort_next_frame_mismatch_cnt);
1592 pos += scnprintf(buf + pos, bufsz - pos,
1593 fmt_table, "abort_missing_nxt_frame:",
1594 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1595 accum_tx->burst_abort_missing_next_frame_cnt,
1596 delta_tx->burst_abort_missing_next_frame_cnt,
1597 max_tx->burst_abort_missing_next_frame_cnt);
1598 pos += scnprintf(buf + pos, bufsz - pos,
1599 fmt_table, "cts_timeout_collision:",
1600 le32_to_cpu(tx->cts_timeout_collision),
1601 accum_tx->cts_timeout_collision,
1602 delta_tx->cts_timeout_collision,
1603 max_tx->cts_timeout_collision);
1604 pos += scnprintf(buf + pos, bufsz - pos,
1605 fmt_table, "ack_ba_timeout_collision:",
1606 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1607 accum_tx->ack_or_ba_timeout_collision,
1608 delta_tx->ack_or_ba_timeout_collision,
1609 max_tx->ack_or_ba_timeout_collision);
1610 pos += scnprintf(buf + pos, bufsz - pos,
1611 fmt_table, "agg ba_timeout:",
1612 le32_to_cpu(tx->agg.ba_timeout),
1613 accum_tx->agg.ba_timeout,
1614 delta_tx->agg.ba_timeout,
1615 max_tx->agg.ba_timeout);
1616 pos += scnprintf(buf + pos, bufsz - pos,
1617 fmt_table, "agg ba_resched_frames:",
1618 le32_to_cpu(tx->agg.ba_reschedule_frames),
1619 accum_tx->agg.ba_reschedule_frames,
1620 delta_tx->agg.ba_reschedule_frames,
1621 max_tx->agg.ba_reschedule_frames);
1622 pos += scnprintf(buf + pos, bufsz - pos,
1623 fmt_table, "agg scd_query_agg_frame:",
1624 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1625 accum_tx->agg.scd_query_agg_frame_cnt,
1626 delta_tx->agg.scd_query_agg_frame_cnt,
1627 max_tx->agg.scd_query_agg_frame_cnt);
1628 pos += scnprintf(buf + pos, bufsz - pos,
1629 fmt_table, "agg scd_query_no_agg:",
1630 le32_to_cpu(tx->agg.scd_query_no_agg),
1631 accum_tx->agg.scd_query_no_agg,
1632 delta_tx->agg.scd_query_no_agg,
1633 max_tx->agg.scd_query_no_agg);
1634 pos += scnprintf(buf + pos, bufsz - pos,
1635 fmt_table, "agg scd_query_agg:",
1636 le32_to_cpu(tx->agg.scd_query_agg),
1637 accum_tx->agg.scd_query_agg,
1638 delta_tx->agg.scd_query_agg,
1639 max_tx->agg.scd_query_agg);
1640 pos += scnprintf(buf + pos, bufsz - pos,
1641 fmt_table, "agg scd_query_mismatch:",
1642 le32_to_cpu(tx->agg.scd_query_mismatch),
1643 accum_tx->agg.scd_query_mismatch,
1644 delta_tx->agg.scd_query_mismatch,
1645 max_tx->agg.scd_query_mismatch);
1646 pos += scnprintf(buf + pos, bufsz - pos,
1647 fmt_table, "agg frame_not_ready:",
1648 le32_to_cpu(tx->agg.frame_not_ready),
1649 accum_tx->agg.frame_not_ready,
1650 delta_tx->agg.frame_not_ready,
1651 max_tx->agg.frame_not_ready);
1652 pos += scnprintf(buf + pos, bufsz - pos,
1653 fmt_table, "agg underrun:",
1654 le32_to_cpu(tx->agg.underrun),
1655 accum_tx->agg.underrun,
1656 delta_tx->agg.underrun, max_tx->agg.underrun);
1657 pos += scnprintf(buf + pos, bufsz - pos,
1658 fmt_table, "agg bt_prio_kill:",
1659 le32_to_cpu(tx->agg.bt_prio_kill),
1660 accum_tx->agg.bt_prio_kill,
1661 delta_tx->agg.bt_prio_kill,
1662 max_tx->agg.bt_prio_kill);
1663 pos += scnprintf(buf + pos, bufsz - pos,
1664 fmt_table, "agg rx_ba_rsp_cnt:",
1665 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1666 accum_tx->agg.rx_ba_rsp_cnt,
1667 delta_tx->agg.rx_ba_rsp_cnt,
1668 max_tx->agg.rx_ba_rsp_cnt);
1669
1670 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
1671 pos += scnprintf(buf + pos, bufsz - pos,
1672 "tx power: (1/2 dB step)\n");
1673 if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
1674 pos += scnprintf(buf + pos, bufsz - pos,
1675 fmt_hex, "antenna A:",
1676 tx->tx_power.ant_a);
1677 if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
1678 pos += scnprintf(buf + pos, bufsz - pos,
1679 fmt_hex, "antenna B:",
1680 tx->tx_power.ant_b);
1681 if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
1682 pos += scnprintf(buf + pos, bufsz - pos,
1683 fmt_hex, "antenna C:",
1684 tx->tx_power.ant_c);
1685 }
1686 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1687 kfree(buf);
1688 return ret;
1058} 1689}
1059 1690
1060static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, 1691static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
@@ -1062,8 +1693,347 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1062 size_t count, loff_t *ppos) 1693 size_t count, loff_t *ppos)
1063{ 1694{
1064 struct iwl_priv *priv = file->private_data; 1695 struct iwl_priv *priv = file->private_data;
1065 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file, 1696 int pos = 0;
1066 user_buf, count, ppos); 1697 char *buf;
1698 int bufsz = sizeof(struct statistics_general) * 10 + 300;
1699 ssize_t ret;
1700 struct statistics_general_common *general, *accum_general;
1701 struct statistics_general_common *delta_general, *max_general;
1702 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
1703 struct statistics_div *div, *accum_div, *delta_div, *max_div;
1704
1705 if (!iwl_is_alive(priv))
1706 return -EAGAIN;
1707
1708 buf = kzalloc(bufsz, GFP_KERNEL);
1709 if (!buf) {
1710 IWL_ERR(priv, "Can not allocate Buffer\n");
1711 return -ENOMEM;
1712 }
1713
1714 /* the statistics displayed here are based on the last
1715 * statistics notification from the uCode and might not
1716 * reflect the current uCode activity
1717 */
1718 general = &priv->statistics.common;
1719 dbg = &priv->statistics.common.dbg;
1720 div = &priv->statistics.common.div;
1721 accum_general = &priv->accum_stats.common;
1722 accum_dbg = &priv->accum_stats.common.dbg;
1723 accum_div = &priv->accum_stats.common.div;
1724 delta_general = &priv->delta_stats.common;
1725 max_general = &priv->max_delta_stats.common;
1726 delta_dbg = &priv->delta_stats.common.dbg;
1727 max_dbg = &priv->max_delta_stats.common.dbg;
1728 delta_div = &priv->delta_stats.common.div;
1729 max_div = &priv->max_delta_stats.common.div;
1730
1731 pos += iwl_statistics_flag(priv, buf, bufsz);
1732 pos += scnprintf(buf + pos, bufsz - pos,
1733 fmt_header, "Statistics_General:");
1734 pos += scnprintf(buf + pos, bufsz - pos,
1735 fmt_value, "temperature:",
1736 le32_to_cpu(general->temperature));
1737 pos += scnprintf(buf + pos, bufsz - pos,
1738 fmt_value, "temperature_m:",
1739 le32_to_cpu(general->temperature_m));
1740 pos += scnprintf(buf + pos, bufsz - pos,
1741 fmt_value, "ttl_timestamp:",
1742 le32_to_cpu(general->ttl_timestamp));
1743 pos += scnprintf(buf + pos, bufsz - pos,
1744 fmt_table, "burst_check:",
1745 le32_to_cpu(dbg->burst_check),
1746 accum_dbg->burst_check,
1747 delta_dbg->burst_check, max_dbg->burst_check);
1748 pos += scnprintf(buf + pos, bufsz - pos,
1749 fmt_table, "burst_count:",
1750 le32_to_cpu(dbg->burst_count),
1751 accum_dbg->burst_count,
1752 delta_dbg->burst_count, max_dbg->burst_count);
1753 pos += scnprintf(buf + pos, bufsz - pos,
1754 fmt_table, "wait_for_silence_timeout_count:",
1755 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
1756 accum_dbg->wait_for_silence_timeout_cnt,
1757 delta_dbg->wait_for_silence_timeout_cnt,
1758 max_dbg->wait_for_silence_timeout_cnt);
1759 pos += scnprintf(buf + pos, bufsz - pos,
1760 fmt_table, "sleep_time:",
1761 le32_to_cpu(general->sleep_time),
1762 accum_general->sleep_time,
1763 delta_general->sleep_time, max_general->sleep_time);
1764 pos += scnprintf(buf + pos, bufsz - pos,
1765 fmt_table, "slots_out:",
1766 le32_to_cpu(general->slots_out),
1767 accum_general->slots_out,
1768 delta_general->slots_out, max_general->slots_out);
1769 pos += scnprintf(buf + pos, bufsz - pos,
1770 fmt_table, "slots_idle:",
1771 le32_to_cpu(general->slots_idle),
1772 accum_general->slots_idle,
1773 delta_general->slots_idle, max_general->slots_idle);
1774 pos += scnprintf(buf + pos, bufsz - pos,
1775 fmt_table, "tx_on_a:",
1776 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
1777 delta_div->tx_on_a, max_div->tx_on_a);
1778 pos += scnprintf(buf + pos, bufsz - pos,
1779 fmt_table, "tx_on_b:",
1780 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
1781 delta_div->tx_on_b, max_div->tx_on_b);
1782 pos += scnprintf(buf + pos, bufsz - pos,
1783 fmt_table, "exec_time:",
1784 le32_to_cpu(div->exec_time), accum_div->exec_time,
1785 delta_div->exec_time, max_div->exec_time);
1786 pos += scnprintf(buf + pos, bufsz - pos,
1787 fmt_table, "probe_time:",
1788 le32_to_cpu(div->probe_time), accum_div->probe_time,
1789 delta_div->probe_time, max_div->probe_time);
1790 pos += scnprintf(buf + pos, bufsz - pos,
1791 fmt_table, "rx_enable_counter:",
1792 le32_to_cpu(general->rx_enable_counter),
1793 accum_general->rx_enable_counter,
1794 delta_general->rx_enable_counter,
1795 max_general->rx_enable_counter);
1796 pos += scnprintf(buf + pos, bufsz - pos,
1797 fmt_table, "num_of_sos_states:",
1798 le32_to_cpu(general->num_of_sos_states),
1799 accum_general->num_of_sos_states,
1800 delta_general->num_of_sos_states,
1801 max_general->num_of_sos_states);
1802 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1803 kfree(buf);
1804 return ret;
1805}
1806
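
Editor's note (not part of the patch): the debugfs handlers above all follow the same pattern - allocate one buffer sized for the worst case, append each formatted line at buf + pos bounded by bufsz - pos, then copy out only the bytes actually written. The sketch below is a minimal user-space model of that pattern; append() stands in for the kernel's scnprintf() (which returns the number of characters stored, unlike snprintf()), and the final fwrite() stands in for simple_read_from_buffer(). All names and sizes here are illustrative.

/* User-space model of the bounded-append pattern used by the handlers above. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static int append(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (n < 0)
		return 0;
	/* mimic scnprintf(): report what was actually stored, not the would-be length */
	return n < (int)size ? n : (int)(size - 1);
}

int main(void)
{
	const int bufsz = 256;			/* sized for the worst case, as above */
	char *buf = calloc(1, bufsz);
	int pos = 0;

	if (!buf)
		return 1;
	pos += append(buf + pos, bufsz - pos, "Statistics_General:\n");
	pos += append(buf + pos, bufsz - pos, "%-32s %u\n", "temperature:", 42u);
	fwrite(buf, 1, pos, stdout);		/* handlers use simple_read_from_buffer() here */
	free(buf);
	return 0;
}
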
1807static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1808 char __user *user_buf,
1809 size_t count, loff_t *ppos)
1810{
1811 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1812 int pos = 0;
1813 char *buf;
1814 int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200;
1815 ssize_t ret;
1816 struct statistics_bt_activity *bt, *accum_bt;
1817
1818 if (!iwl_is_alive(priv))
1819 return -EAGAIN;
1820
1821 if (!priv->bt_enable_flag)
1822 return -EINVAL;
1823
1824 /* make request to uCode to retrieve statistics information */
1825 mutex_lock(&priv->mutex);
1826 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1827 mutex_unlock(&priv->mutex);
1828
1829 if (ret) {
1830 IWL_ERR(priv,
1831 "Error sending statistics request: %zd\n", ret);
1832 return -EAGAIN;
1833 }
1834 buf = kzalloc(bufsz, GFP_KERNEL);
1835 if (!buf) {
1836 IWL_ERR(priv, "Can not allocate Buffer\n");
1837 return -ENOMEM;
1838 }
1839
1840 /*
 1841 * the statistics displayed here are based on the last
 1842 * statistics notification from the uCode and might not
 1843 * reflect the current uCode activity
1844 */
1845 bt = &priv->statistics.bt_activity;
1846 accum_bt = &priv->accum_stats.bt_activity;
1847
1848 pos += iwl_statistics_flag(priv, buf, bufsz);
1849 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n");
1850 pos += scnprintf(buf + pos, bufsz - pos,
1851 "\t\t\tcurrent\t\t\taccumulative\n");
1852 pos += scnprintf(buf + pos, bufsz - pos,
1853 "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
1854 le32_to_cpu(bt->hi_priority_tx_req_cnt),
1855 accum_bt->hi_priority_tx_req_cnt);
1856 pos += scnprintf(buf + pos, bufsz - pos,
1857 "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
1858 le32_to_cpu(bt->hi_priority_tx_denied_cnt),
1859 accum_bt->hi_priority_tx_denied_cnt);
1860 pos += scnprintf(buf + pos, bufsz - pos,
1861 "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
1862 le32_to_cpu(bt->lo_priority_tx_req_cnt),
1863 accum_bt->lo_priority_tx_req_cnt);
1864 pos += scnprintf(buf + pos, bufsz - pos,
1865 "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
1866 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
1867 accum_bt->lo_priority_tx_denied_cnt);
1868 pos += scnprintf(buf + pos, bufsz - pos,
1869 "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
1870 le32_to_cpu(bt->hi_priority_rx_req_cnt),
1871 accum_bt->hi_priority_rx_req_cnt);
1872 pos += scnprintf(buf + pos, bufsz - pos,
1873 "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
1874 le32_to_cpu(bt->hi_priority_rx_denied_cnt),
1875 accum_bt->hi_priority_rx_denied_cnt);
1876 pos += scnprintf(buf + pos, bufsz - pos,
1877 "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
1878 le32_to_cpu(bt->lo_priority_rx_req_cnt),
1879 accum_bt->lo_priority_rx_req_cnt);
1880 pos += scnprintf(buf + pos, bufsz - pos,
1881 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
1882 le32_to_cpu(bt->lo_priority_rx_denied_cnt),
1883 accum_bt->lo_priority_rx_denied_cnt);
1884
1885 pos += scnprintf(buf + pos, bufsz - pos,
1886 "(rx)num_bt_kills:\t\t%u\t\t\t%u\n",
1887 le32_to_cpu(priv->statistics.num_bt_kills),
1888 priv->statistics.accum_num_bt_kills);
1889
1890 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1891 kfree(buf);
1892 return ret;
1893}
1894
1895static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1896 char __user *user_buf,
1897 size_t count, loff_t *ppos)
1898{
1899 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1900 int pos = 0;
1901 char *buf;
1902 int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
1903 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
1904 ssize_t ret;
1905
1906 if (!iwl_is_alive(priv))
1907 return -EAGAIN;
1908
1909 buf = kzalloc(bufsz, GFP_KERNEL);
1910 if (!buf) {
1911 IWL_ERR(priv, "Can not allocate Buffer\n");
1912 return -ENOMEM;
1913 }
1914
1915 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
1916 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
1917 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
1918 priv->_agn.reply_tx_stats.pp_delay);
1919 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1920 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
1921 priv->_agn.reply_tx_stats.pp_few_bytes);
1922 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1923 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
1924 priv->_agn.reply_tx_stats.pp_bt_prio);
1925 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1926 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
1927 priv->_agn.reply_tx_stats.pp_quiet_period);
1928 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1929 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
1930 priv->_agn.reply_tx_stats.pp_calc_ttak);
1931 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1932 iwl_get_tx_fail_reason(
1933 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
1934 priv->_agn.reply_tx_stats.int_crossed_retry);
1935 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1936 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
1937 priv->_agn.reply_tx_stats.short_limit);
1938 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1939 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
1940 priv->_agn.reply_tx_stats.long_limit);
1941 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1942 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
1943 priv->_agn.reply_tx_stats.fifo_underrun);
1944 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1945 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
1946 priv->_agn.reply_tx_stats.drain_flow);
1947 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1948 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
1949 priv->_agn.reply_tx_stats.rfkill_flush);
1950 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1951 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
1952 priv->_agn.reply_tx_stats.life_expire);
1953 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1954 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
1955 priv->_agn.reply_tx_stats.dest_ps);
1956 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1957 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
1958 priv->_agn.reply_tx_stats.host_abort);
1959 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1960 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
1961 priv->_agn.reply_tx_stats.pp_delay);
1962 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1963 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
1964 priv->_agn.reply_tx_stats.sta_invalid);
1965 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1966 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
1967 priv->_agn.reply_tx_stats.frag_drop);
1968 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1969 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
1970 priv->_agn.reply_tx_stats.tid_disable);
1971 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1972 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
1973 priv->_agn.reply_tx_stats.fifo_flush);
1974 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1975 iwl_get_tx_fail_reason(
1976 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
1977 priv->_agn.reply_tx_stats.insuff_cf_poll);
1978 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1979 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
1980 priv->_agn.reply_tx_stats.fail_hw_drop);
1981 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1982 iwl_get_tx_fail_reason(
1983 TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
1984 priv->_agn.reply_tx_stats.sta_color_mismatch);
1985 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1986 priv->_agn.reply_tx_stats.unknown);
1987
1988 pos += scnprintf(buf + pos, bufsz - pos,
1989 "\nStatistics_Agg_TX_Error:\n");
1990
1991 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1992 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
1993 priv->_agn.reply_agg_tx_stats.underrun);
1994 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1995 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
1996 priv->_agn.reply_agg_tx_stats.bt_prio);
1997 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1998 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
1999 priv->_agn.reply_agg_tx_stats.few_bytes);
2000 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2001 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
2002 priv->_agn.reply_agg_tx_stats.abort);
2003 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2004 iwl_get_agg_tx_fail_reason(
2005 AGG_TX_STATE_LAST_SENT_TTL_MSK),
2006 priv->_agn.reply_agg_tx_stats.last_sent_ttl);
2007 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2008 iwl_get_agg_tx_fail_reason(
2009 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
2010 priv->_agn.reply_agg_tx_stats.last_sent_try);
2011 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2012 iwl_get_agg_tx_fail_reason(
2013 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
2014 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill);
2015 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2016 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
2017 priv->_agn.reply_agg_tx_stats.scd_query);
2018 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
2019 iwl_get_agg_tx_fail_reason(
2020 AGG_TX_STATE_TEST_BAD_CRC32_MSK),
2021 priv->_agn.reply_agg_tx_stats.bad_crc32);
2022 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2023 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
2024 priv->_agn.reply_agg_tx_stats.response);
2025 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2026 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
2027 priv->_agn.reply_agg_tx_stats.dump_tx);
2028 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
2029 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
2030 priv->_agn.reply_agg_tx_stats.delay_tx);
2031 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
2032 priv->_agn.reply_agg_tx_stats.unknown);
2033
2034 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2035 kfree(buf);
2036 return ret;
1067} 2037}
1068 2038
1069static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, 2039static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
@@ -1268,8 +2238,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
1268 if (sscanf(buf, "%d", &csr) != 1) 2238 if (sscanf(buf, "%d", &csr) != 1)
1269 return -EFAULT; 2239 return -EFAULT;
1270 2240
1271 if (priv->cfg->ops->lib->dump_csr) 2241 iwl_dump_csr(priv);
1272 priv->cfg->ops->lib->dump_csr(priv);
1273 2242
1274 return count; 2243 return count;
1275} 2244}
@@ -1359,13 +2328,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1359 int pos = 0; 2328 int pos = 0;
1360 ssize_t ret = -EFAULT; 2329 ssize_t ret = -EFAULT;
1361 2330
1362 if (priv->cfg->ops->lib->dump_fh) { 2331 ret = pos = iwl_dump_fh(priv, &buf, true);
1363 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true); 2332 if (buf) {
1364 if (buf) { 2333 ret = simple_read_from_buffer(user_buf,
1365 ret = simple_read_from_buffer(user_buf, 2334 count, ppos, buf, pos);
1366 count, ppos, buf, pos); 2335 kfree(buf);
1367 kfree(buf);
1368 }
1369 } 2336 }
1370 2337
1371 return ret; 2338 return ret;
@@ -1531,16 +2498,6 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
1531 return count; 2498 return count;
1532} 2499}
1533 2500
1534static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1535 char __user *user_buf,
1536 size_t count, loff_t *ppos)
1537{
1538 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1539
1540 return priv->cfg->ops->lib->debugfs_ops.bt_stats_read(file,
1541 user_buf, count, ppos);
1542}
1543
1544static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file, 2501static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
1545 const char __user *user_buf, 2502 const char __user *user_buf,
1546 size_t count, loff_t *ppos) { 2503 size_t count, loff_t *ppos) {
@@ -1572,12 +2529,10 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1572 int pos = 0; 2529 int pos = 0;
1573 char buf[200]; 2530 char buf[200];
1574 const size_t bufsz = sizeof(buf); 2531 const size_t bufsz = sizeof(buf);
1575 ssize_t ret;
1576 2532
1577 if (!priv->bt_enable_flag) { 2533 if (!priv->bt_enable_flag) {
1578 pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n"); 2534 pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n");
1579 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2535 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1580 return ret;
1581 } 2536 }
1582 pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n", 2537 pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n",
1583 priv->bt_enable_flag); 2538 priv->bt_enable_flag);
@@ -1608,8 +2563,7 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1608 break; 2563 break;
1609 } 2564 }
1610 2565
1611 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2566 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1612 return ret;
1613} 2567}
1614 2568
1615static ssize_t iwl_dbgfs_protection_mode_read(struct file *file, 2569static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
@@ -1658,18 +2612,6 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
1658 return count; 2612 return count;
1659} 2613}
1660 2614
1661static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1662 char __user *user_buf,
1663 size_t count, loff_t *ppos)
1664{
1665 struct iwl_priv *priv = file->private_data;
1666
1667 if (priv->cfg->ops->lib->debugfs_ops.reply_tx_error)
1668 return priv->cfg->ops->lib->debugfs_ops.reply_tx_error(
1669 file, user_buf, count, ppos);
1670 else
1671 return -ENODATA;
1672}
1673DEBUGFS_READ_FILE_OPS(rx_statistics); 2615DEBUGFS_READ_FILE_OPS(rx_statistics);
1674DEBUGFS_READ_FILE_OPS(tx_statistics); 2616DEBUGFS_READ_FILE_OPS(tx_statistics);
1675DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 2617DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1731,11 +2673,8 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1731 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); 2673 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1732 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); 2674 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1733 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); 2675 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1734 if (!priv->cfg->base_params->broken_powersave) { 2676 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
1735 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, 2677 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
1736 S_IWUSR | S_IRUSR);
1737 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
1738 }
1739 DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR); 2678 DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
1740 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); 2679 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1741 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); 2680 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
@@ -1758,29 +2697,20 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1758 DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); 2697 DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
1759 DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR); 2698 DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
1760 2699
1761 if (priv->cfg->base_params->sensitivity_calib_by_driver) 2700 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1762 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); 2701 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1763 if (priv->cfg->base_params->chain_noise_calib_by_driver) 2702 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1764 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 2703 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
1765 if (priv->cfg->base_params->ucode_tracing)
1766 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1767 if (iwl_bt_statistics(priv))
1768 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
1769 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); 2704 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
1770 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 2705 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1771 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 2706 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1772 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); 2707 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1773 if (iwl_advanced_bt_coexist(priv)) 2708 if (iwl_advanced_bt_coexist(priv))
1774 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 2709 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
1775 if (priv->cfg->base_params->sensitivity_calib_by_driver) 2710 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1776 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, 2711 &priv->disable_sens_cal);
1777 &priv->disable_sens_cal); 2712 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1778 if (priv->cfg->base_params->chain_noise_calib_by_driver) 2713 &priv->disable_chain_noise_cal);
1779 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1780 &priv->disable_chain_noise_cal);
1781 if (priv->cfg->base_params->tx_power_by_driver)
1782 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1783 &priv->disable_tx_power_cal);
1784 return 0; 2714 return 0;
1785 2715
1786err: 2716err:
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 68b953f2bdc7..214e4658c495 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -26,7 +26,6 @@
26/* 26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions. 27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions. 28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */ 29 */
31 30
32#ifndef __iwl_dev_h__ 31#ifndef __iwl_dev_h__
@@ -179,53 +178,12 @@ struct iwl_tx_queue {
179 178
180#define IWL_NUM_SCAN_RATES (2) 179#define IWL_NUM_SCAN_RATES (2)
181 180
182struct iwl4965_channel_tgd_info {
183 u8 type;
184 s8 max_power;
185};
186
187struct iwl4965_channel_tgh_info {
188 s64 last_radar_time;
189};
190
191#define IWL4965_MAX_RATE (33)
192
193struct iwl3945_clip_group {
194 /* maximum power level to prevent clipping for each rate, derived by
195 * us from this band's saturation power in EEPROM */
196 const s8 clip_powers[IWL_MAX_RATES];
197};
198
199/* current Tx power values to use, one for each rate for each channel.
200 * requested power is limited by:
201 * -- regulatory EEPROM limits for this channel
202 * -- hardware capabilities (clip-powers)
203 * -- spectrum management
204 * -- user preference (e.g. iwconfig)
205 * when requested power is set, base power index must also be set. */
206struct iwl3945_channel_power_info {
207 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
208 s8 power_table_index; /* actual (compenst'd) index into gain table */
209 s8 base_power_index; /* gain index for power at factory temp. */
210 s8 requested_power; /* power (dBm) requested for this chnl/rate */
211};
212
213/* current scan Tx power values to use, one for each scan rate for each
214 * channel. */
215struct iwl3945_scan_power_info {
216 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
217 s8 power_table_index; /* actual (compenst'd) index into gain table */
218 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
219};
220
221/* 181/*
222 * One for each channel, holds all channel setup data 182 * One for each channel, holds all channel setup data
223 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant 183 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
224 * with one another! 184 * with one another!
225 */ 185 */
226struct iwl_channel_info { 186struct iwl_channel_info {
227 struct iwl4965_channel_tgd_info tgd;
228 struct iwl4965_channel_tgh_info tgh;
229 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ 187 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
230 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for 188 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
231 * HT40 channel */ 189 * HT40 channel */
@@ -245,14 +203,6 @@ struct iwl_channel_info {
245 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ 203 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
246 u8 ht40_flags; /* flags copied from EEPROM */ 204 u8 ht40_flags; /* flags copied from EEPROM */
247 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ 205 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
248
249 /* Radio/DSP gain settings for each "normal" data Tx rate.
250 * These include, in addition to RF and DSP gain, a few fields for
251 * remembering/modifying gain settings (indexes). */
252 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
253
254 /* Radio/DSP gain settings for each scan rate, for directed scans. */
255 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
256}; 206};
257 207
258#define IWL_TX_FIFO_BK 0 /* shared */ 208#define IWL_TX_FIFO_BK 0 /* shared */
@@ -288,15 +238,6 @@ struct iwl_channel_info {
288#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) 238#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
289#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) 239#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
290 240
291struct iwl_frame {
292 union {
293 struct ieee80211_hdr frame;
294 struct iwl_tx_beacon_cmd beacon;
295 u8 raw[IEEE80211_FRAME_LEN];
296 u8 cmd[360];
297 } u;
298 struct list_head list;
299};
300 241
301#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 242#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
302#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 243#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
@@ -309,6 +250,7 @@ enum {
309 CMD_SIZE_HUGE = (1 << 0), 250 CMD_SIZE_HUGE = (1 << 0),
310 CMD_ASYNC = (1 << 1), 251 CMD_ASYNC = (1 << 1),
311 CMD_WANT_SKB = (1 << 2), 252 CMD_WANT_SKB = (1 << 2),
253 CMD_MAPPED = (1 << 3),
312}; 254};
313 255
314#define DEF_CMD_PAYLOAD_SIZE 320 256#define DEF_CMD_PAYLOAD_SIZE 320
@@ -416,6 +358,7 @@ struct iwl_ht_agg {
416#define IWL_EMPTYING_HW_QUEUE_ADDBA 2 358#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
417#define IWL_EMPTYING_HW_QUEUE_DELBA 3 359#define IWL_EMPTYING_HW_QUEUE_DELBA 3
418 u8 state; 360 u8 state;
361 u8 tx_fifo;
419}; 362};
420 363
421 364
@@ -499,9 +442,6 @@ struct iwl_station_priv_common {
499 * When mac80211 creates a station it reserves some space (hw->sta_data_size) 442 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
500 * in the structure for use by the driver. This structure is placed in that 443 * in the structure for use by the driver. This structure is placed in that
501 * space. 444 * space.
502 *
503 * The common struct MUST be first because it is shared between
504 * 3945 and agn!
505 */ 445 */
506struct iwl_station_priv { 446struct iwl_station_priv {
507 struct iwl_station_priv_common common; 447 struct iwl_station_priv_common common;
@@ -530,6 +470,10 @@ struct fw_desc {
530 u32 len; /* bytes */ 470 u32 len; /* bytes */
531}; 471};
532 472
473struct fw_img {
474 struct fw_desc code, data;
475};
476
533/* v1/v2 uCode file layout */ 477/* v1/v2 uCode file layout */
534struct iwl_ucode_header { 478struct iwl_ucode_header {
535 __le32 ver; /* major/minor/API/serial */ 479 __le32 ver; /* major/minor/API/serial */
@@ -586,6 +530,22 @@ enum iwl_ucode_tlv_type {
586 IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, 530 IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
587 IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, 531 IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
588 IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, 532 IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
533 /* 16 and 17 reserved for future use */
534 IWL_UCODE_TLV_FLAGS = 18,
535};
536
537/**
538 * enum iwl_ucode_tlv_flag - ucode API flags
539 * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
540 * was a separate TLV but moved here to save space.
541 * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
542 * treats good CRC threshold as a boolean
543 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
544 */
545enum iwl_ucode_tlv_flag {
546 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
547 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
548 IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
589}; 549};
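
Editor's note (not part of the patch): the new IWL_UCODE_TLV_FLAGS TLV collapses several capability indications into one bitmask. The sketch below shows, in self-contained C, how such bits can be tested once the TLV payload has been parsed into a plain 32-bit word; BIT() is reproduced so the example compiles on its own, and "ucode_flags" is a hypothetical variable, not a field from the driver.

/* Self-contained illustration of testing the TLV flag bits defined above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

enum iwl_ucode_tlv_flag {
	IWL_UCODE_TLV_FLAGS_PAN		= BIT(0),
	IWL_UCODE_TLV_FLAGS_NEWSCAN	= BIT(1),
	IWL_UCODE_TLV_FLAGS_MFP		= BIT(2),
};

static bool ucode_supports(uint32_t ucode_flags, enum iwl_ucode_tlv_flag f)
{
	return (ucode_flags & f) != 0;
}

int main(void)
{
	uint32_t ucode_flags = IWL_UCODE_TLV_FLAGS_PAN | IWL_UCODE_TLV_FLAGS_MFP;

	printf("PAN: %d NEWSCAN: %d MFP: %d\n",
	       ucode_supports(ucode_flags, IWL_UCODE_TLV_FLAGS_PAN),
	       ucode_supports(ucode_flags, IWL_UCODE_TLV_FLAGS_NEWSCAN),
	       ucode_supports(ucode_flags, IWL_UCODE_TLV_FLAGS_MFP));
	return 0;
}
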
590 550
591struct iwl_ucode_tlv { 551struct iwl_ucode_tlv {
@@ -619,14 +579,6 @@ struct iwl_tlv_ucode_header {
619 u8 data[0]; 579 u8 data[0];
620}; 580};
621 581
622struct iwl4965_ibss_seq {
623 u8 mac[ETH_ALEN];
624 u16 seq_num;
625 u16 frag_num;
626 unsigned long packet_time;
627 struct list_head list;
628};
629
630struct iwl_sensitivity_ranges { 582struct iwl_sensitivity_ranges {
631 u16 min_nrg_cck; 583 u16 min_nrg_cck;
632 u16 max_nrg_cck; 584 u16 max_nrg_cck;
@@ -700,7 +652,6 @@ struct iwl_hw_params {
700 u8 max_beacon_itrvl; /* in 1024 ms */ 652 u8 max_beacon_itrvl; /* in 1024 ms */
701 u32 max_inst_size; 653 u32 max_inst_size;
702 u32 max_data_size; 654 u32 max_data_size;
703 u32 max_bsm_size;
704 u32 ct_kill_threshold; /* value in hw-dependent units */ 655 u32 ct_kill_threshold; /* value in hw-dependent units */
705 u32 ct_kill_exit_threshold; /* value in hw-dependent units */ 656 u32 ct_kill_exit_threshold; /* value in hw-dependent units */
706 /* for 1000, 6000 series and up */ 657 /* for 1000, 6000 series and up */
@@ -722,8 +673,6 @@ struct iwl_hw_params {
722 * Naming convention -- 673 * Naming convention --
723 * iwl_ <-- Is part of iwlwifi 674 * iwl_ <-- Is part of iwlwifi
724 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) 675 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
725 * iwl4965_bg_ <-- Called from work queue context
726 * iwl4965_mac_ <-- mac80211 callback
727 * 676 *
728 ****************************************************************************/ 677 ****************************************************************************/
729extern void iwl_update_chain_flags(struct iwl_priv *priv); 678extern void iwl_update_chain_flags(struct iwl_priv *priv);
@@ -772,7 +721,6 @@ struct iwl_dma_ptr {
772 721
773/* Sensitivity and chain noise calibration */ 722/* Sensitivity and chain noise calibration */
774#define INITIALIZATION_VALUE 0xFFFF 723#define INITIALIZATION_VALUE 0xFFFF
775#define IWL4965_CAL_NUM_BEACONS 20
776#define IWL_CAL_NUM_BEACONS 16 724#define IWL_CAL_NUM_BEACONS 16
777#define MAXIMUM_ALLOWED_PATHLOSS 15 725#define MAXIMUM_ALLOWED_PATHLOSS 15
778 726
@@ -806,24 +754,19 @@ struct iwl_dma_ptr {
806#define NRG_NUM_PREV_STAT_L 20 754#define NRG_NUM_PREV_STAT_L 20
807#define NUM_RX_CHAINS 3 755#define NUM_RX_CHAINS 3
808 756
809enum iwl4965_false_alarm_state { 757enum iwlagn_false_alarm_state {
810 IWL_FA_TOO_MANY = 0, 758 IWL_FA_TOO_MANY = 0,
811 IWL_FA_TOO_FEW = 1, 759 IWL_FA_TOO_FEW = 1,
812 IWL_FA_GOOD_RANGE = 2, 760 IWL_FA_GOOD_RANGE = 2,
813}; 761};
814 762
815enum iwl4965_chain_noise_state { 763enum iwlagn_chain_noise_state {
816 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ 764 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
817 IWL_CHAIN_NOISE_ACCUMULATE, 765 IWL_CHAIN_NOISE_ACCUMULATE,
818 IWL_CHAIN_NOISE_CALIBRATED, 766 IWL_CHAIN_NOISE_CALIBRATED,
819 IWL_CHAIN_NOISE_DONE, 767 IWL_CHAIN_NOISE_DONE,
820}; 768};
821 769
822enum iwl4965_calib_enabled_state {
823 IWL_CALIB_DISABLED = 0, /* must be 0 */
824 IWL_CALIB_ENABLED = 1,
825};
826
827 770
828/* 771/*
829 * enum iwl_calib 772 * enum iwl_calib
@@ -847,12 +790,6 @@ struct iwl_calib_result {
847 size_t buf_len; 790 size_t buf_len;
848}; 791};
849 792
850enum ucode_type {
851 UCODE_NONE = 0,
852 UCODE_INIT,
853 UCODE_RT
854};
855
856/* Sensitivity calib data */ 793/* Sensitivity calib data */
857struct iwl_sensitivity_data { 794struct iwl_sensitivity_data {
858 u32 auto_corr_ofdm; 795 u32 auto_corr_ofdm;
@@ -1131,12 +1068,6 @@ struct iwl_force_reset {
1131 1068
1132/* extend beacon time format bit shifting */ 1069/* extend beacon time format bit shifting */
1133/* 1070/*
1134 * for _3945 devices
1135 * bits 31:24 - extended
1136 * bits 23:0 - interval
1137 */
1138#define IWL3945_EXT_BEACON_TIME_POS 24
1139/*
1140 * for _agn devices 1071 * for _agn devices
1141 * bits 31:22 - extended 1072 * bits 31:22 - extended
1142 * bits 21:0 - interval 1073 * bits 21:0 - interval
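
Editor's note (not part of the patch): the comment above describes the _agn extended beacon time word as a 10/22-bit split (bits 31:22 extended, bits 21:0 interval). The sketch below packs and unpacks such a word; only the 22-bit split is taken from the source, the macro and function names are illustrative.

/* Pack/unpack the 10-bit extended + 22-bit interval layout described above. */
#include <stdint.h>
#include <stdio.h>

#define AGN_EXT_BEACON_TIME_POS	22
#define AGN_INTERVAL_MASK	((1U << AGN_EXT_BEACON_TIME_POS) - 1)

static uint32_t pack_beacon_time(uint32_t extended, uint32_t interval)
{
	return (extended << AGN_EXT_BEACON_TIME_POS) |
	       (interval & AGN_INTERVAL_MASK);
}

int main(void)
{
	uint32_t word = pack_beacon_time(3, 102400);

	printf("packed=0x%08x extended=%u interval=%u\n", word,
	       word >> AGN_EXT_BEACON_TIME_POS, word & AGN_INTERVAL_MASK);
	return 0;
}
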
@@ -1164,10 +1095,12 @@ struct iwl_force_reset {
1164struct iwl_notification_wait { 1095struct iwl_notification_wait {
1165 struct list_head list; 1096 struct list_head list;
1166 1097
1167 void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt); 1098 void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
1099 void *data);
1100 void *fn_data;
1168 1101
1169 u8 cmd; 1102 u8 cmd;
1170 bool triggered; 1103 bool triggered, aborted;
1171}; 1104};
1172 1105
1173enum iwl_rxon_context_id { 1106enum iwl_rxon_context_id {
@@ -1228,6 +1161,8 @@ struct iwl_rxon_context {
1228 bool enabled, is_40mhz; 1161 bool enabled, is_40mhz;
1229 u8 extension_chan_offset; 1162 u8 extension_chan_offset;
1230 } ht; 1163 } ht;
1164
1165 bool last_tx_rejected;
1231}; 1166};
1232 1167
1233enum iwl_scan_type { 1168enum iwl_scan_type {
@@ -1244,13 +1179,10 @@ struct iwl_priv {
1244 struct ieee80211_rate *ieee_rates; 1179 struct ieee80211_rate *ieee_rates;
1245 struct iwl_cfg *cfg; 1180 struct iwl_cfg *cfg;
1246 1181
1247 /* temporary frame storage list */
1248 struct list_head free_frames;
1249 int frames_count;
1250
1251 enum ieee80211_band band; 1182 enum ieee80211_band band;
1252 int alloc_rxb_page;
1253 1183
1184 void (*pre_rx_handler)(struct iwl_priv *priv,
1185 struct iwl_rx_mem_buffer *rxb);
1254 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 1186 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
1255 struct iwl_rx_mem_buffer *rxb); 1187 struct iwl_rx_mem_buffer *rxb);
1256 1188
@@ -1305,16 +1237,12 @@ struct iwl_priv {
1305 spinlock_t hcmd_lock; /* protect hcmd */ 1237 spinlock_t hcmd_lock; /* protect hcmd */
1306 spinlock_t reg_lock; /* protect hw register access */ 1238 spinlock_t reg_lock; /* protect hw register access */
1307 struct mutex mutex; 1239 struct mutex mutex;
1308 struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
1309 1240
1310 /* basic pci-network driver stuff */ 1241 /* basic pci-network driver stuff */
1311 struct pci_dev *pci_dev; 1242 struct pci_dev *pci_dev;
1312 1243
1313 /* pci hardware address support */ 1244 /* pci hardware address support */
1314 void __iomem *hw_base; 1245 void __iomem *hw_base;
1315 u32 hw_rev;
1316 u32 hw_wa_rev;
1317 u8 rev_id;
1318 1246
1319 /* microcode/device supports multiple contexts */ 1247 /* microcode/device supports multiple contexts */
1320 u8 valid_contexts; 1248 u8 valid_contexts;
@@ -1325,6 +1253,8 @@ struct iwl_priv {
1325 /* max number of station keys */ 1253 /* max number of station keys */
1326 u8 sta_key_max_num; 1254 u8 sta_key_max_num;
1327 1255
1256 bool new_scan_threshold_behaviour;
1257
1328 /* EEPROM MAC addresses */ 1258 /* EEPROM MAC addresses */
1329 struct mac_address addresses[2]; 1259 struct mac_address addresses[2];
1330 1260
@@ -1332,13 +1262,10 @@ struct iwl_priv {
1332 int fw_index; /* firmware we're trying to load */ 1262 int fw_index; /* firmware we're trying to load */
1333 u32 ucode_ver; /* version of ucode, copy of 1263 u32 ucode_ver; /* version of ucode, copy of
1334 iwl_ucode.ver */ 1264 iwl_ucode.ver */
1335 struct fw_desc ucode_code; /* runtime inst */ 1265 struct fw_img ucode_rt;
1336 struct fw_desc ucode_data; /* runtime data original */ 1266 struct fw_img ucode_init;
1337 struct fw_desc ucode_data_backup; /* runtime data save/restore */ 1267
1338 struct fw_desc ucode_init; /* initialization inst */ 1268 enum iwlagn_ucode_subtype ucode_type;
1339 struct fw_desc ucode_init_data; /* initialization data */
1340 struct fw_desc ucode_boot; /* bootstrap inst */
1341 enum ucode_type ucode_type;
1342 u8 ucode_write_complete; /* the image write is complete */ 1269 u8 ucode_write_complete; /* the image write is complete */
1343 char firmware_name[25]; 1270 char firmware_name[25];
1344 1271
@@ -1346,10 +1273,10 @@ struct iwl_priv {
1346 1273
1347 struct iwl_switch_rxon switch_rxon; 1274 struct iwl_switch_rxon switch_rxon;
1348 1275
1349 /* 1st responses from initialize and runtime uCode images. 1276 struct {
1350 * _agn's initialize alive response contains some calibration data. */ 1277 u32 error_event_table;
1351 struct iwl_init_alive_resp card_alive_init; 1278 u32 log_event_table;
1352 struct iwl_alive_resp card_alive; 1279 } device_pointers;
1353 1280
1354 u16 active_rate; 1281 u16 active_rate;
1355 1282
@@ -1390,15 +1317,12 @@ struct iwl_priv {
1390 struct iwl_power_mgr power_data; 1317 struct iwl_power_mgr power_data;
1391 struct iwl_tt_mgmt thermal_throttle; 1318 struct iwl_tt_mgmt thermal_throttle;
1392 1319
1393 /* context information */
1394 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1395
1396 /* station table variables */ 1320 /* station table variables */
1397 1321
1398 /* Note: if lock and sta_lock are needed, lock must be acquired first */ 1322 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1399 spinlock_t sta_lock; 1323 spinlock_t sta_lock;
1400 int num_stations; 1324 int num_stations;
1401 struct iwl_station_entry stations[IWL_STATION_COUNT]; 1325 struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
1402 unsigned long ucode_key_table; 1326 unsigned long ucode_key_table;
1403 1327
1404 /* queue refcounts */ 1328 /* queue refcounts */
@@ -1422,101 +1346,81 @@ struct iwl_priv {
1422 /* Last Rx'd beacon timestamp */ 1346 /* Last Rx'd beacon timestamp */
1423 u64 timestamp; 1347 u64 timestamp;
1424 1348
1425 union { 1349 struct {
1426#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) 1350 __le32 flag;
1427 struct { 1351 struct statistics_general_common common;
1428 void *shared_virt; 1352 struct statistics_rx_non_phy rx_non_phy;
1429 dma_addr_t shared_phys; 1353 struct statistics_rx_phy rx_ofdm;
1430 1354 struct statistics_rx_ht_phy rx_ofdm_ht;
1431 struct delayed_work thermal_periodic; 1355 struct statistics_rx_phy rx_cck;
1432 struct delayed_work rfkill_poll; 1356 struct statistics_tx tx;
1433
1434 struct iwl3945_notif_statistics statistics;
1435#ifdef CONFIG_IWLWIFI_DEBUGFS 1357#ifdef CONFIG_IWLWIFI_DEBUGFS
1436 struct iwl3945_notif_statistics accum_statistics; 1358 struct statistics_bt_activity bt_activity;
1437 struct iwl3945_notif_statistics delta_statistics; 1359 __le32 num_bt_kills, accum_num_bt_kills;
1438 struct iwl3945_notif_statistics max_delta;
1439#endif
1440
1441 u32 sta_supp_rates;
1442 int last_rx_rssi; /* From Rx packet statistics */
1443
1444 /* Rx'd packet timing information */
1445 u32 last_beacon_time;
1446 u64 last_tsf;
1447
1448 /*
1449 * each calibration channel group in the
1450 * EEPROM has a derived clip setting for
1451 * each rate.
1452 */
1453 const struct iwl3945_clip_group clip_groups[5];
1454
1455 } _3945;
1456#endif 1360#endif
1457#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE) 1361 } statistics;
1458 struct {
1459 /* INT ICT Table */
1460 __le32 *ict_tbl;
1461 void *ict_tbl_vir;
1462 dma_addr_t ict_tbl_dma;
1463 dma_addr_t aligned_ict_tbl_dma;
1464 int ict_index;
1465 u32 inta;
1466 bool use_ict;
1467 /*
1468 * reporting the number of tids has AGG on. 0 means
1469 * no AGGREGATION
1470 */
1471 u8 agg_tids_count;
1472
1473 struct iwl_rx_phy_res last_phy_res;
1474 bool last_phy_res_valid;
1475
1476 struct completion firmware_loading_complete;
1477
1478 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
1479 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
1480
1481 /*
1482 * chain noise reset and gain commands are the
1483 * two extra calibration commands follows the standard
1484 * phy calibration commands
1485 */
1486 u8 phy_calib_chain_noise_reset_cmd;
1487 u8 phy_calib_chain_noise_gain_cmd;
1488
1489 struct iwl_notif_statistics statistics;
1490 struct iwl_bt_notif_statistics statistics_bt;
1491 /* counts reply_tx error */
1492 struct reply_tx_error_statistics reply_tx_stats;
1493 struct reply_agg_tx_error_statistics reply_agg_tx_stats;
1494#ifdef CONFIG_IWLWIFI_DEBUGFS 1362#ifdef CONFIG_IWLWIFI_DEBUGFS
1495 struct iwl_notif_statistics accum_statistics; 1363 struct {
1496 struct iwl_notif_statistics delta_statistics; 1364 struct statistics_general_common common;
1497 struct iwl_notif_statistics max_delta; 1365 struct statistics_rx_non_phy rx_non_phy;
1498 struct iwl_bt_notif_statistics accum_statistics_bt; 1366 struct statistics_rx_phy rx_ofdm;
1499 struct iwl_bt_notif_statistics delta_statistics_bt; 1367 struct statistics_rx_ht_phy rx_ofdm_ht;
1500 struct iwl_bt_notif_statistics max_delta_bt; 1368 struct statistics_rx_phy rx_cck;
1369 struct statistics_tx tx;
1370 struct statistics_bt_activity bt_activity;
1371 } accum_stats, delta_stats, max_delta_stats;
1501#endif 1372#endif
1502 1373
1503 /* notification wait support */ 1374 struct {
1504 struct list_head notif_waits; 1375 /* INT ICT Table */
1505 spinlock_t notif_wait_lock; 1376 __le32 *ict_tbl;
1506 wait_queue_head_t notif_waitq; 1377 void *ict_tbl_vir;
1507 1378 dma_addr_t ict_tbl_dma;
1508 /* remain-on-channel offload support */ 1379 dma_addr_t aligned_ict_tbl_dma;
1509 struct ieee80211_channel *hw_roc_channel; 1380 int ict_index;
1510 struct delayed_work hw_roc_work; 1381 u32 inta;
1511 enum nl80211_channel_type hw_roc_chantype; 1382 bool use_ict;
1512 int hw_roc_duration; 1383 /*
1513 1384 * reporting the number of tids has AGG on. 0 means
1514 struct sk_buff *offchan_tx_skb; 1385 * no AGGREGATION
1515 int offchan_tx_timeout; 1386 */
1516 struct ieee80211_channel *offchan_tx_chan; 1387 u8 agg_tids_count;
1517 } _agn; 1388
1518#endif 1389 struct iwl_rx_phy_res last_phy_res;
1519 }; 1390 bool last_phy_res_valid;
1391
1392 struct completion firmware_loading_complete;
1393
1394 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
1395 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
1396
1397 /*
1398 * chain noise reset and gain commands are the
1399 * two extra calibration commands follows the standard
1400 * phy calibration commands
1401 */
1402 u8 phy_calib_chain_noise_reset_cmd;
1403 u8 phy_calib_chain_noise_gain_cmd;
1404
1405 /* counts reply_tx error */
1406 struct reply_tx_error_statistics reply_tx_stats;
1407 struct reply_agg_tx_error_statistics reply_agg_tx_stats;
1408 /* notification wait support */
1409 struct list_head notif_waits;
1410 spinlock_t notif_wait_lock;
1411 wait_queue_head_t notif_waitq;
1412
1413 /* remain-on-channel offload support */
1414 struct ieee80211_channel *hw_roc_channel;
1415 struct delayed_work hw_roc_work;
1416 enum nl80211_channel_type hw_roc_chantype;
1417 int hw_roc_duration;
1418 bool hw_roc_setup;
1419
1420 struct sk_buff *offchan_tx_skb;
1421 int offchan_tx_timeout;
1422 struct ieee80211_channel *offchan_tx_chan;
1423 } _agn;
1520 1424
1521 /* bt coex */ 1425 /* bt coex */
1522 u8 bt_enable_flag; 1426 u8 bt_enable_flag;
@@ -1559,8 +1463,6 @@ struct iwl_priv {
1559 1463
1560 struct tasklet_struct irq_tasklet; 1464 struct tasklet_struct irq_tasklet;
1561 1465
1562 struct delayed_work init_alive_start;
1563 struct delayed_work alive_start;
1564 struct delayed_work scan_check; 1466 struct delayed_work scan_check;
1565 1467
1566 /* TX Power */ 1468 /* TX Power */
@@ -1589,12 +1491,10 @@ struct iwl_priv {
1589 struct work_struct txpower_work; 1491 struct work_struct txpower_work;
1590 u32 disable_sens_cal; 1492 u32 disable_sens_cal;
1591 u32 disable_chain_noise_cal; 1493 u32 disable_chain_noise_cal;
1592 u32 disable_tx_power_cal;
1593 struct work_struct run_time_calib_work; 1494 struct work_struct run_time_calib_work;
1594 struct timer_list statistics_periodic; 1495 struct timer_list statistics_periodic;
1595 struct timer_list ucode_trace; 1496 struct timer_list ucode_trace;
1596 struct timer_list watchdog; 1497 struct timer_list watchdog;
1597 bool hw_ready;
1598 1498
1599 struct iwl_event_log event_log; 1499 struct iwl_event_log event_log;
1600 1500
@@ -1658,21 +1558,24 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1658 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ 1558 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1659 if (priv->valid_contexts & BIT(ctx->ctxid)) 1559 if (priv->valid_contexts & BIT(ctx->ctxid))
1660 1560
1661static inline int iwl_is_associated(struct iwl_priv *priv, 1561static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
1662 enum iwl_rxon_context_id ctxid)
1663{ 1562{
1664 return (priv->contexts[ctxid].active.filter_flags & 1563 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1665 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1666} 1564}
1667 1565
1668static inline int iwl_is_any_associated(struct iwl_priv *priv) 1566static inline int iwl_is_associated(struct iwl_priv *priv,
1567 enum iwl_rxon_context_id ctxid)
1669{ 1568{
1670 return iwl_is_associated(priv, IWL_RXON_CTX_BSS); 1569 return iwl_is_associated_ctx(&priv->contexts[ctxid]);
1671} 1570}
1672 1571
1673static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) 1572static inline int iwl_is_any_associated(struct iwl_priv *priv)
1674{ 1573{
1675 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; 1574 struct iwl_rxon_context *ctx;
1575 for_each_context(priv, ctx)
1576 if (iwl_is_associated_ctx(ctx))
1577 return true;
1578 return false;
1676} 1579}
1677 1580
1678static inline int is_channel_valid(const struct iwl_channel_info *ch_info) 1581static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
@@ -1710,12 +1613,10 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1710static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page) 1613static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
1711{ 1614{
1712 __free_pages(page, priv->hw_params.rx_page_order); 1615 __free_pages(page, priv->hw_params.rx_page_order);
1713 priv->alloc_rxb_page--;
1714} 1616}
1715 1617
1716static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page) 1618static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
1717{ 1619{
1718 free_pages(page, priv->hw_params.rx_page_order); 1620 free_pages(page, priv->hw_params.rx_page_order);
1719 priv->alloc_rxb_page--;
1720} 1621}
1721#endif /* __iwl_dev_h__ */ 1622#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 4a487639d932..a635a7e75447 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 4cf864c664ee..f00172cb8a6d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 833194a2c639..c8397962632c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -142,6 +142,45 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
142 * 142 *
143******************************************************************************/ 143******************************************************************************/
144 144
145/*
146 * The device's EEPROM semaphore prevents conflicts between driver and uCode
147 * when accessing the EEPROM; each access is a series of pulses to/from the
148 * EEPROM chip, not a single event, so even reads could conflict if they
149 * weren't arbitrated by the semaphore.
150 */
151static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
152{
153 u16 count;
154 int ret;
155
156 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
157 /* Request semaphore */
158 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
159 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
160
161 /* See if we got it */
162 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
163 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
164 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
165 EEPROM_SEM_TIMEOUT);
166 if (ret >= 0) {
167 IWL_DEBUG_EEPROM(priv,
168 "Acquired semaphore after %d tries.\n",
169 count+1);
170 return ret;
171 }
172 }
173
174 return ret;
175}
176
177static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
178{
179 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
180 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
181
182}
183
145static int iwl_eeprom_verify_signature(struct iwl_priv *priv) 184static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
146{ 185{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; 186 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
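
Editor's note (not part of the patch): iwl_eeprom_acquire_semaphore(), added in the hunk above, requests EEPROM ownership and then polls for the grant, retrying a bounded number of times. The sketch below models that request/poll/retry structure in user space; poll_own_bit() stands in for polling the ownership bit in CSR_HW_IF_CONFIG_REG, and the retry and poll limits are placeholders rather than the driver's values.

/* User-space model of the bounded request/poll/retry acquisition above. */
#include <stdio.h>

#define SEM_RETRY_LIMIT	3
#define SEM_POLL_LOOPS	1000

static int grant_after = 5;		/* pretend the hardware grants on the 5th poll */

static int poll_own_bit(void)
{
	return --grant_after <= 0;	/* nonzero once "ownership" is granted */
}

static int acquire_semaphore(void)
{
	int count, poll;

	for (count = 0; count < SEM_RETRY_LIMIT; count++) {
		/* request ownership, then poll until it is granted */
		for (poll = 0; poll < SEM_POLL_LOOPS; poll++) {
			if (poll_own_bit())
				return 0;
		}
	}
	return -1;			/* timed out on every retry */
}

int main(void)
{
	printf("semaphore %s\n", acquire_semaphore() == 0 ? "acquired" : "busy");
	return 0;
}
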
@@ -188,18 +227,16 @@ static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
188 CSR_OTP_GP_REG_OTP_ACCESS_MODE); 227 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
189} 228}
190 229
191static int iwlcore_get_nvm_type(struct iwl_priv *priv) 230static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
192{ 231{
193 u32 otpgp; 232 u32 otpgp;
194 int nvm_type; 233 int nvm_type;
195 234
196 /* OTP only valid for CP/PP and after */ 235 /* OTP only valid for CP/PP and after */
197 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 236 switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
198 case CSR_HW_REV_TYPE_NONE: 237 case CSR_HW_REV_TYPE_NONE:
199 IWL_ERR(priv, "Unknown hardware type\n"); 238 IWL_ERR(priv, "Unknown hardware type\n");
200 return -ENOENT; 239 return -ENOENT;
201 case CSR_HW_REV_TYPE_3945:
202 case CSR_HW_REV_TYPE_4965:
203 case CSR_HW_REV_TYPE_5300: 240 case CSR_HW_REV_TYPE_5300:
204 case CSR_HW_REV_TYPE_5350: 241 case CSR_HW_REV_TYPE_5350:
205 case CSR_HW_REV_TYPE_5100: 242 case CSR_HW_REV_TYPE_5100:
@@ -217,26 +254,20 @@ static int iwlcore_get_nvm_type(struct iwl_priv *priv)
217 return nvm_type; 254 return nvm_type;
218} 255}
219 256
220const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
221{
222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
223 return &priv->eeprom[offset];
224}
225
226static int iwl_init_otp_access(struct iwl_priv *priv) 257static int iwl_init_otp_access(struct iwl_priv *priv)
227{ 258{
228 int ret; 259 int ret;
229 260
230 /* Enable 40MHz radio clock */ 261 /* Enable 40MHz radio clock */
231 _iwl_write32(priv, CSR_GP_CNTRL, 262 iwl_write32(priv, CSR_GP_CNTRL,
232 _iwl_read32(priv, CSR_GP_CNTRL) | 263 iwl_read32(priv, CSR_GP_CNTRL) |
233 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 264 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
234 265
235 /* wait for clock to be ready */ 266 /* wait for clock to be ready */
236 ret = iwl_poll_bit(priv, CSR_GP_CNTRL, 267 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
237 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 268 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
238 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 269 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
239 25000); 270 25000);
240 if (ret < 0) 271 if (ret < 0)
241 IWL_ERR(priv, "Time out access OTP\n"); 272 IWL_ERR(priv, "Time out access OTP\n");
242 else { 273 else {
@@ -263,17 +294,17 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
263 u32 r; 294 u32 r;
264 u32 otpgp; 295 u32 otpgp;
265 296
266 _iwl_write32(priv, CSR_EEPROM_REG, 297 iwl_write32(priv, CSR_EEPROM_REG,
267 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 298 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
268 ret = iwl_poll_bit(priv, CSR_EEPROM_REG, 299 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
269 CSR_EEPROM_REG_READ_VALID_MSK, 300 CSR_EEPROM_REG_READ_VALID_MSK,
270 CSR_EEPROM_REG_READ_VALID_MSK, 301 CSR_EEPROM_REG_READ_VALID_MSK,
271 IWL_EEPROM_ACCESS_TIMEOUT); 302 IWL_EEPROM_ACCESS_TIMEOUT);
272 if (ret < 0) { 303 if (ret < 0) {
273 IWL_ERR(priv, "Time out reading OTP[%d]\n", addr); 304 IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
274 return ret; 305 return ret;
275 } 306 }
276 r = _iwl_read_direct32(priv, CSR_EEPROM_REG); 307 r = iwl_read32(priv, CSR_EEPROM_REG);
277 /* check for ECC errors: */ 308 /* check for ECC errors: */
278 otpgp = iwl_read32(priv, CSR_OTP_GP_REG); 309 otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
279 if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { 310 if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
@@ -396,7 +427,7 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
396 * 427 *
397 * NOTE: This routine uses the non-debug IO access functions. 428 * NOTE: This routine uses the non-debug IO access functions.
398 */ 429 */
399int iwl_eeprom_init(struct iwl_priv *priv) 430int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
400{ 431{
401 __le16 *e; 432 __le16 *e;
402 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 433 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
@@ -406,7 +437,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
406 u16 validblockaddr = 0; 437 u16 validblockaddr = 0;
407 u16 cache_addr = 0; 438 u16 cache_addr = 0;
408 439
409 priv->nvm_device_type = iwlcore_get_nvm_type(priv); 440 priv->nvm_device_type = iwlcore_get_nvm_type(priv, hw_rev);
410 if (priv->nvm_device_type == -ENOENT) 441 if (priv->nvm_device_type == -ENOENT)
411 return -ENOENT; 442 return -ENOENT;
412 /* allocate eeprom */ 443 /* allocate eeprom */
@@ -429,7 +460,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
429 } 460 }
430 461
431 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 462 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
432 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); 463 ret = iwl_eeprom_acquire_semaphore(priv);
433 if (ret < 0) { 464 if (ret < 0) {
434 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); 465 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
435 ret = -ENOENT; 466 ret = -ENOENT;
@@ -444,9 +475,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
444 ret = -ENOENT; 475 ret = -ENOENT;
445 goto done; 476 goto done;
446 } 477 }
447 _iwl_write32(priv, CSR_EEPROM_GP, 478 iwl_write32(priv, CSR_EEPROM_GP,
448 iwl_read32(priv, CSR_EEPROM_GP) & 479 iwl_read32(priv, CSR_EEPROM_GP) &
449 ~CSR_EEPROM_GP_IF_OWNER_MSK); 480 ~CSR_EEPROM_GP_IF_OWNER_MSK);
450 481
451 iwl_set_bit(priv, CSR_OTP_GP_REG, 482 iwl_set_bit(priv, CSR_OTP_GP_REG,
452 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | 483 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
@@ -473,8 +504,8 @@ int iwl_eeprom_init(struct iwl_priv *priv)
473 for (addr = 0; addr < sz; addr += sizeof(u16)) { 504 for (addr = 0; addr < sz; addr += sizeof(u16)) {
474 u32 r; 505 u32 r;
475 506
476 _iwl_write32(priv, CSR_EEPROM_REG, 507 iwl_write32(priv, CSR_EEPROM_REG,
477 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 508 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
478 509
479 ret = iwl_poll_bit(priv, CSR_EEPROM_REG, 510 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
480 CSR_EEPROM_REG_READ_VALID_MSK, 511 CSR_EEPROM_REG_READ_VALID_MSK,
@@ -484,7 +515,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
484 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr); 515 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
485 goto done; 516 goto done;
486 } 517 }
487 r = _iwl_read_direct32(priv, CSR_EEPROM_REG); 518 r = iwl_read32(priv, CSR_EEPROM_REG);
488 e[addr / 2] = cpu_to_le16(r >> 16); 519 e[addr / 2] = cpu_to_le16(r >> 16);
489 } 520 }
490 } 521 }
@@ -496,7 +527,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
496 527
497 ret = 0; 528 ret = 0;
498done: 529done:
499 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); 530 iwl_eeprom_release_semaphore(priv);
500 531
501err: 532err:
502 if (ret) 533 if (ret)
@@ -719,13 +750,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
719 flags & EEPROM_CHANNEL_RADAR)) 750 flags & EEPROM_CHANNEL_RADAR))
720 ? "" : "not "); 751 ? "" : "not ");
721 752
722 /* Set the tx_power_user_lmt to the highest power
723 * supported by any channel */
724 if (eeprom_ch_info[ch].max_power_avg >
725 priv->tx_power_user_lmt)
726 priv->tx_power_user_lmt =
727 eeprom_ch_info[ch].max_power_avg;
728
729 ch_info++; 753 ch_info++;
730 } 754 }
731 } 755 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 20b66469d68f..c960c6fa009b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -110,10 +110,6 @@ enum {
110}; 110};
111 111
112/* SKU Capabilities */ 112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* 5000 and up */ 113/* 5000 and up */
118#define EEPROM_SKU_CAP_BAND_POS (4) 114#define EEPROM_SKU_CAP_BAND_POS (4)
119#define EEPROM_SKU_CAP_BAND_SELECTION \ 115#define EEPROM_SKU_CAP_BAND_SELECTION \
@@ -168,28 +164,6 @@ struct iwl_eeprom_enhanced_txpwr {
168 s8 mimo3_max; 164 s8 mimo3_max;
169} __packed; 165} __packed;
170 166
171/* 3945 Specific */
172#define EEPROM_3945_EEPROM_VERSION (0x2f)
173
174/* 4965 has two radio transmitters (and 3 radio receivers) */
175#define EEPROM_TX_POWER_TX_CHAINS (2)
176
177/* 4965 has room for up to 8 sets of txpower calibration data */
178#define EEPROM_TX_POWER_BANDS (8)
179
180/* 4965 factory calibration measures txpower gain settings for
181 * each of 3 target output levels */
182#define EEPROM_TX_POWER_MEASUREMENTS (3)
183
184/* 4965 Specific */
185/* 4965 driver does not work with txpower calibration version < 5 */
186#define EEPROM_4965_TX_POWER_VERSION (5)
187#define EEPROM_4965_EEPROM_VERSION (0x2f)
188#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
189#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
190#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
191#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
192
193/* 5000 Specific */ 167/* 5000 Specific */
194#define EEPROM_5000_TX_POWER_VERSION (4) 168#define EEPROM_5000_TX_POWER_VERSION (4)
195#define EEPROM_5000_EEPROM_VERSION (0x11A) 169#define EEPROM_5000_EEPROM_VERSION (0x11A)
@@ -282,90 +256,6 @@ struct iwl_eeprom_enhanced_txpwr {
282/* 2.4 GHz */ 256/* 2.4 GHz */
283extern const u8 iwl_eeprom_band_1[14]; 257extern const u8 iwl_eeprom_band_1[14];
284 258
285/*
286 * factory calibration data for one txpower level, on one channel,
287 * measured on one of the 2 tx chains (radio transmitter and associated
288 * antenna). EEPROM contains:
289 *
290 * 1) Temperature (degrees Celsius) of device when measurement was made.
291 *
292 * 2) Gain table index used to achieve the target measurement power.
293 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
294 *
295 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
296 *
297 * 4) RF power amplifier detector level measurement (not used).
298 */
299struct iwl_eeprom_calib_measure {
300 u8 temperature; /* Device temperature (Celsius) */
301 u8 gain_idx; /* Index into gain table */
302 u8 actual_pow; /* Measured RF output power, half-dBm */
303 s8 pa_det; /* Power amp detector level (not used) */
304} __packed;
305
306
307/*
308 * measurement set for one channel. EEPROM contains:
309 *
310 * 1) Channel number measured
311 *
312 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
313 * (a.k.a. "tx chains") (6 measurements altogether)
314 */
315struct iwl_eeprom_calib_ch_info {
316 u8 ch_num;
317 struct iwl_eeprom_calib_measure
318 measurements[EEPROM_TX_POWER_TX_CHAINS]
319 [EEPROM_TX_POWER_MEASUREMENTS];
320} __packed;
321
322/*
323 * txpower subband info.
324 *
325 * For each frequency subband, EEPROM contains the following:
326 *
327 * 1) First and last channels within range of the subband. "0" values
328 * indicate that this sample set is not being used.
329 *
330 * 2) Sample measurement sets for 2 channels close to the range endpoints.
331 */
332struct iwl_eeprom_calib_subband_info {
333 u8 ch_from; /* channel number of lowest channel in subband */
334 u8 ch_to; /* channel number of highest channel in subband */
335 struct iwl_eeprom_calib_ch_info ch1;
336 struct iwl_eeprom_calib_ch_info ch2;
337} __packed;
338
339
340/*
341 * txpower calibration info. EEPROM contains:
342 *
343 * 1) Factory-measured saturation power levels (maximum levels at which
344 * tx power amplifier can output a signal without too much distortion).
345 * There is one level for 2.4 GHz band and one for 5 GHz band. These
346 * values apply to all channels within each of the bands.
347 *
348 * 2) Factory-measured power supply voltage level. This is assumed to be
349 * constant (i.e. same value applies to all channels/bands) while the
350 * factory measurements are being made.
351 *
352 * 3) Up to 8 sets of factory-measured txpower calibration values.
353 * These are for different frequency ranges, since txpower gain
354 * characteristics of the analog radio circuitry vary with frequency.
355 *
356 * Not all sets need to be filled with data;
357 * struct iwl_eeprom_calib_subband_info contains range of channels
358 * (0 if unused) for each set of data.
359 */
360struct iwl_eeprom_calib_info {
361 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
362 u8 saturation_power52; /* half-dBm */
363 __le16 voltage; /* signed */
364 struct iwl_eeprom_calib_subband_info
365 band_info[EEPROM_TX_POWER_BANDS];
366} __packed;
367
368
369#define ADDRESS_MSK 0x0000FFFF 259#define ADDRESS_MSK 0x0000FFFF
370#define INDIRECT_TYPE_MSK 0x000F0000 260#define INDIRECT_TYPE_MSK 0x000F0000
371#define INDIRECT_HOST 0x00010000 261#define INDIRECT_HOST 0x00010000
@@ -398,103 +288,24 @@ struct iwl_eeprom_calib_info {
398#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ 288#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
399#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ 289#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
400 290
401#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
402#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
403
404/* Radio Config for 5000 and up */
405#define EEPROM_RF_CONFIG_TYPE_R3x3 0x0
406#define EEPROM_RF_CONFIG_TYPE_R2x2 0x1
407#define EEPROM_RF_CONFIG_TYPE_R1x2 0x2
408#define EEPROM_RF_CONFIG_TYPE_MAX 0x3 291#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
409 292
410/*
411 * Per-channel regulatory data.
412 *
413 * Each channel that *might* be supported by iwl has a fixed location
414 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
415 * txpower (MSB).
416 *
417 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
418 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
419 *
420 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
421 */
422#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
423#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
424#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
425
426/*
427 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
428 * 5.0 GHz channels 7, 8, 11, 12, 16
429 * (4915-5080MHz) (none of these is ever supported)
430 */
431#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
432#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
433
434/*
435 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
436 * (5170-5320MHz)
437 */
438#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
439#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
440
441/*
442 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
443 * (5500-5700MHz)
444 */
445#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
446#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
447
448/*
449 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
450 * (5725-5825MHz)
451 */
452#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
453#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
454
455/*
456 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
457 *
458 * The channel listed is the center of the lower 20 MHz half of the channel.
459 * The overall center frequency is actually 2 channels (10 MHz) above that,
460 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
461 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
462 * and the overall HT40 channel width centers on channel 3.
463 *
464 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
465 * control channel to which to tune. RXON also specifies whether the
466 * control channel is the upper or lower half of a HT40 channel.
467 *
468 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
469 */
470#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
471
472/*
473 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
474 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
475 */
476#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
477
478#define EEPROM_REGULATORY_BAND_NO_HT40 (0) 293#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
479 294
480struct iwl_eeprom_ops { 295struct iwl_eeprom_ops {
481 const u32 regulatory_bands[7]; 296 const u32 regulatory_bands[7];
482 int (*acquire_semaphore) (struct iwl_priv *priv);
483 void (*release_semaphore) (struct iwl_priv *priv);
484 u16 (*calib_version) (struct iwl_priv *priv);
485 const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset); 297 const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset);
486 void (*update_enhanced_txpower) (struct iwl_priv *priv); 298 void (*update_enhanced_txpower) (struct iwl_priv *priv);
487}; 299};
488 300
489 301
490int iwl_eeprom_init(struct iwl_priv *priv); 302int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
491void iwl_eeprom_free(struct iwl_priv *priv); 303void iwl_eeprom_free(struct iwl_priv *priv);
492int iwl_eeprom_check_version(struct iwl_priv *priv); 304int iwl_eeprom_check_version(struct iwl_priv *priv);
493int iwl_eeprom_check_sku(struct iwl_priv *priv); 305int iwl_eeprom_check_sku(struct iwl_priv *priv);
494const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); 306const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
495int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); 307int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
496u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); 308u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
497const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
498int iwl_init_channel_map(struct iwl_priv *priv); 309int iwl_init_channel_map(struct iwl_priv *priv);
499void iwl_free_channel_map(struct iwl_priv *priv); 310void iwl_free_channel_map(struct iwl_priv *priv);
500const struct iwl_channel_info *iwl_get_channel_info( 311const struct iwl_channel_info *iwl_get_channel_info(
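
With the 3945/4965-only constants and calibration structures gone, the header's public surface shrinks to the declarations on the new side, and iwl_eeprom_init() now takes the hardware revision from its caller. A rough sketch of how a probe path might use the slimmed-down API; the helper name and the error handling are illustrative, not part of this patch:

static int eeprom_bringup_sketch(struct iwl_priv *priv, u32 hw_rev)
{
	int ret;

	/* hw_rev is passed in, so NVM-type detection no longer reads it itself. */
	ret = iwl_eeprom_init(priv, hw_rev);
	if (ret < 0)
		return ret;

	ret = iwl_eeprom_check_version(priv);
	if (ret < 0)
		goto free;

	ret = iwl_init_channel_map(priv);
	if (ret < 0)
		goto free;

	return 0;

free:
	iwl_eeprom_free(priv);
	return ret;
}
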
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 474009a244d4..6dfa806aefec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -77,14 +77,14 @@
77/** 77/**
78 * Keep-Warm (KW) buffer base address. 78 * Keep-Warm (KW) buffer base address.
79 * 79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the 80 * Driver must allocate a 4KByte buffer that is used for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency 81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host 82 * DRAM access while the device is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency, 83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete. 84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 * 85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) 86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965 87 * of the buffer, which must be 4K aligned. Once this is set up, the device
88 * automatically invokes keep-warm accesses when normal accesses might not 88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response. 89 * be sufficient to maintain fast DRAM response.
90 * 90 *
@@ -97,7 +97,7 @@
97/** 97/**
98 * TFD Circular Buffers Base (CBBC) addresses 98 * TFD Circular Buffers Base (CBBC) addresses
99 * 99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident 100 * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) 101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte 103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
@@ -116,16 +116,16 @@
116/** 116/**
117 * Rx SRAM Control and Status Registers (RSCSR) 117 * Rx SRAM Control and Status Registers (RSCSR)
118 * 118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue 119 * These registers provide handshake between driver and device for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc. 120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx 121 * sent from uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can 122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer 123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs. 125 * mapping between RBDs and RBs.
126 * 126 *
127 * Driver must allocate host DRAM memory for the following, and set the 127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers: 128 * physical address of each into device registers:
129 * 129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver). 131 * entries (although any power of 2, up to 4096, is selectable by driver).
@@ -140,20 +140,20 @@
140 * Driver sets physical address [35:8] of base of RBD circular buffer 140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. 141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 * 142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers 143 * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of 144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets 145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. 146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 * 147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used 148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0): 148 * by driver):
150 * 31-12: Not used by driver 150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor 151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value) 152 * (device writes, driver reads this value)
153 * 153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must 154 * As the driver prepares Receive Buffers (RBs) for device to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries, 155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register, 156 * and update the device's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. 157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 * 158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make 159 * This "write" index corresponds to the *next* RBD that the driver will make
@@ -162,12 +162,12 @@
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must 162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before 163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below). 164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. 165 * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 * 166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular 167 * As the device fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above, 168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must 169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965. 170 * read this "read" index from DRAM after receiving an Rx interrupt from the device.
171 * 171 *
172 * The driver must also internally keep track of a third index, which is the 172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process 173 * next RBD to process. When receiving an Rx interrupt, driver should process
@@ -176,7 +176,7 @@
176 * driver may process the RB pointed to by RBD 0. Depending on volume of 176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process. 177 * traffic, there may be many RBs to process.
178 * 178 *
179 * If read index == write index, 4965 thinks there is no room to put new data. 179 * If read index == write index, device thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To 180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write" 181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254 182 * and "read" indexes; that is, make sure that there are no more than 254
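
The comment block above spells out the Rx handshake rules: the write index may only be advanced in multiples of 8, and a gap of at least 2 RBDs must be kept between "write" and "read" so that no more than 254 RBs are ever filled. A compact sketch of that rule; the queue bookkeeping fields and the power-of-two queue size are assumptions, only iwl_write_direct32 and FH_RSCSR_CHNL0_RBDCB_WPTR_REG come from this patch:

/* Sketch: post a new Rx write pointer only when the rules above allow it. */
static void rx_post_write_ptr_sketch(struct iwl_priv *priv,
				     u32 *write_actual, u32 write, u32 read,
				     u32 num_rbds /* power of 2, e.g. 256 */)
{
	u32 filled = (write - read) & (num_rbds - 1);

	/* Never let more than num_rbds - 2 (254 for a 256-entry CB) be filled. */
	if (filled > num_rbds - 2)
		return;

	/* The device expects the write index to advance in multiples of 8. */
	if (((write - *write_actual) & (num_rbds - 1)) < 8)
		return;

	*write_actual = write & ~7;
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, *write_actual);
}
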
@@ -303,7 +303,7 @@
303/** 303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR) 304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 * 305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels 306 * Device has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, 307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. 308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 * 309 *
@@ -326,7 +326,6 @@
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) 326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327 327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */ 328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8) 329#define FH50_TCSR_CHNL_NUM (8)
331 330
332/* TCSR: tx_config register values */ 331/* TCSR: tx_config register values */
@@ -424,7 +423,6 @@
424#define RX_LOW_WATERMARK 8 423#define RX_LOW_WATERMARK 8
425 424
426/* Size of one Rx buffer in host DRAM */ 425/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024) 426#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024) 427#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430 428
@@ -443,7 +441,7 @@ struct iwl_rb_status {
443 __le16 closed_fr_num; 441 __le16 closed_fr_num;
444 __le16 finished_rb_num; 442 __le16 finished_rb_num;
445 __le16 finished_fr_nam; 443 __le16 finished_fr_nam;
446 __le32 __unused; /* 3945 only */ 444 __le32 __unused;
447} __packed; 445} __packed;
448 446
449 447
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 02499f684683..8f0beb992ccf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -51,9 +51,7 @@ const char *get_cmd_string(u8 cmd)
51 IWL_CMD(REPLY_REMOVE_ALL_STA); 51 IWL_CMD(REPLY_REMOVE_ALL_STA);
52 IWL_CMD(REPLY_TXFIFO_FLUSH); 52 IWL_CMD(REPLY_TXFIFO_FLUSH);
53 IWL_CMD(REPLY_WEPKEY); 53 IWL_CMD(REPLY_WEPKEY);
54 IWL_CMD(REPLY_3945_RX);
55 IWL_CMD(REPLY_TX); 54 IWL_CMD(REPLY_TX);
56 IWL_CMD(REPLY_RATE_SCALE);
57 IWL_CMD(REPLY_LEDS_CMD); 55 IWL_CMD(REPLY_LEDS_CMD);
58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
59 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 57 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
@@ -145,10 +143,12 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
145{ 143{
146 int ret; 144 int ret;
147 145
148 BUG_ON(!(cmd->flags & CMD_ASYNC)); 146 if (WARN_ON(!(cmd->flags & CMD_ASYNC)))
147 return -EINVAL;
149 148
150 /* An asynchronous command can not expect an SKB to be set. */ 149 /* An asynchronous command can not expect an SKB to be set. */
151 BUG_ON(cmd->flags & CMD_WANT_SKB); 150 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
151 return -EINVAL;
152 152
153 /* Assign a generic callback if one is not provided */ 153 /* Assign a generic callback if one is not provided */
154 if (!cmd->callback) 154 if (!cmd->callback)
@@ -171,14 +171,15 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
171 int cmd_idx; 171 int cmd_idx;
172 int ret; 172 int ret;
173 173
174 BUG_ON(cmd->flags & CMD_ASYNC); 174 if (WARN_ON(cmd->flags & CMD_ASYNC))
175 return -EINVAL;
175 176
176 /* A synchronous command can not have a callback set. */ 177 /* A synchronous command can not have a callback set. */
177 BUG_ON(cmd->callback); 178 if (WARN_ON(cmd->callback))
179 return -EINVAL;
178 180
179 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", 181 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
180 get_cmd_string(cmd->id)); 182 get_cmd_string(cmd->id));
181 mutex_lock(&priv->sync_cmd_mutex);
182 183
183 set_bit(STATUS_HCMD_ACTIVE, &priv->status); 184 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
184 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", 185 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
@@ -189,7 +190,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
189 ret = cmd_idx; 190 ret = cmd_idx;
190 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", 191 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
191 get_cmd_string(cmd->id), ret); 192 get_cmd_string(cmd->id), ret);
192 goto out; 193 return ret;
193 } 194 }
194 195
195 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 196 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
@@ -229,8 +230,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
229 goto cancel; 230 goto cancel;
230 } 231 }
231 232
232 ret = 0; 233 return 0;
233 goto out;
234 234
235cancel: 235cancel:
236 if (cmd->flags & CMD_WANT_SKB) { 236 if (cmd->flags & CMD_WANT_SKB) {
@@ -248,8 +248,7 @@ fail:
248 iwl_free_pages(priv, cmd->reply_page); 248 iwl_free_pages(priv, cmd->reply_page);
249 cmd->reply_page = 0; 249 cmd->reply_page = 0;
250 } 250 }
251out: 251
252 mutex_unlock(&priv->sync_cmd_mutex);
253 return ret; 252 return ret;
254} 253}
255 254
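
Beyond the copyright bump, this file trades BUG_ON() for WARN_ON() plus an -EINVAL return on caller mistakes, and drops the old sync_cmd_mutex/goto-out bookkeeping in favour of direct returns. A small sketch of the resulting validation style; the helper itself is illustrative, only the flags and fields come from the driver:

/* Sketch: reject misuse with a warning and an error code, never a panic. */
static int check_host_cmd_sketch(struct iwl_host_cmd *cmd, bool async)
{
	if (async) {
		/* Async commands must be flagged CMD_ASYNC and cannot want an SKB. */
		if (WARN_ON(!(cmd->flags & CMD_ASYNC)) ||
		    WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;
	} else {
		/* Sync commands cannot be CMD_ASYNC and cannot set a callback. */
		if (WARN_ON(cmd->flags & CMD_ASYNC) ||
		    WARN_ON(cmd->callback))
			return -EINVAL;
	}
	return 0;
}
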
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 8821f088ba7f..41207a3645b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -64,30 +64,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
64 return --index & (n_bd - 1); 64 return --index & (n_bd - 1);
65} 65}
66 66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
68static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
69 struct fw_desc *desc)
70{
71 if (desc->v_addr)
72 dma_free_coherent(&pci_dev->dev, desc->len,
73 desc->v_addr, desc->p_addr);
74 desc->v_addr = NULL;
75 desc->len = 0;
76}
77
78static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/* 67/*
92 * we have 8 bits used like this: 68 * we have 8 bits used like this:
93 * 69 *
@@ -131,6 +107,19 @@ static inline void iwl_stop_queue(struct iwl_priv *priv,
131 ieee80211_stop_queue(priv->hw, ac); 107 ieee80211_stop_queue(priv->hw, ac);
132} 108}
133 109
110static inline void iwl_wake_any_queue(struct iwl_priv *priv,
111 struct iwl_rxon_context *ctx)
112{
113 u8 ac;
114
115 for (ac = 0; ac < AC_NUM; ac++) {
116 IWL_DEBUG_INFO(priv, "Queue Status: Q[%d] %s\n",
117 ac, (atomic_read(&priv->queue_stop_count[ac]) > 0)
118 ? "stopped" : "awake");
119 iwl_wake_queue(priv, &priv->txq[ctx->ac_to_queue[ac]]);
120 }
121}
122
134#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue 123#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
135#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue 124#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
136 125
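
The helper added above, iwl_wake_any_queue(), walks every access category of a context, logs whether its per-AC stop counter still marks it stopped, and wakes the Tx queue mapped to it. A one-line usage sketch; the calling situation is illustrative:

/* Sketch: resume traffic on all ACs of a context, e.g. after a scan ends. */
static void resume_traffic_sketch(struct iwl_priv *priv,
				  struct iwl_rxon_context *ctx)
{
	iwl_wake_any_queue(priv, ctx);
}
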
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
new file mode 100644
index 000000000000..aa4a90674452
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -0,0 +1,294 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#include "iwl-io.h"
30
31#define IWL_POLL_INTERVAL 10 /* microseconds */
32
33static inline void __iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
34{
35 iwl_write32(priv, reg, iwl_read32(priv, reg) | mask);
36}
37
38static inline void __iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
39{
40 iwl_write32(priv, reg, iwl_read32(priv, reg) & ~mask);
41}
42
43void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
44{
45 unsigned long flags;
46
47 spin_lock_irqsave(&priv->reg_lock, flags);
48 __iwl_set_bit(priv, reg, mask);
49 spin_unlock_irqrestore(&priv->reg_lock, flags);
50}
51
52void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
53{
54 unsigned long flags;
55
56 spin_lock_irqsave(&priv->reg_lock, flags);
57 __iwl_clear_bit(priv, reg, mask);
58 spin_unlock_irqrestore(&priv->reg_lock, flags);
59}
60
61int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
62 u32 bits, u32 mask, int timeout)
63{
64 int t = 0;
65
66 do {
67 if ((iwl_read32(priv, addr) & mask) == (bits & mask))
68 return t;
69 udelay(IWL_POLL_INTERVAL);
70 t += IWL_POLL_INTERVAL;
71 } while (t < timeout);
72
73 return -ETIMEDOUT;
74}
75
76int iwl_grab_nic_access_silent(struct iwl_priv *priv)
77{
78 int ret;
79
80 lockdep_assert_held(&priv->reg_lock);
81
82 /* this bit wakes up the NIC */
83 __iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
84
85 /*
86 * These bits say the device is running, and should keep running for
87 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
88 * but they do not indicate that embedded SRAM is restored yet;
89 * 3945 and 4965 have volatile SRAM, and must save/restore contents
90 * to/from host DRAM when sleeping/waking for power-saving.
91 * Each direction takes approximately 1/4 millisecond; with this
92 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
93 * series of register accesses are expected (e.g. reading Event Log),
94 * to keep device from sleeping.
95 *
96 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
97 * SRAM is okay/restored. We don't check that here because this call
98 * is just for hardware register access; but GP1 MAC_SLEEP check is a
99 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
100 *
101 * 5000 series and later (including 1000 series) have non-volatile SRAM,
102 * and do not save/restore SRAM when power cycling.
103 */
104 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
105 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
106 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
107 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
108 if (ret < 0) {
109 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
110 return -EIO;
111 }
112
113 return 0;
114}
115
116int iwl_grab_nic_access(struct iwl_priv *priv)
117{
118 int ret = iwl_grab_nic_access_silent(priv);
119 if (ret) {
120 u32 val = iwl_read32(priv, CSR_GP_CNTRL);
121 IWL_ERR(priv,
122 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
123 }
124
125 return ret;
126}
127
128void iwl_release_nic_access(struct iwl_priv *priv)
129{
130 lockdep_assert_held(&priv->reg_lock);
131 __iwl_clear_bit(priv, CSR_GP_CNTRL,
132 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
133}
134
135u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
136{
137 u32 value;
138 unsigned long flags;
139
140 spin_lock_irqsave(&priv->reg_lock, flags);
141 iwl_grab_nic_access(priv);
142 value = iwl_read32(priv, reg);
143 iwl_release_nic_access(priv);
144 spin_unlock_irqrestore(&priv->reg_lock, flags);
145
146 return value;
147}
148
149void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
150{
151 unsigned long flags;
152
153 spin_lock_irqsave(&priv->reg_lock, flags);
154 if (!iwl_grab_nic_access(priv)) {
155 iwl_write32(priv, reg, value);
156 iwl_release_nic_access(priv);
157 }
158 spin_unlock_irqrestore(&priv->reg_lock, flags);
159}
160
161int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
162 int timeout)
163{
164 int t = 0;
165
166 do {
167 if ((iwl_read_direct32(priv, addr) & mask) == mask)
168 return t;
169 udelay(IWL_POLL_INTERVAL);
170 t += IWL_POLL_INTERVAL;
171 } while (t < timeout);
172
173 return -ETIMEDOUT;
174}
175
176static inline u32 __iwl_read_prph(struct iwl_priv *priv, u32 reg)
177{
178 iwl_write32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
179 rmb();
180 return iwl_read32(priv, HBUS_TARG_PRPH_RDAT);
181}
182
183static inline void __iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
184{
185 iwl_write32(priv, HBUS_TARG_PRPH_WADDR,
186 ((addr & 0x0000FFFF) | (3 << 24)));
187 wmb();
188 iwl_write32(priv, HBUS_TARG_PRPH_WDAT, val);
189}
190
191u32 iwl_read_prph(struct iwl_priv *priv, u32 reg)
192{
193 unsigned long flags;
194 u32 val;
195
196 spin_lock_irqsave(&priv->reg_lock, flags);
197 iwl_grab_nic_access(priv);
198 val = __iwl_read_prph(priv, reg);
199 iwl_release_nic_access(priv);
200 spin_unlock_irqrestore(&priv->reg_lock, flags);
201 return val;
202}
203
204void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
205{
206 unsigned long flags;
207
208 spin_lock_irqsave(&priv->reg_lock, flags);
209 if (!iwl_grab_nic_access(priv)) {
210 __iwl_write_prph(priv, addr, val);
211 iwl_release_nic_access(priv);
212 }
213 spin_unlock_irqrestore(&priv->reg_lock, flags);
214}
215
216void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
217{
218 unsigned long flags;
219
220 spin_lock_irqsave(&priv->reg_lock, flags);
221 iwl_grab_nic_access(priv);
222 __iwl_write_prph(priv, reg, __iwl_read_prph(priv, reg) | mask);
223 iwl_release_nic_access(priv);
224 spin_unlock_irqrestore(&priv->reg_lock, flags);
225}
226
227void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
228 u32 bits, u32 mask)
229{
230 unsigned long flags;
231
232 spin_lock_irqsave(&priv->reg_lock, flags);
233 iwl_grab_nic_access(priv);
234 __iwl_write_prph(priv, reg,
235 (__iwl_read_prph(priv, reg) & mask) | bits);
236 iwl_release_nic_access(priv);
237 spin_unlock_irqrestore(&priv->reg_lock, flags);
238}
239
240void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
241{
242 unsigned long flags;
243 u32 val;
244
245 spin_lock_irqsave(&priv->reg_lock, flags);
246 iwl_grab_nic_access(priv);
247 val = __iwl_read_prph(priv, reg);
248 __iwl_write_prph(priv, reg, (val & ~mask));
249 iwl_release_nic_access(priv);
250 spin_unlock_irqrestore(&priv->reg_lock, flags);
251}
252
253void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
254 void *buf, int words)
255{
256 unsigned long flags;
257 int offs;
258 u32 *vals = buf;
259
260 spin_lock_irqsave(&priv->reg_lock, flags);
261 iwl_grab_nic_access(priv);
262
263 iwl_write32(priv, HBUS_TARG_MEM_RADDR, addr);
264 rmb();
265
266 for (offs = 0; offs < words; offs++)
267 vals[offs] = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
268
269 iwl_release_nic_access(priv);
270 spin_unlock_irqrestore(&priv->reg_lock, flags);
271}
272
273u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
274{
275 u32 value;
276
277 _iwl_read_targ_mem_words(priv, addr, &value, 1);
278
279 return value;
280}
281
282void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
283{
284 unsigned long flags;
285
286 spin_lock_irqsave(&priv->reg_lock, flags);
287 if (!iwl_grab_nic_access(priv)) {
288 iwl_write32(priv, HBUS_TARG_MEM_WADDR, addr);
289 wmb();
290 iwl_write32(priv, HBUS_TARG_MEM_WDAT, val);
291 iwl_release_nic_access(priv);
292 }
293 spin_unlock_irqrestore(&priv->reg_lock, flags);
294}
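
The new iwl-io.c moves the register helpers out of line: single direct or periphery accesses take priv->reg_lock and wake the MAC internally, while longer bursts are expected to hold the lock and the MAC_ACCESS_REQ grab once for the whole run. A usage sketch with placeholder register offsets (0x10 and 0x20 are not real device registers):

static void io_helpers_usage_sketch(struct iwl_priv *priv)
{
	unsigned long flags;
	u32 val;

	/* One-off accesses: locking and NIC wake/sleep are handled inside. */
	val = iwl_read_direct32(priv, 0x10);
	iwl_write_direct32(priv, 0x20, val | 0x1);

	/* A burst of accesses: keep the MAC awake for the whole sequence. */
	spin_lock_irqsave(&priv->reg_lock, flags);
	if (!iwl_grab_nic_access(priv)) {
		val = iwl_read32(priv, 0x10);
		iwl_write32(priv, 0x20, val);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
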
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 0203a3bbf872..869edc580ec6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -35,494 +35,58 @@
35#include "iwl-debug.h" 35#include "iwl-debug.h"
36#include "iwl-devtrace.h" 36#include "iwl-devtrace.h"
37 37
38/* 38static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
43 * A single _ prefix before a an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_read_direct32 calls the non-check version of
57 * _iwl_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
66static inline void _iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
67{ 39{
68 trace_iwlwifi_dev_iowrite8(priv, ofs, val); 40 trace_iwlwifi_dev_iowrite8(priv, ofs, val);
69 iowrite8(val, priv->hw_base + ofs); 41 iowrite8(val, priv->hw_base + ofs);
70} 42}
71 43
72#ifdef CONFIG_IWLWIFI_DEBUG 44static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
73static inline void __iwl_write8(const char *f, u32 l, struct iwl_priv *priv,
74 u32 ofs, u8 val)
75{
76 IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
77 _iwl_write8(priv, ofs, val);
78}
79#define iwl_write8(priv, ofs, val) \
80 __iwl_write8(__FILE__, __LINE__, priv, ofs, val)
81#else
82#define iwl_write8(priv, ofs, val) _iwl_write8(priv, ofs, val)
83#endif
84
85
86static inline void _iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
87{ 45{
88 trace_iwlwifi_dev_iowrite32(priv, ofs, val); 46 trace_iwlwifi_dev_iowrite32(priv, ofs, val);
89 iowrite32(val, priv->hw_base + ofs); 47 iowrite32(val, priv->hw_base + ofs);
90} 48}
91 49
92#ifdef CONFIG_IWLWIFI_DEBUG 50static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs)
93static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
94 u32 ofs, u32 val)
95{
96 IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
97 _iwl_write32(priv, ofs, val);
98}
99#define iwl_write32(priv, ofs, val) \
100 __iwl_write32(__FILE__, __LINE__, priv, ofs, val)
101#else
102#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
103#endif
104
105static inline u32 _iwl_read32(struct iwl_priv *priv, u32 ofs)
106{ 51{
107 u32 val = ioread32(priv->hw_base + ofs); 52 u32 val = ioread32(priv->hw_base + ofs);
108 trace_iwlwifi_dev_ioread32(priv, ofs, val); 53 trace_iwlwifi_dev_ioread32(priv, ofs, val);
109 return val; 54 return val;
110} 55}
111 56
112#ifdef CONFIG_IWLWIFI_DEBUG 57void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask);
113static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) 58void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask);
114{
115 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
116 return _iwl_read32(priv, ofs);
117}
118#define iwl_read32(priv, ofs) __iwl_read32(__FILE__, __LINE__, priv, ofs)
119#else
120#define iwl_read32(p, o) _iwl_read32(p, o)
121#endif
122
123#define IWL_POLL_INTERVAL 10 /* microseconds */
124static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr,
125 u32 bits, u32 mask, int timeout)
126{
127 int t = 0;
128
129 do {
130 if ((_iwl_read32(priv, addr) & mask) == (bits & mask))
131 return t;
132 udelay(IWL_POLL_INTERVAL);
133 t += IWL_POLL_INTERVAL;
134 } while (t < timeout);
135
136 return -ETIMEDOUT;
137}
138#ifdef CONFIG_IWLWIFI_DEBUG
139static inline int __iwl_poll_bit(const char *f, u32 l,
140 struct iwl_priv *priv, u32 addr,
141 u32 bits, u32 mask, int timeout)
142{
143 int ret = _iwl_poll_bit(priv, addr, bits, mask, timeout);
144 IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
145 addr, bits, mask,
146 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
147 return ret;
148}
149#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
150 __iwl_poll_bit(__FILE__, __LINE__, priv, addr, bits, mask, timeout)
151#else
152#define iwl_poll_bit(p, a, b, m, t) _iwl_poll_bit(p, a, b, m, t)
153#endif
154
155static inline void _iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
156{
157 _iwl_write32(priv, reg, _iwl_read32(priv, reg) | mask);
158}
159#ifdef CONFIG_IWLWIFI_DEBUG
160static inline void __iwl_set_bit(const char *f, u32 l,
161 struct iwl_priv *priv, u32 reg, u32 mask)
162{
163 u32 val = _iwl_read32(priv, reg) | mask;
164 IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
165 _iwl_write32(priv, reg, val);
166}
167static inline void iwl_set_bit(struct iwl_priv *p, u32 r, u32 m)
168{
169 unsigned long reg_flags;
170
171 spin_lock_irqsave(&p->reg_lock, reg_flags);
172 __iwl_set_bit(__FILE__, __LINE__, p, r, m);
173 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
174}
175#else
176static inline void iwl_set_bit(struct iwl_priv *p, u32 r, u32 m)
177{
178 unsigned long reg_flags;
179
180 spin_lock_irqsave(&p->reg_lock, reg_flags);
181 _iwl_set_bit(p, r, m);
182 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
183}
184#endif
185
186static inline void _iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
187{
188 _iwl_write32(priv, reg, _iwl_read32(priv, reg) & ~mask);
189}
190#ifdef CONFIG_IWLWIFI_DEBUG
191static inline void __iwl_clear_bit(const char *f, u32 l,
192 struct iwl_priv *priv, u32 reg, u32 mask)
193{
194 u32 val = _iwl_read32(priv, reg) & ~mask;
195 IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
196 _iwl_write32(priv, reg, val);
197}
198static inline void iwl_clear_bit(struct iwl_priv *p, u32 r, u32 m)
199{
200 unsigned long reg_flags;
201
202 spin_lock_irqsave(&p->reg_lock, reg_flags);
203 __iwl_clear_bit(__FILE__, __LINE__, p, r, m);
204 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
205}
206#else
207static inline void iwl_clear_bit(struct iwl_priv *p, u32 r, u32 m)
208{
209 unsigned long reg_flags;
210
211 spin_lock_irqsave(&p->reg_lock, reg_flags);
212 _iwl_clear_bit(p, r, m);
213 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
214}
215#endif
216
217static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
218{
219 int ret;
220 u32 val;
221
222 /* this bit wakes up the NIC */
223 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
224
225 /*
226 * These bits say the device is running, and should keep running for
227 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
228 * but they do not indicate that embedded SRAM is restored yet;
229 * 3945 and 4965 have volatile SRAM, and must save/restore contents
230 * to/from host DRAM when sleeping/waking for power-saving.
231 * Each direction takes approximately 1/4 millisecond; with this
232 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
233 * series of register accesses are expected (e.g. reading Event Log),
234 * to keep device from sleeping.
235 *
236 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
237 * SRAM is okay/restored. We don't check that here because this call
238 * is just for hardware register access; but GP1 MAC_SLEEP check is a
239 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
240 *
241 * 5000 series and later (including 1000 series) have non-volatile SRAM,
242 * and do not save/restore SRAM when power cycling.
243 */
244 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
245 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
246 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
247 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
248 if (ret < 0) {
249 val = _iwl_read32(priv, CSR_GP_CNTRL);
250 IWL_ERR(priv, "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
251 _iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
252 return -EIO;
253 }
254
255 return 0;
256}
257
258#ifdef CONFIG_IWLWIFI_DEBUG
259static inline int __iwl_grab_nic_access(const char *f, u32 l,
260 struct iwl_priv *priv)
261{
262 IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
263 return _iwl_grab_nic_access(priv);
264}
265#define iwl_grab_nic_access(priv) \
266 __iwl_grab_nic_access(__FILE__, __LINE__, priv)
267#else
268#define iwl_grab_nic_access(priv) \
269 _iwl_grab_nic_access(priv)
270#endif
271
272static inline void _iwl_release_nic_access(struct iwl_priv *priv)
273{
274 _iwl_clear_bit(priv, CSR_GP_CNTRL,
275 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
276}
277#ifdef CONFIG_IWLWIFI_DEBUG
278static inline void __iwl_release_nic_access(const char *f, u32 l,
279 struct iwl_priv *priv)
280{
281
282 IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
283 _iwl_release_nic_access(priv);
284}
285#define iwl_release_nic_access(priv) \
286 __iwl_release_nic_access(__FILE__, __LINE__, priv)
287#else
288#define iwl_release_nic_access(priv) \
289 _iwl_release_nic_access(priv)
290#endif
291
292static inline u32 _iwl_read_direct32(struct iwl_priv *priv, u32 reg)
293{
294 return _iwl_read32(priv, reg);
295}
296#ifdef CONFIG_IWLWIFI_DEBUG
297static inline u32 __iwl_read_direct32(const char *f, u32 l,
298 struct iwl_priv *priv, u32 reg)
299{
300 u32 value = _iwl_read_direct32(priv, reg);
301 IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
302 f, l);
303 return value;
304}
305static inline u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
306{
307 u32 value;
308 unsigned long reg_flags;
309
310 spin_lock_irqsave(&priv->reg_lock, reg_flags);
311 iwl_grab_nic_access(priv);
312 value = __iwl_read_direct32(__FILE__, __LINE__, priv, reg);
313 iwl_release_nic_access(priv);
314 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
315 return value;
316}
317
318#else
319static inline u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg)
320{
321 u32 value;
322 unsigned long reg_flags;
323
324 spin_lock_irqsave(&priv->reg_lock, reg_flags);
325 iwl_grab_nic_access(priv);
326 value = _iwl_read_direct32(priv, reg);
327 iwl_release_nic_access(priv);
328 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
329 return value;
330
331}
332#endif
333
334static inline void _iwl_write_direct32(struct iwl_priv *priv,
335 u32 reg, u32 value)
336{
337 _iwl_write32(priv, reg, value);
338}
339static inline void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
340{
341 unsigned long reg_flags;
342
343 spin_lock_irqsave(&priv->reg_lock, reg_flags);
344 if (!iwl_grab_nic_access(priv)) {
345 _iwl_write_direct32(priv, reg, value);
346 iwl_release_nic_access(priv);
347 }
348 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
349}
350
351static inline void iwl_write_reg_buf(struct iwl_priv *priv,
352 u32 reg, u32 len, u32 *values)
353{
354 u32 count = sizeof(u32);
355
356 if ((priv != NULL) && (values != NULL)) {
357 for (; 0 < len; len -= count, reg += count, values++)
358 iwl_write_direct32(priv, reg, *values);
359 }
360}
361
362static inline int _iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr,
363 u32 mask, int timeout)
364{
365 int t = 0;
366
367 do {
368 if ((iwl_read_direct32(priv, addr) & mask) == mask)
369 return t;
370 udelay(IWL_POLL_INTERVAL);
371 t += IWL_POLL_INTERVAL;
372 } while (t < timeout);
373
374 return -ETIMEDOUT;
375}
376
377#ifdef CONFIG_IWLWIFI_DEBUG
378static inline int __iwl_poll_direct_bit(const char *f, u32 l,
379 struct iwl_priv *priv,
380 u32 addr, u32 mask, int timeout)
381{
382 int ret = _iwl_poll_direct_bit(priv, addr, mask, timeout);
383
384 if (unlikely(ret == -ETIMEDOUT))
385 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
386 "timedout - %s %d\n", addr, mask, f, l);
387 else
388 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
389 "- %s %d\n", addr, mask, ret, f, l);
390 return ret;
391}
392#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
393 __iwl_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
394#else
395#define iwl_poll_direct_bit _iwl_poll_direct_bit
396#endif
397
398static inline u32 _iwl_read_prph(struct iwl_priv *priv, u32 reg)
399{
400 _iwl_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
401 rmb();
402 return _iwl_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
403}
404static inline u32 iwl_read_prph(struct iwl_priv *priv, u32 reg)
405{
406 unsigned long reg_flags;
407 u32 val;
408
409 spin_lock_irqsave(&priv->reg_lock, reg_flags);
410 iwl_grab_nic_access(priv);
411 val = _iwl_read_prph(priv, reg);
412 iwl_release_nic_access(priv);
413 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
414 return val;
415}
416
417static inline void _iwl_write_prph(struct iwl_priv *priv,
418 u32 addr, u32 val)
419{
420 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
421 ((addr & 0x0000FFFF) | (3 << 24)));
422 wmb();
423 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
424}
425
426static inline void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
427{
428 unsigned long reg_flags;
429
430 spin_lock_irqsave(&priv->reg_lock, reg_flags);
431 if (!iwl_grab_nic_access(priv)) {
432 _iwl_write_prph(priv, addr, val);
433 iwl_release_nic_access(priv);
434 }
435 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
436}
437
438#define _iwl_set_bits_prph(priv, reg, mask) \
439 _iwl_write_prph(priv, reg, (_iwl_read_prph(priv, reg) | mask))
440
441static inline void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
442{
443 unsigned long reg_flags;
444
445 spin_lock_irqsave(&priv->reg_lock, reg_flags);
446 iwl_grab_nic_access(priv);
447 _iwl_set_bits_prph(priv, reg, mask);
448 iwl_release_nic_access(priv);
449 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
450}
451
452#define _iwl_set_bits_mask_prph(priv, reg, bits, mask) \
453 _iwl_write_prph(priv, reg, ((_iwl_read_prph(priv, reg) & mask) | bits))
454 59
455static inline void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, 60int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
456 u32 bits, u32 mask) 61 u32 bits, u32 mask, int timeout);
457{ 62int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
458 unsigned long reg_flags; 63 int timeout);
459 64
460 spin_lock_irqsave(&priv->reg_lock, reg_flags); 65int iwl_grab_nic_access_silent(struct iwl_priv *priv);
461 iwl_grab_nic_access(priv); 66int iwl_grab_nic_access(struct iwl_priv *priv);
462 _iwl_set_bits_mask_prph(priv, reg, bits, mask); 67void iwl_release_nic_access(struct iwl_priv *priv);
463 iwl_release_nic_access(priv);
464 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
465}
466 68
467static inline void iwl_clear_bits_prph(struct iwl_priv 69u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg);
468 *priv, u32 reg, u32 mask) 70void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value);
469{
470 unsigned long reg_flags;
471 u32 val;
472 71
473 spin_lock_irqsave(&priv->reg_lock, reg_flags);
474 iwl_grab_nic_access(priv);
475 val = _iwl_read_prph(priv, reg);
476 _iwl_write_prph(priv, reg, (val & ~mask));
477 iwl_release_nic_access(priv);
478 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
479}
480
481static inline u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
482{
483 unsigned long reg_flags;
484 u32 value;
485 72
486 spin_lock_irqsave(&priv->reg_lock, reg_flags); 73u32 iwl_read_prph(struct iwl_priv *priv, u32 reg);
487 iwl_grab_nic_access(priv); 74void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val);
75void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
76void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
77 u32 bits, u32 mask);
78void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask);
488 79
489 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); 80void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr,
490 rmb(); 81 void *buf, int words);
491 value = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
492 82
493 iwl_release_nic_access(priv); 83#define iwl_read_targ_mem_words(priv, addr, buf, bufsize) \
494 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 84 do { \
495 return value; 85 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
496} 86 _iwl_read_targ_mem_words(priv, addr, buf, \
87 (bufsize) / sizeof(u32));\
88 } while (0)
497 89
498static inline void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) 90u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr);
499{ 91void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val);
500 unsigned long reg_flags;
501
502 spin_lock_irqsave(&priv->reg_lock, reg_flags);
503 if (!iwl_grab_nic_access(priv)) {
504 _iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
505 wmb();
506 _iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
507 iwl_release_nic_access(priv);
508 }
509 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
510}
511
512static inline void iwl_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
513 u32 len, u32 *values)
514{
515 unsigned long reg_flags;
516
517 spin_lock_irqsave(&priv->reg_lock, reg_flags);
518 if (!iwl_grab_nic_access(priv)) {
519 _iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
520 wmb();
521 for (; 0 < len; len -= sizeof(u32), values++)
522 _iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
523
524 iwl_release_nic_access(priv);
525 }
526 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
527}
528#endif 92#endif
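
With the header reduced to thin inline MMIO accessors plus prototypes, the only macro left is iwl_read_targ_mem_words(), whose BUILD_BUG_ON() makes a byte count that is not a multiple of 4 fail at compile time. A short sketch; the SRAM address parameter is a placeholder:

/* Sketch: read a block of device SRAM into a word-aligned buffer. */
static void read_sram_block_sketch(struct iwl_priv *priv, u32 sram_addr)
{
	u32 buf[8];	/* 32 bytes: a multiple of sizeof(u32), so this compiles */

	iwl_read_targ_mem_words(priv, sram_addr, buf, sizeof(buf));

	/* Something like "u8 raw[30]" would trip the BUILD_BUG_ON instead of
	 * silently rounding the word count. */
}
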
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index d7f2a0bb32c9..439187f903c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -48,8 +48,21 @@ module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, " 48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking"); 49 "1=On(RF On)/Off(RF Off), 2=blinking");
50 50
51/* Throughput OFF time(ms) ON time (ms)
52 * >300 25 25
53 * >200 to 300 40 40
54 * >100 to 200 55 55
55 * >70 to 100 65 65
56 * >50 to 70 75 75
57 * >20 to 50 85 85
58 * >10 to 20 95 95
59 * >5 to 10 110 110
60 * >1 to 5 130 130
61 * >0 to 1 167 167
62 * <=0 SOLID ON
63 */
51static const struct ieee80211_tpt_blink iwl_blink[] = { 64static const struct ieee80211_tpt_blink iwl_blink[] = {
52 { .throughput = 0 * 1024 - 1, .blink_time = 334 }, 65 { .throughput = 0, .blink_time = 334 },
53 { .throughput = 1 * 1024 - 1, .blink_time = 260 }, 66 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
54 { .throughput = 5 * 1024 - 1, .blink_time = 220 }, 67 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
55 { .throughput = 10 * 1024 - 1, .blink_time = 190 }, 68 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
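
The new comment block documents the throughput-to-blink mapping that the iwl_blink[] table encodes: each entry's blink_time is the sum of the ON and OFF times from the comment (25 ms + 25 ms = 50 for the >300 row, 167 ms + 167 ms = 334 for the >0-to-1 row). The selection itself is done by mac80211's throughput LED trigger; the helper below is only an illustration of how such a table reads, not code from this patch:

/* Illustration only: pick the total blink period (on + off) for a
 * measured throughput from a table sorted by ascending threshold. */
static u32 example_pick_blink_time(const struct ieee80211_tpt_blink *tbl,
				   unsigned int n, u32 tpt)
{
	u32 blink_time = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (tpt < tbl[i].throughput)
			break;
		blink_time = tbl[i].blink_time;	/* last threshold <= tpt wins */
	}
	return blink_time;
}
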
@@ -61,10 +74,16 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
61 { .throughput = 300 * 1024 - 1, .blink_time = 50 }, 74 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
62}; 75};
63 76
77/* Set led register off */
78void iwlagn_led_enable(struct iwl_priv *priv)
79{
80 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
81}
82
64/* 83/*
65 * Adjust led blink rate to compensate on a MAC Clock difference on every HW 84 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
66 * Led blink rate analysis showed an average deviation of 0% on 3945, 85 * Led blink rate analysis showed an average deviation of 20% on 5000 series
67 * 5% on 4965 HW and 20% on 5000 series and up. 86 * and up.
68 * Need to compensate on the led on/off time per HW according to the deviation 87 * Need to compensate on the led on/off time per HW according to the deviation
69 * to achieve the desired led frequency 88 * to achieve the desired led frequency
70 * The calculation is: (100-averageDeviation)/100 * blinkTime 89 * The calculation is: (100-averageDeviation)/100 * blinkTime
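
iwl_blink_compensation() (visible in the next hunk) implements the "(100 - averageDeviation)/100 * blinkTime" correction as a fixed-point multiply followed by a 6-bit shift, so the per-device led_compensation value is effectively a fraction of 64. A worked example with an assumed compensation value:

/*
 * Assumed numbers for illustration: with the ~20% deviation quoted above,
 * a device would carry a led_compensation of roughly (100 - 20)/100 * 64 = 51,
 * so a requested 110 ms interval becomes
 *
 *	(110 * 51) >> 6  =  5610 / 64  =  87 ms
 *
 * which matches the intended 80% of 110 ms (88 ms) to within the rounding
 * of the shift.
 */
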
@@ -84,6 +103,24 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
84 return (u8)((time * compensation) >> 6); 103 return (u8)((time * compensation) >> 6);
85} 104}
86 105
106static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
107{
108 struct iwl_host_cmd cmd = {
109 .id = REPLY_LEDS_CMD,
110 .len = sizeof(struct iwl_led_cmd),
111 .data = led_cmd,
112 .flags = CMD_ASYNC,
113 .callback = NULL,
114 };
115 u32 reg;
116
117 reg = iwl_read32(priv, CSR_LED_REG);
118 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
119 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
120
121 return iwl_send_cmd(priv, &cmd);
122}
123
87/* Set led pattern command */ 124/* Set led pattern command */
88static int iwl_led_cmd(struct iwl_priv *priv, 125static int iwl_led_cmd(struct iwl_priv *priv,
89 unsigned long on, 126 unsigned long on,
@@ -101,6 +138,11 @@ static int iwl_led_cmd(struct iwl_priv *priv,
101 if (priv->blink_on == on && priv->blink_off == off) 138 if (priv->blink_on == on && priv->blink_off == off)
102 return 0; 139 return 0;
103 140
141 if (off == 0) {
142 /* led is SOLID_ON */
143 on = IWL_LED_SOLID;
144 }
145
104 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", 146 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
105 priv->cfg->base_params->led_compensation); 147 priv->cfg->base_params->led_compensation);
106 led_cmd.on = iwl_blink_compensation(priv, on, 148 led_cmd.on = iwl_blink_compensation(priv, on,
@@ -108,7 +150,7 @@ static int iwl_led_cmd(struct iwl_priv *priv,
108 led_cmd.off = iwl_blink_compensation(priv, off, 150 led_cmd.off = iwl_blink_compensation(priv, off,
109 priv->cfg->base_params->led_compensation); 151 priv->cfg->base_params->led_compensation);
110 152
111 ret = priv->cfg->ops->led->cmd(priv, &led_cmd); 153 ret = iwl_send_led_cmd(priv, &led_cmd);
112 if (!ret) { 154 if (!ret) {
113 priv->blink_on = on; 155 priv->blink_on = on;
114 priv->blink_off = off; 156 priv->blink_off = off;
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 101eef12b3bb..1c93dfef6933 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -50,6 +50,7 @@ enum iwl_led_mode {
50 IWL_LED_BLINK, 50 IWL_LED_BLINK,
51}; 51};
52 52
53void iwlagn_led_enable(struct iwl_priv *priv);
53void iwl_leds_init(struct iwl_priv *priv); 54void iwl_leds_init(struct iwl_priv *priv);
54void iwl_leds_exit(struct iwl_priv *priv); 55void iwl_leds_exit(struct iwl_priv *priv);
55 56
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 576795e2c75b..595c930b28ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -188,9 +188,10 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
188 table = range_0; 188 table = range_0;
189 } 189 }
190 190
191 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM); 191 if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
192 192 memset(cmd, 0, sizeof(*cmd));
193 *cmd = table[lvl].cmd; 193 else
194 *cmd = table[lvl].cmd;
194 195
195 if (period == 0) { 196 if (period == 0) {
196 skip = 0; 197 skip = 0;
@@ -354,16 +355,12 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
354 355
355 dtimper = priv->hw->conf.ps_dtim_period ?: 1; 356 dtimper = priv->hw->conf.ps_dtim_period ?: 1;
356 357
357 if (priv->cfg->base_params->broken_powersave) 358 if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
358 iwl_power_sleep_cam_cmd(priv, cmd);
359 else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
360 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); 359 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
361 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection && 360 else if (iwl_tt_is_low_power_state(priv)) {
362 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
363 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) {
364 /* in thermal throttling low power state */ 361 /* in thermal throttling low power state */
365 iwl_static_sleep_cmd(priv, cmd, 362 iwl_static_sleep_cmd(priv, cmd,
366 priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper); 363 iwl_tt_current_power_mode(priv), dtimper);
367 } else if (!enabled) 364 } else if (!enabled)
368 iwl_power_sleep_cam_cmd(priv, cmd); 365 iwl_power_sleep_cam_cmd(priv, cmd);
369 else if (priv->power_data.debug_sleep_level_override >= 0) 366 else if (priv->power_data.debug_sleep_level_override >= 0)
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index fe012032c28c..59635d784e27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 86f5123bccda..f00d188b2cfc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -91,7 +91,6 @@
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) 91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) 92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) 93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) 94#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ 95#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) 96#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
@@ -99,152 +98,6 @@
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
100 99
101/** 100/**
102 * BSM (Bootstrap State Machine)
103 *
104 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
105 * in special SRAM that does not power down when the embedded control
106 * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
107 *
108 * When powering back up after sleeps (or during initial uCode load), the BSM
109 * internally loads the short bootstrap program from the special SRAM into the
110 * embedded processor's instruction SRAM, and starts the processor so it runs
111 * the bootstrap program.
112 *
113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data
114 * images for a uCode program from host DRAM locations. The host driver
115 * indicates DRAM locations and sizes for instruction and data images via the
116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
117 * the new program starts automatically.
118 *
119 * The uCode used for open-source drivers includes two programs:
120 *
121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver.
127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it
130 * is ready to be used.
131 *
132 * When initializing the NIC, the host driver does the following procedure:
133 *
134 * 1) Load bootstrap program (instructions only, no data image for bootstrap)
135 * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
136 *
137 * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
138 * images in host DRAM.
139 *
140 * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
141 * BSM_WR_MEM_SRC_REG = 0
142 * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
143 * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image
144 *
145 * 4) Load bootstrap into instruction SRAM:
146 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
147 *
148 * 5) Wait for load completion:
149 * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
150 *
151 * 6) Enable future boot loads whenever NIC's power management triggers it:
152 * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
153 *
154 * 7) Start the NIC by removing all reset bits:
155 * CSR_RESET = 0
156 *
157 * The bootstrap uCode (already in instruction SRAM) loads initialization
158 * uCode. Initialization uCode performs data initialization, sends
159 * "initialize alive" notification to host, and waits for a signal from
160 * host to load runtime code.
161 *
162 * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
163 * images in host DRAM. The last register loaded must be the instruction
164 * byte count register ("1" in MSbit tells initialization uCode to load
165 * the runtime uCode):
166 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
167 *
168 * 5) Wait for "alive" notification, then issue normal runtime commands.
169 *
170 * Data caching during power-downs:
171 *
172 * Just before the embedded controller powers down (e.g for automatic
173 * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
174 * a current snapshot of the embedded processor's data SRAM into host DRAM.
175 * This caches the data while the embedded processor's memory is powered down.
176 * Location and size are controlled by BSM_DRAM_DATA_* registers.
177 *
178 * NOTE: Instruction SRAM does not need to be saved, since that doesn't
179 * change during operation; the original image (from uCode distribution
180 * file) can be used for reload.
181 *
182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
183 * at the BSM_DRAM_* registers, which now point to the runtime instruction
184 * image and the cached (modified) runtime data (*not* the initialization
185 * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
186 * uCode from where it left off before the power-down.
187 *
188 * NOTE: Initialization uCode does *not* run as part of the save/restore
189 * procedure.
190 *
191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory).
195 *
196 * Note that, during normal operation, the host DRAM that held the initial
197 * startup data for the runtime code is now being used as a backup data cache
198 * for modified data! If you need to completely re-initialize the NIC, make
199 * sure that you use the runtime data image from the uCode distribution file,
200 * not the modified/saved runtime data. You may want to store a separate
201 * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
202 */
203
204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208
209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800)
212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218
219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
221 * NOTE: 3945 pointers use bits 31:0 of DRAM address.
222 * 4965 pointers use bits 35:4 of DRAM address.
223 */
224#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
225#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
226#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
227#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
228
229/*
230 * BSM special memory, stays powered on during power-save sleeps.
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */
235
236
237/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
239#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
240#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
241#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
242#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
243#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
244#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
245#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
246
247/**
248 * Tx Scheduler 101 * Tx Scheduler
249 * 102 *
250 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs 103 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
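
The BSM walk-through removed above describes a concrete register sequence for the legacy 3945/4965 bootstrap load. Purely as a reading aid, here is a sketch of that sequence using the removed defines; this is not code from the patch, RTC_INST_LOWER_BOUND comes from the legacy headers, and the poll count and delay are guesses:

static int example_bsm_load_bootstrap(struct iwl_priv *priv,
				      const __le32 *image, u32 dwords)
{
	u32 i, reg;

	/* 1) copy the bootstrap image into the always-on BSM SRAM */
	for (i = 0; i < dwords; i++)
		iwl_write_prph(priv,
			       BSM_SRAM_LOWER_BOUND + i * sizeof(u32),
			       le32_to_cpu(image[i]));

	/* 2) (omitted) point BSM_DRAM_* at the "initialize" uCode in DRAM */

	/* 3) source, destination and length of the copy the BSM will do */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, dwords);

	/* 4) load bootstrap into the embedded processor's instruction SRAM */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* 5) wait for the START bit to clear */
	for (i = 0; i < 100; i++) {
		reg = iwl_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(reg & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* 6) re-arm so the BSM reloads bootstrap after power-save sleeps */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
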
@@ -254,17 +107,7 @@
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel, 107 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues. 108 * but one DMA channel may take input from several queues.
256 * 109 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows 110 * Tx DMA FIFOs have dedicated purposes.
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 *
260 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority
262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- unused (HCCA)
266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only)
268 * 111 *
269 * For 5000 series and up, they are used differently 112 * For 5000 series and up, they are used differently
270 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): 113 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
@@ -298,7 +141,7 @@
298 * Tx completion may end up being out-of-order). 141 * Tx completion may end up being out-of-order).
299 * 142 *
300 * The driver must maintain the queue's Byte Count table in host DRAM 143 * The driver must maintain the queue's Byte Count table in host DRAM
301 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. 144 * for this mode.
302 * This mode does not support fragmentation. 145 * This mode does not support fragmentation.
303 * 146 *
304 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. 147 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -311,7 +154,7 @@
311 * 154 *
312 * Driver controls scheduler operation via 3 means: 155 * Driver controls scheduler operation via 3 means:
313 * 1) Scheduler registers 156 * 1) Scheduler registers
314 * 2) Shared scheduler data base in internal 4956 SRAM 157 * 2) Shared scheduler data base in internal SRAM
315 * 3) Shared data in host DRAM 158 * 3) Shared data in host DRAM
316 * 159 *
317 * Initialization: 160 * Initialization:
@@ -330,201 +173,10 @@
330 * Max Tx window size is the max number of contiguous TFDs that the scheduler 173 * Max Tx window size is the max number of contiguous TFDs that the scheduler
331 * can keep track of at one time when creating block-ack chains of frames. 174 * can keep track of at one time when creating block-ack chains of frames.
332 * Note that "64" matches the number of ack bits in a block-ack packet. 175 * Note that "64" matches the number of ack bits in a block-ack packet.
333 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
334 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
335 */ 176 */
336#define SCD_WIN_SIZE 64 177#define SCD_WIN_SIZE 64
337#define SCD_FRAME_LIMIT 64 178#define SCD_FRAME_LIMIT 64
338 179
339/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
340#define IWL49_SCD_START_OFFSET 0xa02c00
341
342/*
343 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
344 * Value is valid only after "Alive" response from uCode.
345 */
346#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
347
348/*
349 * Driver may need to update queue-empty bits after changing queue's
350 * write and read pointers (indexes) during (re-)initialization (i.e. when
351 * scheduler is not tracking what's happening).
352 * Bit fields:
353 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
354 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
355 * NOTE: This register is not used by Linux driver.
356 */
357#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
358
359/*
360 * Physical base address of array of byte count (BC) circular buffers (CBs).
361 * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
362 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
363 * Others are spaced by 1024 bytes.
364 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
365 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
366 * Bit fields:
367 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
368 */
369#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
370
371/*
372 * Enables any/all Tx DMA/FIFO channels.
373 * Scheduler generates requests for only the active channels.
374 * Set this to 0xff to enable all 8 channels (normal usage).
375 * Bit fields:
376 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
377 */
378#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
379/*
380 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
381 * Initialized and updated by driver as new TFDs are added to queue.
382 * NOTE: If using Block Ack, index must correspond to frame's
383 * Start Sequence Number; index = (SSN & 0xff)
384 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
385 */
386#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
387
388/*
389 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
390 * For FIFO mode, index indicates next frame to transmit.
391 * For Scheduler-ACK mode, index indicates first frame in Tx window.
392 * Initialized by driver, updated by scheduler.
393 */
394#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
395
396/*
397 * Select which queues work in chain mode (1) vs. not (0).
398 * Use chain mode to build chains of aggregated frames.
399 * Bit fields:
400 * 31-16: Reserved
401 * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
402 * NOTE: If driver sets up queue for chain mode, it should be also set up
403 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x).
404 */
405#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
406
407/*
408 * Select which queues interrupt driver when scheduler increments
409 * a queue's read pointer (index).
410 * Bit fields:
411 * 31-16: Reserved
412 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
413 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
414 * from Rx queue to read Tx command responses and update Tx queues.
415 */
416#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
417
418/*
419 * Queue search status registers. One for each queue.
420 * Sets up queue mode and assigns queue to Tx DMA channel.
421 * Bit fields:
422 * 19-10: Write mask/enable bits for bits 0-9
423 * 9: Driver should init to "0"
424 * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
425 * Driver should init to "1" for aggregation mode, or "0" otherwise.
426 * 7-6: Driver should init to "0"
427 * 5: Window Size Left; indicates whether scheduler can request
428 * another TFD, based on window size, etc. Driver should init
429 * this bit to "1" for aggregation mode, or "0" for non-agg.
430 * 4-1: Tx FIFO to use (range 0-7).
431 * 0: Queue is active (1), not active (0).
432 * Other bits should be written as "0"
433 *
434 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
435 * via SCD_QUEUECHAIN_SEL.
436 */
437#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
438 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
439
440/* Bit field positions */
441#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
442#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
443#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
444#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
445
446/* Write masks */
447#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
448#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
449
450/**
451 * 4965 internal SRAM structures for scheduler, shared with driver ...
452 *
453 * Driver should clear and initialize the following areas after receiving
454 * "Alive" response from 4965 uCode, i.e. after initial
455 * uCode load, or after a uCode load done for error recovery:
456 *
457 * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
458 * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
459 * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
460 *
461 * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
462 * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
463 * All OFFSET values must be added to this base address.
464 */
465
466/*
467 * Queue context. One 8-byte entry for each of 16 queues.
468 *
469 * Driver should clear this entire area (size 0x80) to 0 after receiving
470 * "Alive" notification from uCode. Additionally, driver should init
471 * each queue's entry as follows:
472 *
473 * LS Dword bit fields:
474 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
475 *
476 * MS Dword bit fields:
477 * 16-22: Frame limit. Driver should init to 10 (0xa).
478 *
479 * Driver should init all other bits to 0.
480 *
481 * Init must be done after driver receives "Alive" response from 4965 uCode,
482 * and when setting up queue for aggregation.
483 */
484#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
485#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
486 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
487
488#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
489#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
490#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
491#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
492
493/*
494 * Tx Status Bitmap
495 *
496 * Driver should clear this entire area (size 0x100) to 0 after receiving
497 * "Alive" notification from uCode. Area is used only by device itself;
498 * no other support (besides clearing) is required from driver.
499 */
500#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
501
502/*
503 * RAxTID to queue translation mapping.
504 *
505 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be
506 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
507 * one QOS priority level destined for one station (for this wireless link,
508 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
509 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
510 * mode, the device ignores the mapping value.
511 *
512 * Bit fields, for each 16-bit map:
513 * 15-9: Reserved, set to 0
514 * 8-4: Index into device's station table for recipient station
515 * 3-0: Traffic ID (tid), range 0-15
516 *
517 * Driver should clear this entire area (size 32 bytes) to 0 after receiving
518 * "Alive" notification from uCode. To update a 16-bit map value, driver
519 * must read a dword-aligned value from device SRAM, replace the 16-bit map
520 * value of interest, and write the dword value back into device SRAM.
521 */
522#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
523
524/* Find translation table dword to read/write for given queue */
525#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
526 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
527
528#define IWL_SCD_TXFIFO_POS_TID (0) 180#define IWL_SCD_TXFIFO_POS_TID (0)
529#define IWL_SCD_TXFIFO_POS_RA (4) 181#define IWL_SCD_TXFIFO_POS_RA (4)
530#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) 182#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
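
Among the removed 4965 scheduler notes, the RAxTID translation-table comment spells out a 16-bit-in-32-bit read-modify-write against device SRAM. A sketch of that update, using the defines that survive at the end of this hunk plus the removed IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE() macro (illustration only; scd_base_addr stands in for the value read back from IWL49_SCD_SRAM_BASE_ADDR):

static void example_set_ra_tid_map(struct iwl_priv *priv, u32 scd_base_addr,
				   int txq_id, u8 sta_id, u8 tid)
{
	u32 addr, val;
	u16 map = ((sta_id << IWL_SCD_TXFIFO_POS_RA) |
		   (tid << IWL_SCD_TXFIFO_POS_TID)) &
		  IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	/* dword-aligned SRAM word that holds this queue's 16-bit map */
	addr = scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	val = iwl_read_targ_mem(priv, addr);
	if (txq_id & 0x1)			/* odd queues use the high half */
		val = (val & 0x0000ffff) | ((u32)map << 16);
	else					/* even queues use the low half */
		val = (val & 0xffff0000) | map;
	iwl_write_targ_mem(priv, addr, val);
}
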
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6f9a2fa04763..0053e9ea9021 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -225,55 +225,6 @@ err_bd:
225 * 225 *
226 ******************************************************************************/ 226 ******************************************************************************/
227 227
228static void iwl_rx_reply_alive(struct iwl_priv *priv,
229 struct iwl_rx_mem_buffer *rxb)
230{
231 struct iwl_rx_packet *pkt = rxb_addr(rxb);
232 struct iwl_alive_resp *palive;
233 struct delayed_work *pwork;
234
235 palive = &pkt->u.alive_frame;
236
237 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
238 "0x%01X 0x%01X\n",
239 palive->is_valid, palive->ver_type,
240 palive->ver_subtype);
241
242 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
243 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
244 memcpy(&priv->card_alive_init,
245 &pkt->u.alive_frame,
246 sizeof(struct iwl_init_alive_resp));
247 pwork = &priv->init_alive_start;
248 } else {
249 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
250 memcpy(&priv->card_alive, &pkt->u.alive_frame,
251 sizeof(struct iwl_alive_resp));
252 pwork = &priv->alive_start;
253 }
254
255 /* We delay the ALIVE response by 5ms to
256 * give the HW RF Kill time to activate... */
257 if (palive->is_valid == UCODE_VALID_OK)
258 queue_delayed_work(priv->workqueue, pwork,
259 msecs_to_jiffies(5));
260 else {
261 IWL_WARN(priv, "%s uCode did not respond OK.\n",
262 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
263 "init" : "runtime");
264 /*
265 * If fail to load init uCode,
266 * let's try to load the init uCode again.
267 * We should not get into this situation, but if it
268 * does happen, we should not move on and loading "runtime"
269 * without proper calibrate the device.
270 */
271 if (palive->ver_subtype == INITIALIZE_SUBTYPE)
272 priv->ucode_type = UCODE_NONE;
273 queue_work(priv->workqueue, &priv->restart);
274 }
275}
276
277static void iwl_rx_reply_error(struct iwl_priv *priv, 228static void iwl_rx_reply_error(struct iwl_priv *priv,
278 struct iwl_rx_mem_buffer *rxb) 229 struct iwl_rx_mem_buffer *rxb)
279{ 230{
@@ -390,21 +341,16 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
390 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal 341 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
391 * operation state. 342 * operation state.
392 */ 343 */
393static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt) 344static bool iwl_good_ack_health(struct iwl_priv *priv,
345 struct statistics_tx *cur)
394{ 346{
395 int actual_delta, expected_delta, ba_timeout_delta; 347 int actual_delta, expected_delta, ba_timeout_delta;
396 struct statistics_tx *cur, *old; 348 struct statistics_tx *old;
397 349
398 if (priv->_agn.agg_tids_count) 350 if (priv->_agn.agg_tids_count)
399 return true; 351 return true;
400 352
401 if (iwl_bt_statistics(priv)) { 353 old = &priv->statistics.tx;
402 cur = &pkt->u.stats_bt.tx;
403 old = &priv->_agn.statistics_bt.tx;
404 } else {
405 cur = &pkt->u.stats.tx;
406 old = &priv->_agn.statistics.tx;
407 }
408 354
409 actual_delta = le32_to_cpu(cur->actual_ack_cnt) - 355 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
410 le32_to_cpu(old->actual_ack_cnt); 356 le32_to_cpu(old->actual_ack_cnt);
@@ -430,10 +376,10 @@ static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt
430 * DEBUG is not, these will just compile out. 376 * DEBUG is not, these will just compile out.
431 */ 377 */
432 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n", 378 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
433 priv->_agn.delta_statistics.tx.rx_detected_cnt); 379 priv->delta_stats.tx.rx_detected_cnt);
434 IWL_DEBUG_RADIO(priv, 380 IWL_DEBUG_RADIO(priv,
435 "ack_or_ba_timeout_collision delta %d\n", 381 "ack_or_ba_timeout_collision delta %d\n",
436 priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision); 382 priv->delta_stats.tx.ack_or_ba_timeout_collision);
437#endif 383#endif
438 384
439 if (ba_timeout_delta >= BA_TIMEOUT_MAX) 385 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
@@ -450,7 +396,9 @@ static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt
450 * to improve the throughput. 396 * to improve the throughput.
451 */ 397 */
452static bool iwl_good_plcp_health(struct iwl_priv *priv, 398static bool iwl_good_plcp_health(struct iwl_priv *priv,
453 struct iwl_rx_packet *pkt, unsigned int msecs) 399 struct statistics_rx_phy *cur_ofdm,
400 struct statistics_rx_ht_phy *cur_ofdm_ht,
401 unsigned int msecs)
454{ 402{
455 int delta; 403 int delta;
456 int threshold = priv->cfg->base_params->plcp_delta_threshold; 404 int threshold = priv->cfg->base_params->plcp_delta_threshold;
@@ -460,29 +408,12 @@ static bool iwl_good_plcp_health(struct iwl_priv *priv,
460 return true; 408 return true;
461 } 409 }
462 410
463 if (iwl_bt_statistics(priv)) { 411 delta = le32_to_cpu(cur_ofdm->plcp_err) -
464 struct statistics_rx_bt *cur, *old; 412 le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
465 413 le32_to_cpu(cur_ofdm_ht->plcp_err) -
466 cur = &pkt->u.stats_bt.rx; 414 le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);
467 old = &priv->_agn.statistics_bt.rx;
468 415
469 delta = le32_to_cpu(cur->ofdm.plcp_err) - 416 /* Can be negative if firmware reset statistics */
470 le32_to_cpu(old->ofdm.plcp_err) +
471 le32_to_cpu(cur->ofdm_ht.plcp_err) -
472 le32_to_cpu(old->ofdm_ht.plcp_err);
473 } else {
474 struct statistics_rx *cur, *old;
475
476 cur = &pkt->u.stats.rx;
477 old = &priv->_agn.statistics.rx;
478
479 delta = le32_to_cpu(cur->ofdm.plcp_err) -
480 le32_to_cpu(old->ofdm.plcp_err) +
481 le32_to_cpu(cur->ofdm_ht.plcp_err) -
482 le32_to_cpu(old->ofdm_ht.plcp_err);
483 }
484
485 /* Can be negative if firmware reseted statistics */
486 if (delta <= 0) 417 if (delta <= 0)
487 return true; 418 return true;
488 419
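
The reworked delta computation simply sums the OFDM and OFDM-HT PLCP error counters against the copies saved from the previous statistics notification. A worked example with invented counts:

/*
 * Invented numbers: the previous notification saved 1000 OFDM and 200
 * OFDM-HT PLCP errors, the current one reports 1040 and 210, so
 *
 *	delta = (1040 - 1000) + (210 - 200) = 50
 *
 * accumulated over the msecs elapsed since the last notification.  The
 * comparison against plcp_delta_threshold sits outside this hunk; our
 * reading is that the delta is normalized to errors per 100 ms before
 * being compared, but the hunk itself only shows the delta and the
 * early-out for a non-positive delta (firmware counter reset).
 */
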
@@ -497,44 +428,35 @@ static bool iwl_good_plcp_health(struct iwl_priv *priv,
497} 428}
498 429
499static void iwl_recover_from_statistics(struct iwl_priv *priv, 430static void iwl_recover_from_statistics(struct iwl_priv *priv,
500 struct iwl_rx_packet *pkt) 431 struct statistics_rx_phy *cur_ofdm,
432 struct statistics_rx_ht_phy *cur_ofdm_ht,
433 struct statistics_tx *tx,
434 unsigned long stamp)
501{ 435{
502 const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
503 unsigned int msecs; 436 unsigned int msecs;
504 unsigned long stamp;
505 437
506 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 438 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
507 return; 439 return;
508 440
509 stamp = jiffies;
510 msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies); 441 msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
511 442
512 /* Only gather statistics and update time stamp when not associated */ 443 /* Only gather statistics and update time stamp when not associated */
513 if (!iwl_is_any_associated(priv)) 444 if (!iwl_is_any_associated(priv))
514 goto out; 445 return;
515 446
516 /* Do not check/recover when do not have enough statistics data */ 447 /* Do not check/recover when do not have enough statistics data */
517 if (msecs < 99) 448 if (msecs < 99)
518 return; 449 return;
519 450
520 if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) { 451 if (iwlagn_mod_params.ack_check && !iwl_good_ack_health(priv, tx)) {
521 IWL_ERR(priv, "low ack count detected, restart firmware\n"); 452 IWL_ERR(priv, "low ack count detected, restart firmware\n");
522 if (!iwl_force_reset(priv, IWL_FW_RESET, false)) 453 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
523 return; 454 return;
524 } 455 }
525 456
526 if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs)) 457 if (iwlagn_mod_params.plcp_check &&
458 !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
527 iwl_force_reset(priv, IWL_RF_RESET, false); 459 iwl_force_reset(priv, IWL_RF_RESET, false);
528
529out:
530 if (iwl_bt_statistics(priv))
531 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
532 sizeof(priv->_agn.statistics_bt));
533 else
534 memcpy(&priv->_agn.statistics, &pkt->u.stats,
535 sizeof(priv->_agn.statistics));
536
537 priv->rx_statistics_jiffies = stamp;
538} 460}
539 461
540/* Calculate noise level, based on measurements during network silence just 462/* Calculate noise level, based on measurements during network silence just
@@ -548,10 +470,8 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
548 int bcn_silence_a, bcn_silence_b, bcn_silence_c; 470 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
549 int last_rx_noise; 471 int last_rx_noise;
550 472
551 if (iwl_bt_statistics(priv)) 473 rx_info = &priv->statistics.rx_non_phy;
552 rx_info = &(priv->_agn.statistics_bt.rx.general.common); 474
553 else
554 rx_info = &(priv->_agn.statistics.rx.general);
555 bcn_silence_a = 475 bcn_silence_a =
556 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; 476 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
557 bcn_silence_b = 477 bcn_silence_b =
@@ -583,105 +503,153 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
583 last_rx_noise); 503 last_rx_noise);
584} 504}
585 505
506#ifdef CONFIG_IWLWIFI_DEBUGFS
586/* 507/*
587 * based on the assumption of all statistics counter are in DWORD 508 * based on the assumption of all statistics counter are in DWORD
588 * FIXME: This function is for debugging, do not deal with 509 * FIXME: This function is for debugging, do not deal with
589 * the case of counters roll-over. 510 * the case of counters roll-over.
590 */ 511 */
591static void iwl_accumulative_statistics(struct iwl_priv *priv, 512static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
592 __le32 *stats) 513 __le32 *max_delta, __le32 *accum, int size)
593{ 514{
594#ifdef CONFIG_IWLWIFI_DEBUGFS 515 int i;
595 int i, size; 516
596 __le32 *prev_stats; 517 for (i = 0;
597 u32 *accum_stats; 518 i < size / sizeof(__le32);
598 u32 *delta, *max_delta; 519 i++, prev++, cur++, delta++, max_delta++, accum++) {
599 struct statistics_general_common *general, *accum_general; 520 if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
600 struct statistics_tx *tx, *accum_tx; 521 *delta = cpu_to_le32(
601 522 le32_to_cpu(*cur) - le32_to_cpu(*prev));
602 if (iwl_bt_statistics(priv)) { 523 le32_add_cpu(accum, le32_to_cpu(*delta));
603 prev_stats = (__le32 *)&priv->_agn.statistics_bt; 524 if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
604 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
605 size = sizeof(struct iwl_bt_notif_statistics);
606 general = &priv->_agn.statistics_bt.general.common;
607 accum_general = &priv->_agn.accum_statistics_bt.general.common;
608 tx = &priv->_agn.statistics_bt.tx;
609 accum_tx = &priv->_agn.accum_statistics_bt.tx;
610 delta = (u32 *)&priv->_agn.delta_statistics_bt;
611 max_delta = (u32 *)&priv->_agn.max_delta_bt;
612 } else {
613 prev_stats = (__le32 *)&priv->_agn.statistics;
614 accum_stats = (u32 *)&priv->_agn.accum_statistics;
615 size = sizeof(struct iwl_notif_statistics);
616 general = &priv->_agn.statistics.general.common;
617 accum_general = &priv->_agn.accum_statistics.general.common;
618 tx = &priv->_agn.statistics.tx;
619 accum_tx = &priv->_agn.accum_statistics.tx;
620 delta = (u32 *)&priv->_agn.delta_statistics;
621 max_delta = (u32 *)&priv->_agn.max_delta;
622 }
623 for (i = sizeof(__le32); i < size;
624 i += sizeof(__le32), stats++, prev_stats++, delta++,
625 max_delta++, accum_stats++) {
626 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
627 *delta = (le32_to_cpu(*stats) -
628 le32_to_cpu(*prev_stats));
629 *accum_stats += *delta;
630 if (*delta > *max_delta)
631 *max_delta = *delta; 525 *max_delta = *delta;
632 } 526 }
633 } 527 }
528}
634 529
635 /* reset accumulative statistics for "no-counter" type statistics */ 530static void
636 accum_general->temperature = general->temperature; 531iwl_accumulative_statistics(struct iwl_priv *priv,
637 accum_general->temperature_m = general->temperature_m; 532 struct statistics_general_common *common,
638 accum_general->ttl_timestamp = general->ttl_timestamp; 533 struct statistics_rx_non_phy *rx_non_phy,
639 accum_tx->tx_power.ant_a = tx->tx_power.ant_a; 534 struct statistics_rx_phy *rx_ofdm,
640 accum_tx->tx_power.ant_b = tx->tx_power.ant_b; 535 struct statistics_rx_ht_phy *rx_ofdm_ht,
641 accum_tx->tx_power.ant_c = tx->tx_power.ant_c; 536 struct statistics_rx_phy *rx_cck,
642#endif 537 struct statistics_tx *tx,
538 struct statistics_bt_activity *bt_activity)
539{
540#define ACCUM(_name) \
541 accum_stats((__le32 *)&priv->statistics._name, \
542 (__le32 *)_name, \
543 (__le32 *)&priv->delta_stats._name, \
544 (__le32 *)&priv->max_delta_stats._name, \
545 (__le32 *)&priv->accum_stats._name, \
546 sizeof(*_name));
547
548 ACCUM(common);
549 ACCUM(rx_non_phy);
550 ACCUM(rx_ofdm);
551 ACCUM(rx_ofdm_ht);
552 ACCUM(rx_cck);
553 ACCUM(tx);
554 if (bt_activity)
555 ACCUM(bt_activity);
556#undef ACCUM
557}
558#else
559static inline void
560iwl_accumulative_statistics(struct iwl_priv *priv,
561 struct statistics_general_common *common,
562 struct statistics_rx_non_phy *rx_non_phy,
563 struct statistics_rx_phy *rx_ofdm,
564 struct statistics_rx_ht_phy *rx_ofdm_ht,
565 struct statistics_rx_phy *rx_cck,
566 struct statistics_tx *tx,
567 struct statistics_bt_activity *bt_activity)
568{
643} 569}
570#endif
644 571
645static void iwl_rx_statistics(struct iwl_priv *priv, 572static void iwl_rx_statistics(struct iwl_priv *priv,
646 struct iwl_rx_mem_buffer *rxb) 573 struct iwl_rx_mem_buffer *rxb)
647{ 574{
575 unsigned long stamp = jiffies;
648 const int reg_recalib_period = 60; 576 const int reg_recalib_period = 60;
649 int change; 577 int change;
650 struct iwl_rx_packet *pkt = rxb_addr(rxb); 578 struct iwl_rx_packet *pkt = rxb_addr(rxb);
579 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
580 __le32 *flag;
581 struct statistics_general_common *common;
582 struct statistics_rx_non_phy *rx_non_phy;
583 struct statistics_rx_phy *rx_ofdm;
584 struct statistics_rx_ht_phy *rx_ofdm_ht;
585 struct statistics_rx_phy *rx_cck;
586 struct statistics_tx *tx;
587 struct statistics_bt_activity *bt_activity;
588
589 len -= sizeof(struct iwl_cmd_header); /* skip header */
590
591 IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
592 len);
593
594 if (len == sizeof(struct iwl_bt_notif_statistics)) {
595 struct iwl_bt_notif_statistics *stats;
596 stats = &pkt->u.stats_bt;
597 flag = &stats->flag;
598 common = &stats->general.common;
599 rx_non_phy = &stats->rx.general.common;
600 rx_ofdm = &stats->rx.ofdm;
601 rx_ofdm_ht = &stats->rx.ofdm_ht;
602 rx_cck = &stats->rx.cck;
603 tx = &stats->tx;
604 bt_activity = &stats->general.activity;
651 605
652 if (iwl_bt_statistics(priv)) { 606#ifdef CONFIG_IWLWIFI_DEBUGFS
653 IWL_DEBUG_RX(priv, 607 /* handle this exception directly */
654 "Statistics notification received (%d vs %d).\n", 608 priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
655 (int)sizeof(struct iwl_bt_notif_statistics), 609 le32_add_cpu(&priv->statistics.accum_num_bt_kills,
656 le32_to_cpu(pkt->len_n_flags) & 610 le32_to_cpu(stats->rx.general.num_bt_kills));
657 FH_RSCSR_FRAME_SIZE_MSK); 611#endif
658 612 } else if (len == sizeof(struct iwl_notif_statistics)) {
659 change = ((priv->_agn.statistics_bt.general.common.temperature != 613 struct iwl_notif_statistics *stats;
660 pkt->u.stats_bt.general.common.temperature) || 614 stats = &pkt->u.stats;
661 ((priv->_agn.statistics_bt.flag & 615 flag = &stats->flag;
662 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 616 common = &stats->general.common;
663 (pkt->u.stats_bt.flag & 617 rx_non_phy = &stats->rx.general;
664 STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 618 rx_ofdm = &stats->rx.ofdm;
665 619 rx_ofdm_ht = &stats->rx.ofdm_ht;
666 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt); 620 rx_cck = &stats->rx.cck;
621 tx = &stats->tx;
622 bt_activity = NULL;
667 } else { 623 } else {
668 IWL_DEBUG_RX(priv, 624 WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
669 "Statistics notification received (%d vs %d).\n", 625 len, sizeof(struct iwl_bt_notif_statistics),
670 (int)sizeof(struct iwl_notif_statistics), 626 sizeof(struct iwl_notif_statistics));
671 le32_to_cpu(pkt->len_n_flags) & 627 return;
672 FH_RSCSR_FRAME_SIZE_MSK);
673
674 change = ((priv->_agn.statistics.general.common.temperature !=
675 pkt->u.stats.general.common.temperature) ||
676 ((priv->_agn.statistics.flag &
677 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
678 (pkt->u.stats.flag &
679 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
680
681 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
682 } 628 }
683 629
684 iwl_recover_from_statistics(priv, pkt); 630 change = common->temperature != priv->statistics.common.temperature ||
631 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
632 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);
633
634 iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
635 rx_ofdm_ht, rx_cck, tx, bt_activity);
636
637 iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
638
639 priv->statistics.flag = *flag;
640 memcpy(&priv->statistics.common, common, sizeof(*common));
641 memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
642 memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
643 memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
644 memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
645 memcpy(&priv->statistics.tx, tx, sizeof(*tx));
646#ifdef CONFIG_IWLWIFI_DEBUGFS
647 if (bt_activity)
648 memcpy(&priv->statistics.bt_activity, bt_activity,
649 sizeof(*bt_activity));
650#endif
651
652 priv->rx_statistics_jiffies = stamp;
685 653
686 set_bit(STATUS_STATISTICS, &priv->status); 654 set_bit(STATUS_STATISTICS, &priv->status);
687 655
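
For reference, the new ACCUM() helper treats each statistics sub-struct as a flat array of __le32 counters; ACCUM(tx), for instance, expands to exactly the call below. Because a delta is only recorded when the new counter exceeds the stored one, a firmware-side counter reset yields a zero delta for that interval rather than a huge bogus value.

	accum_stats((__le32 *)&priv->statistics.tx,
		    (__le32 *)tx,
		    (__le32 *)&priv->delta_stats.tx,
		    (__le32 *)&priv->max_delta_stats.tx,
		    (__le32 *)&priv->accum_stats.tx,
		    sizeof(*tx));
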
@@ -708,18 +676,12 @@ static void iwl_rx_reply_statistics(struct iwl_priv *priv,
708 676
709 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { 677 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
710#ifdef CONFIG_IWLWIFI_DEBUGFS 678#ifdef CONFIG_IWLWIFI_DEBUGFS
711 memset(&priv->_agn.accum_statistics, 0, 679 memset(&priv->accum_stats, 0,
712 sizeof(struct iwl_notif_statistics)); 680 sizeof(priv->accum_stats));
713 memset(&priv->_agn.delta_statistics, 0, 681 memset(&priv->delta_stats, 0,
714 sizeof(struct iwl_notif_statistics)); 682 sizeof(priv->delta_stats));
715 memset(&priv->_agn.max_delta, 0, 683 memset(&priv->max_delta_stats, 0,
716 sizeof(struct iwl_notif_statistics)); 684 sizeof(priv->max_delta_stats));
717 memset(&priv->_agn.accum_statistics_bt, 0,
718 sizeof(struct iwl_bt_notif_statistics));
719 memset(&priv->_agn.delta_statistics_bt, 0,
720 sizeof(struct iwl_bt_notif_statistics));
721 memset(&priv->_agn.max_delta_bt, 0,
722 sizeof(struct iwl_bt_notif_statistics));
723#endif 685#endif
724 IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); 686 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
725 } 687 }
@@ -873,6 +835,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
873{ 835{
874 struct sk_buff *skb; 836 struct sk_buff *skb;
875 __le16 fc = hdr->frame_control; 837 __le16 fc = hdr->frame_control;
838 struct iwl_rxon_context *ctx;
876 839
877 /* We only process data packets if the interface is open */ 840 /* We only process data packets if the interface is open */
878 if (unlikely(!priv->is_open)) { 841 if (unlikely(!priv->is_open)) {
@@ -882,7 +845,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
882 } 845 }
883 846
884 /* In case of HW accelerated crypto and bad decryption, drop */ 847 /* In case of HW accelerated crypto and bad decryption, drop */
885 if (!priv->cfg->mod_params->sw_crypto && 848 if (!iwlagn_mod_params.sw_crypto &&
886 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 849 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
887 return; 850 return;
888 851
@@ -895,10 +858,29 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
895 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); 858 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
896 859
897 iwl_update_stats(priv, false, fc, len); 860 iwl_update_stats(priv, false, fc, len);
861
862 /*
863 * Wake any queues that were stopped due to a passive channel tx
864 * failure. This can happen because the regulatory enforcement in
865 * the device waits for a beacon before allowing transmission,
866 * sometimes even after already having transmitted frames for the
867 * association because the new RXON may reset the information.
868 */
869 if (unlikely(ieee80211_is_beacon(fc))) {
870 for_each_context(priv, ctx) {
871 if (!ctx->last_tx_rejected)
872 continue;
873 if (compare_ether_addr(hdr->addr3,
874 ctx->active.bssid_addr))
875 continue;
876 ctx->last_tx_rejected = false;
877 iwl_wake_any_queue(priv, ctx);
878 }
879 }
880
898 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 881 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
899 882
900 ieee80211_rx(priv->hw, skb); 883 ieee80211_rx(priv->hw, skb);
901 priv->alloc_rxb_page--;
902 rxb->page = NULL; 884 rxb->page = NULL;
903} 885}
904 886
@@ -1093,7 +1075,6 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
1093 1075
1094 handlers = priv->rx_handlers; 1076 handlers = priv->rx_handlers;
1095 1077
1096 handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
1097 handlers[REPLY_ERROR] = iwl_rx_reply_error; 1078 handlers[REPLY_ERROR] = iwl_rx_reply_error;
1098 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 1079 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
1099 handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif; 1080 handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
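
With REPLY_ALIVE dropped from the generic table, the remaining handlers are still dispatched by notification id. The dispatch loop is not part of this hunk; a sketch of its typical shape (the packet-header field name is our reading, not taken from this patch):

static void example_dispatch(struct iwl_priv *priv,
			     struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	/* rx_handlers[] is indexed by the notification/command id */
	if (priv->rx_handlers[pkt->hdr.cmd])
		priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
	else
		IWL_DEBUG_RX(priv, "Unhandled notification 0x%02x\n",
			     pkt->hdr.cmd);
}
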
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 914c77e44588..d60d630cb93a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
deleted file mode 100644
index c4ca0b5d77da..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_spectrum_h__
30#define __iwl_spectrum_h__
31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
34 IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
35 IEEE80211_BASIC_MAP_RADAR = (1 << 3),
36 IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
37 /* Bits 5-7 are reserved */
38
39};
40struct ieee80211_basic_report {
41 u8 channel;
42 __le64 start_time;
43 __le16 duration;
44 u8 map;
45} __packed;
46
47enum { /* ieee80211_measurement_request.mode */
48 /* Bit 0 is reserved */
49 IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
50 IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
51 IEEE80211_MEASUREMENT_REPORT = (1 << 3),
52 /* Bits 4-7 are reserved */
53};
54
55enum {
56 IEEE80211_REPORT_BASIC = 0, /* required */
57 IEEE80211_REPORT_CCA = 1, /* optional */
58 IEEE80211_REPORT_RPI = 2, /* optional */
59 /* 3-255 reserved */
60};
61
62struct ieee80211_measurement_params {
63 u8 channel;
64 __le64 start_time;
65 __le16 duration;
66} __packed;
67
68struct ieee80211_info_element {
69 u8 id;
70 u8 len;
71 u8 data[0];
72} __packed;
73
74struct ieee80211_measurement_request {
75 struct ieee80211_info_element ie;
76 u8 token;
77 u8 mode;
78 u8 type;
79 struct ieee80211_measurement_params params[0];
80} __packed;
81
82struct ieee80211_measurement_report {
83 struct ieee80211_info_element ie;
84 u8 token;
85 u8 mode;
86 u8 type;
87 union {
88 struct ieee80211_basic_report basic[0];
89 } u;
90} __packed;
91
92#endif
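
The structures deleted with iwl-spectrum.h were never wired up; for completeness, this is roughly how a single-parameter measurement request would have been sized and filled (illustration only; WLAN_EID_MEASURE_REQUEST is the standard element id from linux/ieee80211.h, and the exact mode-bit combination depends on usage):

static struct ieee80211_measurement_request *
example_build_measure_request(u8 token, u8 channel, __le64 start_time,
			      __le16 duration)
{
	struct ieee80211_measurement_request *req;

	/* flexible params[] array: allocate room for exactly one entry */
	req = kzalloc(sizeof(*req) + sizeof(req->params[0]), GFP_KERNEL);
	if (!req)
		return NULL;

	req->ie.id = WLAN_EID_MEASURE_REQUEST;
	req->ie.len = sizeof(*req) - sizeof(req->ie) + sizeof(req->params[0]);
	req->token = token;
	req->mode = IEEE80211_MEASUREMENT_ENABLE;
	req->type = IEEE80211_REPORT_BASIC;
	req->params[0].channel = channel;
	req->params[0].start_time = start_time;
	req->params[0].duration = duration;

	return req;
}
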
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index bc90a12408a3..3c8cebde16cc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -233,7 +233,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
233 struct iwl_station_entry *station; 233 struct iwl_station_entry *station;
234 int i; 234 int i;
235 u8 sta_id = IWL_INVALID_STATION; 235 u8 sta_id = IWL_INVALID_STATION;
236 u16 rate;
237 236
238 if (is_ap) 237 if (is_ap)
239 sta_id = ctx->ap_sta_id; 238 sta_id = ctx->ap_sta_id;
@@ -306,12 +305,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
306 */ 305 */
307 iwl_set_ht_add_station(priv, sta_id, sta, ctx); 306 iwl_set_ht_add_station(priv, sta_id, sta, ctx);
308 307
309 /* 3945 only */
310 rate = (priv->band == IEEE80211_BAND_5GHZ) ?
311 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
312 /* Turn on both antennas for the station... */
313 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
314
315 return sta_id; 308 return sta_id;
316 309
317} 310}
@@ -501,7 +494,8 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
501 494
502 priv->num_stations--; 495 priv->num_stations--;
503 496
504 BUG_ON(priv->num_stations < 0); 497 if (WARN_ON(priv->num_stations < 0))
498 priv->num_stations = 0;
505 499
506 spin_unlock_irqrestore(&priv->sta_lock, flags); 500 spin_unlock_irqrestore(&priv->sta_lock, flags);
507 501
@@ -686,7 +680,8 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
686 680
687 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 681 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
688 priv->num_stations--; 682 priv->num_stations--;
689 BUG_ON(priv->num_stations < 0); 683 if (WARN_ON(priv->num_stations < 0))
684 priv->num_stations = 0;
690 kfree(priv->stations[i].lq); 685 kfree(priv->stations[i].lq);
691 priv->stations[i].lq = NULL; 686 priv->stations[i].lq = NULL;
692 } 687 }
@@ -782,7 +777,8 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
782 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 777 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
783 778
784 iwl_dump_lq_cmd(priv, lq); 779 iwl_dump_lq_cmd(priv, lq);
785 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 780 if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
781 return -EINVAL;
786 782
787 if (is_lq_table_valid(priv, ctx, lq)) 783 if (is_lq_table_valid(priv, ctx, lq))
788 ret = iwl_send_cmd(priv, &cmd); 784 ret = iwl_send_cmd(priv, &cmd);
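
The conversions above lean on the fact that WARN_ON(cond) evaluates to the tested condition, so a single if statement can both emit the backtrace and repair the bogus state instead of halting the machine the way BUG_ON() did. A compressed illustration of the pattern; the function and counter are made up for the example, not taken from the driver:

#include <linux/bug.h>

static void example_drop_station(int *num_stations)
{
	(*num_stations)--;

	/*
	 * WARN_ON() returns the tested condition, so this branch fires
	 * exactly when the counter went negative: it logs a warning with
	 * a backtrace and clamps the state back to a sane value, whereas
	 * the old BUG_ON() would have killed the box.
	 */
	if (WARN_ON(*num_stations < 0))
		*num_stations = 0;
}
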
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 206f1e1a0caf..ff64027ff4cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
new file mode 100644
index 000000000000..89b6696622c1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
@@ -0,0 +1,469 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#include <linux/init.h>
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <net/net_namespace.h>
67#include <linux/netdevice.h>
68#include <net/cfg80211.h>
69#include <net/mac80211.h>
70#include <net/netlink.h>
71
72
73#include "iwl-dev.h"
74#include "iwl-core.h"
75#include "iwl-debug.h"
76#include "iwl-fh.h"
77#include "iwl-io.h"
78#include "iwl-agn.h"
79#include "iwl-testmode.h"
80
81
 82/* The TLVs used in the gnl messages between the kernel module and the
 83 * user space application; they are parsed with iwl_testmode_gnl_msg_policy
 84 * and carried through the NL80211_CMD_TESTMODE channel regulated by nl80211.
85 * See iwl-testmode.h
86 */
87static
88struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
89 [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
90
91 [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
92 [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
93
94 [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
95 [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
96 [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
97
98 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
99 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
100};
101
102/*
103 * See the struct iwl_rx_packet in iwl-commands.h for the format of the
104 * received events from the device
105 */
106static inline int get_event_length(struct iwl_rx_mem_buffer *rxb)
107{
108 struct iwl_rx_packet *pkt = rxb_addr(rxb);
109 if (pkt)
110 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
111 else
112 return 0;
113}
114
115
116/*
117 * This function multicasts the spontaneous messages from the device to the
 118 * user space. It is invoked whenever a message is received
 119 * from the device. This function is called within the ISR of the rx handlers
 120 * in the iwlagn driver.
121 *
 122 * The parsing of the message content is left to the user space application;
 123 * the message content is treated as opaque raw data and is encapsulated
 124 * with IWL_TM_ATTR_UCODE_RX_PKT when multicasting to the user space.
125 *
126 * @priv: the instance of iwlwifi device
127 * @rxb: pointer to rx data content received by the ISR
128 *
129 * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
 130 * For the messages multicast to the user application, the mandatory
 131 * TLV fields are:
132 * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
133 * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
134 */
135
136static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
137 struct iwl_rx_mem_buffer *rxb)
138{
139 struct ieee80211_hw *hw = priv->hw;
140 struct sk_buff *skb;
141 void *data;
142 int length;
143
144 data = (void *)rxb_addr(rxb);
145 length = get_event_length(rxb);
146
147 if (!data || length == 0)
148 return;
149
150 skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
151 GFP_ATOMIC);
152 if (skb == NULL) {
153 IWL_DEBUG_INFO(priv,
154 "Run out of memory for messages to user space ?\n");
155 return;
156 }
157 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
158 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data);
159 cfg80211_testmode_event(skb, GFP_ATOMIC);
160 return;
161
162nla_put_failure:
163 kfree_skb(skb);
164 IWL_DEBUG_INFO(priv, "Ouch, overran buffer, check allocation!\n");
165}
166
167void iwl_testmode_init(struct iwl_priv *priv)
168{
169 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
170}
171
172/*
173 * This function handles the user application commands to the ucode.
174 *
175 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
 176 * IWL_TM_ATTR_UCODE_CMD_DATA and calls the handler to send the
177 * host command to the ucode.
178 *
179 * If any mandatory field is missing, -ENOMSG is replied to the user space
180 * application; otherwise, the actual execution result of the host command to
181 * ucode is replied.
182 *
183 * @hw: ieee80211_hw object that represents the device
184 * @tb: gnl message fields from the user space
185 */
186static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
187{
188 struct iwl_priv *priv = hw->priv;
189 struct iwl_host_cmd cmd;
190
191 memset(&cmd, 0, sizeof(struct iwl_host_cmd));
192
193 if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
194 !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
195 IWL_DEBUG_INFO(priv,
196 "Error finding ucode command mandatory fields\n");
197 return -ENOMSG;
198 }
199
200 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
201 cmd.data = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
202 cmd.len = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
203 IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
204 " len %d\n", cmd.id, cmd.flags, cmd.len);
205 /* ok, let's submit the command to ucode */
206 return iwl_send_cmd(priv, &cmd);
207}
208
209
210/*
211 * This function handles the user application commands for register access.
212 *
 213 * It retrieves the command ID carried with IWL_TM_ATTR_COMMAND and calls
 214 * the corresponding handler.
215 *
 216 * If it's an unknown command ID, -ENOSYS is returned, or -ENOMSG if the
 217 * mandatory fields (IWL_TM_ATTR_REG_OFFSET, IWL_TM_ATTR_REG_VALUE32,
 218 * IWL_TM_ATTR_REG_VALUE8) are missing; otherwise 0 is replied, indicating
219 * the success of the command execution.
220 *
221 * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
222 * value is returned with IWL_TM_ATTR_REG_VALUE32.
223 *
224 * @hw: ieee80211_hw object that represents the device
225 * @tb: gnl message fields from the user space
226 */
227static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
228{
229 struct iwl_priv *priv = hw->priv;
230 u32 ofs, val32;
231 u8 val8;
232 struct sk_buff *skb;
233 int status = 0;
234
235 if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
236 IWL_DEBUG_INFO(priv, "Error finding register offset\n");
237 return -ENOMSG;
238 }
239 ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
240 IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
241
242 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
243 case IWL_TM_CMD_APP2DEV_REG_READ32:
244 val32 = iwl_read32(priv, ofs);
245 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
246
247 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
248 if (!skb) {
249 IWL_DEBUG_INFO(priv, "Error allocating memory\n");
250 return -ENOMEM;
251 }
252 NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
253 status = cfg80211_testmode_reply(skb);
254 if (status < 0)
255 IWL_DEBUG_INFO(priv,
256 "Error sending msg : %d\n", status);
257 break;
258 case IWL_TM_CMD_APP2DEV_REG_WRITE32:
259 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
260 IWL_DEBUG_INFO(priv,
261 "Error finding value to write\n");
262 return -ENOMSG;
263 } else {
264 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
265 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
266 iwl_write32(priv, ofs, val32);
267 }
268 break;
269 case IWL_TM_CMD_APP2DEV_REG_WRITE8:
270 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
271 IWL_DEBUG_INFO(priv, "Error finding value to write\n");
272 return -ENOMSG;
273 } else {
274 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
275 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
276 iwl_write8(priv, ofs, val8);
277 }
278 break;
279 default:
280 IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
281 return -ENOSYS;
282 }
283
284 return status;
285
286nla_put_failure:
287 kfree_skb(skb);
288 return -EMSGSIZE;
289}
290
291
292static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
293{
294 struct iwl_notification_wait calib_wait;
295 int ret;
296
297 iwlagn_init_notification_wait(priv, &calib_wait,
298 CALIBRATION_COMPLETE_NOTIFICATION,
299 NULL, NULL);
300 ret = iwlagn_init_alive_start(priv);
301 if (ret) {
302 IWL_DEBUG_INFO(priv,
303 "Error configuring init calibration: %d\n", ret);
304 goto cfg_init_calib_error;
305 }
306
307 ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
308 if (ret)
309 IWL_DEBUG_INFO(priv, "Error detecting"
310 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
311 return ret;
312
313cfg_init_calib_error:
314 iwlagn_remove_notification(priv, &calib_wait);
315 return ret;
316}
317
318/*
319 * This function handles the user application commands for driver.
320 *
 321 * It retrieves the command ID carried with IWL_TM_ATTR_COMMAND and calls
 322 * the corresponding handler.
323 *
 324 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
325 * value of the actual command execution is replied to the user application.
326 *
327 * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
 328 * is used to carry the message, while IWL_TM_ATTR_COMMAND must be set to
329 * IWL_TM_CMD_DEV2APP_SYNC_RSP.
330 *
331 * @hw: ieee80211_hw object that represents the device
332 * @tb: gnl message fields from the user space
333 */
334static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
335{
336 struct iwl_priv *priv = hw->priv;
337 struct sk_buff *skb;
338 unsigned char *rsp_data_ptr = NULL;
339 int status = 0, rsp_data_len = 0;
340
341 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
342 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
343 rsp_data_ptr = (unsigned char *)priv->cfg->name;
344 rsp_data_len = strlen(priv->cfg->name);
345 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
346 rsp_data_len + 20);
347 if (!skb) {
348 IWL_DEBUG_INFO(priv,
349 "Error allocating memory\n");
350 return -ENOMEM;
351 }
352 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
353 IWL_TM_CMD_DEV2APP_SYNC_RSP);
354 NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP,
355 rsp_data_len, rsp_data_ptr);
356 status = cfg80211_testmode_reply(skb);
357 if (status < 0)
358 IWL_DEBUG_INFO(priv, "Error sending msg : %d\n",
359 status);
360 break;
361
362 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
363 status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
364 UCODE_SUBTYPE_INIT, -1);
365 if (status)
366 IWL_DEBUG_INFO(priv,
367 "Error loading init ucode: %d\n", status);
368 break;
369
370 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
371 iwl_testmode_cfg_init_calib(priv);
372 iwlagn_stop_device(priv);
373 break;
374
375 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
376 status = iwlagn_load_ucode_wait_alive(priv,
377 &priv->ucode_rt,
378 UCODE_SUBTYPE_REGULAR,
379 UCODE_SUBTYPE_REGULAR_NEW);
380 if (status) {
381 IWL_DEBUG_INFO(priv,
382 "Error loading runtime ucode: %d\n", status);
383 break;
384 }
385 status = iwl_alive_start(priv);
386 if (status)
387 IWL_DEBUG_INFO(priv,
388 "Error starting the device: %d\n", status);
389 break;
390
391 default:
392 IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
393 return -ENOSYS;
394 }
395 return status;
396
397nla_put_failure:
398 kfree_skb(skb);
399 return -EMSGSIZE;
400}
401
402/* The testmode gnl message handler that takes the gnl message from the
403 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
 404 * invokes the corresponding handlers.
405 *
 406 * This function is invoked when a user space application sends a
407 * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
408 * by nl80211.
409 *
410 * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
411 * dispatching it to the corresponding handler.
412 *
413 * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
414 * -ENOSYS is replied to the user application if the command is unknown;
415 * Otherwise, the command is dispatched to the respective handler.
416 *
417 * @hw: ieee80211_hw object that represents the device
418 * @data: pointer to user space message
419 * @len: length in byte of @data
420 */
421int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
422{
423 struct nlattr *tb[IWL_TM_ATTR_MAX - 1];
424 struct iwl_priv *priv = hw->priv;
425 int result;
426
427 result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
428 iwl_testmode_gnl_msg_policy);
429 if (result != 0) {
430 IWL_DEBUG_INFO(priv,
431 "Error parsing the gnl message : %d\n", result);
432 return result;
433 }
434
435 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
436 if (!tb[IWL_TM_ATTR_COMMAND]) {
437 IWL_DEBUG_INFO(priv, "Error finding testmode command type\n");
438 return -ENOMSG;
439 }
 440 /* in case multiple accesses to the device happen */
441 mutex_lock(&priv->mutex);
442
443 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
444 case IWL_TM_CMD_APP2DEV_UCODE:
445 IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
446 result = iwl_testmode_ucode(hw, tb);
447 break;
448 case IWL_TM_CMD_APP2DEV_REG_READ32:
449 case IWL_TM_CMD_APP2DEV_REG_WRITE32:
450 case IWL_TM_CMD_APP2DEV_REG_WRITE8:
451 IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
452 result = iwl_testmode_reg(hw, tb);
453 break;
454 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
455 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
456 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
457 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
458 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
459 result = iwl_testmode_driver(hw, tb);
460 break;
461 default:
462 IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
463 result = -ENOSYS;
464 break;
465 }
466
467 mutex_unlock(&priv->mutex);
468 return result;
469}
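
iwl_testmode_cmd() above receives its TLVs from nl80211, which means a user space client has to nest the IWL_TM_* attributes inside NL80211_ATTR_TESTDATA of an NL80211_CMD_TESTMODE message. A rough user space sketch, assuming libnl-3 genl, an already resolved nl80211 family id and interface index, and the IWL_TM_* values from iwl-testmode.h; error handling is trimmed and none of this is part of the patch:

#include <errno.h>
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>
#include "iwl-testmode.h"		/* IWL_TM_* definitions from the patch */

/* Ask the driver to read a 32-bit register through testmode (sketch). */
static int tm_reg_read32(struct nl_sock *sk, int nl80211_id,
			 int ifindex, uint32_t offset)
{
	struct nl_msg *msg;
	struct nlattr *tmdata;
	int err = -ENOMEM;

	msg = nlmsg_alloc();
	if (!msg)
		return -ENOMEM;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	/* The iwl TLVs travel nested under NL80211_ATTR_TESTDATA. */
	tmdata = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	if (!tmdata)
		goto out;
	nla_put_u32(msg, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_APP2DEV_REG_READ32);
	nla_put_u32(msg, IWL_TM_ATTR_REG_OFFSET, offset);
	nla_nest_end(msg, tmdata);

	err = nl_send_auto(sk, msg);
out:
	nlmsg_free(msg);
	return err;
}

The socket would be set up with nl_socket_alloc()/genl_connect() and nl80211_id resolved via genl_ctrl_resolve(sk, "nl80211"); the 32-bit result then comes back as IWL_TM_ATTR_REG_VALUE32 nested in the reply, matching the IWL_TM_CMD_APP2DEV_REG_READ32 branch of iwl_testmode_reg() above.
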
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
new file mode 100644
index 000000000000..31f8949f2801
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -0,0 +1,151 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2010 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __IWL_TESTMODE_H__
64#define __IWL_TESTMODE_H__
65
66#include <linux/types.h>
67
68
 69/* Commands from user space to kernel space (IWL_TM_CMD_ID_APP2DEV_XX) and
 70 * from kernel space to user space (IWL_TM_CMD_ID_DEV2APP_XX).
 71 * The command ID is carried with IWL_TM_ATTR_COMMAND. There are three types
 72 * of command from user space and two types of command from kernel space.
73 * See below.
74 */
75enum iwl_tm_cmd_t {
76 /* commands from user application to the uCode,
77 * the actual uCode host command ID is carried with
78 * IWL_TM_ATTR_UCODE_CMD_ID */
79 IWL_TM_CMD_APP2DEV_UCODE = 1,
80
 81 /* commands from user application to access registers */
82 IWL_TM_CMD_APP2DEV_REG_READ32,
83 IWL_TM_CMD_APP2DEV_REG_WRITE32,
84 IWL_TM_CMD_APP2DEV_REG_WRITE8,
85
 86 /* commands from user space for pure driver-level operations */
87 IWL_TM_CMD_APP2DEV_GET_DEVICENAME,
88 IWL_TM_CMD_APP2DEV_LOAD_INIT_FW,
89 IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB,
90 IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW,
91 /* if there is other new command for the driver layer operation,
92 * append them here */
93
94
95 /* commands from kernel space to carry the synchronous response
 96 * to the user application */
97 IWL_TM_CMD_DEV2APP_SYNC_RSP,
98
99 /* commands from kernel space to multicast the spontaneous messages
 100 * to the user application */
101 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
102 IWL_TM_CMD_MAX,
103};
104
105enum iwl_tm_attr_t {
106 IWL_TM_ATTR_NOT_APPLICABLE = 0,
107
108 /* From user space to kernel space:
 109 * the command is destined for the ucode, the driver, or a register;
110 * See enum iwl_tm_cmd_t.
111 *
112 * From kernel space to user space:
 113 * the command either carries a synchronous response
 114 * or a spontaneous message multicast from the device;
115 * See enum iwl_tm_cmd_t. */
116 IWL_TM_ATTR_COMMAND,
117
118 /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
 119 * The mandatory fields are:
120 * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
121 * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands;
122 * The optional fields are:
123 * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
124 * to the ucode */
125 IWL_TM_ATTR_UCODE_CMD_ID,
126 IWL_TM_ATTR_UCODE_CMD_DATA,
127
128 /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX,
129 * The mandatory fields are:
130 * IWL_TM_ATTR_REG_OFFSET for the offset of the target register;
131 * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value */
132 IWL_TM_ATTR_REG_OFFSET,
133 IWL_TM_ATTR_REG_VALUE8,
134 IWL_TM_ATTR_REG_VALUE32,
135
136 /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP,
137 * The mandatory fields are:
138 * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user
139 * application command */
140 IWL_TM_ATTR_SYNC_RSP,
141 /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
142 * The mandatory fields are:
143 * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user
144 * application */
145 IWL_TM_ATTR_UCODE_RX_PKT,
146
147 IWL_TM_ATTR_MAX,
148};
149
150
151#endif
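
Both reply directions defined above (IWL_TM_CMD_DEV2APP_SYNC_RSP and IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) reach user space as attributes nested under NL80211_ATTR_TESTDATA. A companion sketch to the previous one, again assuming libnl-3 and purely illustrative, shows how a receive callback could unpack them; it would be registered with nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, tm_reply_cb, NULL):

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>
#include "iwl-testmode.h"		/* IWL_TM_* definitions from the patch */

static int tm_reply_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *attrs[NL80211_ATTR_MAX + 1];
	struct nlattr *tm[IWL_TM_ATTR_MAX];

	nla_parse(attrs, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);
	if (!attrs[NL80211_ATTR_TESTDATA])
		return NL_SKIP;

	/* Same upper bound as the kernel side: IWL_TM_ATTR_MAX - 1. */
	nla_parse_nested(tm, IWL_TM_ATTR_MAX - 1,
			 attrs[NL80211_ATTR_TESTDATA], NULL);

	if (tm[IWL_TM_ATTR_UCODE_RX_PKT])
		printf("spontaneous uCode packet, %d bytes\n",
		       nla_len(tm[IWL_TM_ATTR_UCODE_RX_PKT]));
	else if (tm[IWL_TM_ATTR_SYNC_RSP])
		printf("sync response, %d bytes\n",
		       nla_len(tm[IWL_TM_ATTR_SYNC_RSP]));
	else if (tm[IWL_TM_ATTR_REG_VALUE32])
		printf("register value 0x%x\n",
		       nla_get_u32(tm[IWL_TM_ATTR_REG_VALUE32]));

	return NL_SKIP;
}
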
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 277c9175dcf6..e69597ea43e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -149,32 +149,31 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
149 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 149 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
150 struct iwl_queue *q = &txq->q; 150 struct iwl_queue *q = &txq->q;
151 int i; 151 int i;
152 bool huge = false;
153 152
154 if (q->n_bd == 0) 153 if (q->n_bd == 0)
155 return; 154 return;
156 155
157 while (q->read_ptr != q->write_ptr) { 156 while (q->read_ptr != q->write_ptr) {
158 /* we have no way to tell if it is a huge cmd ATM */
159 i = get_cmd_index(q, q->read_ptr, 0); 157 i = get_cmd_index(q, q->read_ptr, 0);
160 158
161 if (txq->meta[i].flags & CMD_SIZE_HUGE) 159 if (txq->meta[i].flags & CMD_MAPPED) {
162 huge = true;
163 else
164 pci_unmap_single(priv->pci_dev, 160 pci_unmap_single(priv->pci_dev,
165 dma_unmap_addr(&txq->meta[i], mapping), 161 dma_unmap_addr(&txq->meta[i], mapping),
166 dma_unmap_len(&txq->meta[i], len), 162 dma_unmap_len(&txq->meta[i], len),
167 PCI_DMA_BIDIRECTIONAL); 163 PCI_DMA_BIDIRECTIONAL);
164 txq->meta[i].flags = 0;
165 }
168 166
169 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 167 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
170 } 168 }
171 169
172 if (huge) { 170 i = q->n_window;
173 i = q->n_window; 171 if (txq->meta[i].flags & CMD_MAPPED) {
174 pci_unmap_single(priv->pci_dev, 172 pci_unmap_single(priv->pci_dev,
175 dma_unmap_addr(&txq->meta[i], mapping), 173 dma_unmap_addr(&txq->meta[i], mapping),
176 dma_unmap_len(&txq->meta[i], len), 174 dma_unmap_len(&txq->meta[i], len),
177 PCI_DMA_BIDIRECTIONAL); 175 PCI_DMA_BIDIRECTIONAL);
176 txq->meta[i].flags = 0;
178 } 177 }
179} 178}
180 179
@@ -233,7 +232,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
233 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 232 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
234 * Tx queue resumed. 233 * Tx queue resumed.
235 * 234 *
236 * See more detailed info in iwl-4965-hw.h.
237 ***************************************************/ 235 ***************************************************/
238 236
239int iwl_queue_space(const struct iwl_queue *q) 237int iwl_queue_space(const struct iwl_queue *q)
@@ -265,11 +263,13 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
265 263
266 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap 264 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
267 * and iwl_queue_dec_wrap are broken. */ 265 * and iwl_queue_dec_wrap are broken. */
268 BUG_ON(!is_power_of_2(count)); 266 if (WARN_ON(!is_power_of_2(count)))
267 return -EINVAL;
269 268
270 /* slots_num must be power-of-two size, otherwise 269 /* slots_num must be power-of-two size, otherwise
271 * get_cmd_index is broken. */ 270 * get_cmd_index is broken. */
272 BUG_ON(!is_power_of_2(slots_num)); 271 if (WARN_ON(!is_power_of_2(slots_num)))
272 return -EINVAL;
273 273
274 q->low_mark = q->n_window / 4; 274 q->low_mark = q->n_window / 4;
275 if (q->low_mark < 4) 275 if (q->low_mark < 4)
@@ -386,7 +386,9 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
386 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 386 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
387 387
388 /* Initialize queue's high/low-water marks, and head/tail indexes */ 388 /* Initialize queue's high/low-water marks, and head/tail indexes */
389 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); 389 ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
390 if (ret)
391 return ret;
390 392
391 /* Tell device where to find queue */ 393 /* Tell device where to find queue */
392 priv->cfg->ops->lib->txq_init(priv, txq); 394 priv->cfg->ops->lib->txq_init(priv, txq);
@@ -440,22 +442,25 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
440 struct iwl_cmd_meta *out_meta; 442 struct iwl_cmd_meta *out_meta;
441 dma_addr_t phys_addr; 443 dma_addr_t phys_addr;
442 unsigned long flags; 444 unsigned long flags;
443 int len;
444 u32 idx; 445 u32 idx;
445 u16 fix_size; 446 u16 fix_size;
446 bool is_ct_kill = false; 447 bool is_ct_kill = false;
447 448
448 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
449 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); 449 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
450 450
451 /* If any of the command structures end up being larger than 451 /*
452 * If any of the command structures end up being larger than
452 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then 453 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
453 * we will need to increase the size of the TFD entries 454 * we will need to increase the size of the TFD entries
454 * Also, check to see if command buffer should not exceed the size 455 * Also, check to see if command buffer should not exceed the size
455 * of device_cmd and max_cmd_size. */ 456 * of device_cmd and max_cmd_size.
456 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 457 */
457 !(cmd->flags & CMD_SIZE_HUGE)); 458 if (WARN_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
458 BUG_ON(fix_size > IWL_MAX_CMD_SIZE); 459 !(cmd->flags & CMD_SIZE_HUGE)))
460 return -EINVAL;
461
462 if (WARN_ON(fix_size > IWL_MAX_CMD_SIZE))
463 return -EINVAL;
459 464
460 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { 465 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
461 IWL_WARN(priv, "Not sending command - %s KILL\n", 466 IWL_WARN(priv, "Not sending command - %s KILL\n",
@@ -463,35 +468,38 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
463 return -EIO; 468 return -EIO;
464 } 469 }
465 470
471 /*
472 * As we only have a single huge buffer, check that the command
473 * is synchronous (otherwise buffers could end up being reused).
474 */
475
476 if (WARN_ON((cmd->flags & CMD_ASYNC) && (cmd->flags & CMD_SIZE_HUGE)))
477 return -EINVAL;
478
479 spin_lock_irqsave(&priv->hcmd_lock, flags);
480
466 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 481 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
482 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
483
467 IWL_ERR(priv, "No space in command queue\n"); 484 IWL_ERR(priv, "No space in command queue\n");
468 if (priv->cfg->ops->lib->tt_ops.ct_kill_check) { 485 is_ct_kill = iwl_check_for_ct_kill(priv);
469 is_ct_kill =
470 priv->cfg->ops->lib->tt_ops.ct_kill_check(priv);
471 }
472 if (!is_ct_kill) { 486 if (!is_ct_kill) {
473 IWL_ERR(priv, "Restarting adapter due to queue full\n"); 487 IWL_ERR(priv, "Restarting adapter due to queue full\n");
474 queue_work(priv->workqueue, &priv->restart); 488 iwlagn_fw_error(priv, false);
475 } 489 }
476 return -ENOSPC; 490 return -ENOSPC;
477 } 491 }
478 492
479 spin_lock_irqsave(&priv->hcmd_lock, flags);
480
481 /* If this is a huge cmd, mark the huge flag also on the meta.flags
482 * of the _original_ cmd. This is used for DMA mapping clean up.
483 */
484 if (cmd->flags & CMD_SIZE_HUGE) {
485 idx = get_cmd_index(q, q->write_ptr, 0);
486 txq->meta[idx].flags = CMD_SIZE_HUGE;
487 }
488
489 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 493 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
490 out_cmd = txq->cmd[idx]; 494 out_cmd = txq->cmd[idx];
491 out_meta = &txq->meta[idx]; 495 out_meta = &txq->meta[idx];
492 496
497 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
498 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
499 return -ENOSPC;
500 }
501
493 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 502 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
494 out_meta->flags = cmd->flags;
495 if (cmd->flags & CMD_WANT_SKB) 503 if (cmd->flags & CMD_WANT_SKB)
496 out_meta->source = cmd; 504 out_meta->source = cmd;
497 if (cmd->flags & CMD_ASYNC) 505 if (cmd->flags & CMD_ASYNC)
@@ -508,9 +516,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
508 INDEX_TO_SEQ(q->write_ptr)); 516 INDEX_TO_SEQ(q->write_ptr));
509 if (cmd->flags & CMD_SIZE_HUGE) 517 if (cmd->flags & CMD_SIZE_HUGE)
510 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 518 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
511 len = sizeof(struct iwl_device_cmd);
512 if (idx == TFD_CMD_SLOTS)
513 len = IWL_MAX_CMD_SIZE;
514 519
515#ifdef CONFIG_IWLWIFI_DEBUG 520#ifdef CONFIG_IWLWIFI_DEBUG
516 switch (out_cmd->hdr.cmd) { 521 switch (out_cmd->hdr.cmd) {
@@ -532,17 +537,20 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
532 q->write_ptr, idx, priv->cmd_queue); 537 q->write_ptr, idx, priv->cmd_queue);
533 } 538 }
534#endif 539#endif
535 txq->need_update = 1;
536
537 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
538 /* Set up entry in queue's byte count circular buffer */
539 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
540
541 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, 540 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
542 fix_size, PCI_DMA_BIDIRECTIONAL); 541 fix_size, PCI_DMA_BIDIRECTIONAL);
542 if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
543 idx = -ENOMEM;
544 goto out;
545 }
546
543 dma_unmap_addr_set(out_meta, mapping, phys_addr); 547 dma_unmap_addr_set(out_meta, mapping, phys_addr);
544 dma_unmap_len_set(out_meta, len, fix_size); 548 dma_unmap_len_set(out_meta, len, fix_size);
545 549
550 out_meta->flags = cmd->flags | CMD_MAPPED;
551
552 txq->need_update = 1;
553
546 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags); 554 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
547 555
548 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 556 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
@@ -553,6 +561,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
553 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 561 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
554 iwl_txq_update_write_ptr(priv, txq); 562 iwl_txq_update_write_ptr(priv, txq);
555 563
564 out:
556 spin_unlock_irqrestore(&priv->hcmd_lock, flags); 565 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
557 return idx; 566 return idx;
558} 567}
@@ -584,7 +593,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
584 if (nfreed++ > 0) { 593 if (nfreed++ > 0) {
585 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, 594 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
586 q->write_ptr, q->read_ptr); 595 q->write_ptr, q->read_ptr);
587 queue_work(priv->workqueue, &priv->restart); 596 iwlagn_fw_error(priv, false);
588 } 597 }
589 598
590 } 599 }
@@ -609,6 +618,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
609 struct iwl_device_cmd *cmd; 618 struct iwl_device_cmd *cmd;
610 struct iwl_cmd_meta *meta; 619 struct iwl_cmd_meta *meta;
611 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 620 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
621 unsigned long flags;
612 622
613 /* If a Tx command is being handled and it isn't in the actual 623 /* If a Tx command is being handled and it isn't in the actual
614 * command queue then there a command routing bug has been introduced 624 * command queue then there a command routing bug has been introduced
@@ -622,14 +632,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
622 return; 632 return;
623 } 633 }
624 634
625 /* If this is a huge cmd, clear the huge flag on the meta.flags
626 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
627 * the DMA buffer for the scan (huge) command.
628 */
629 if (huge) {
630 cmd_index = get_cmd_index(&txq->q, index, 0);
631 txq->meta[cmd_index].flags = 0;
632 }
633 cmd_index = get_cmd_index(&txq->q, index, huge); 635 cmd_index = get_cmd_index(&txq->q, index, huge);
634 cmd = txq->cmd[cmd_index]; 636 cmd = txq->cmd[cmd_index];
635 meta = &txq->meta[cmd_index]; 637 meta = &txq->meta[cmd_index];
@@ -646,6 +648,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
646 } else if (meta->callback) 648 } else if (meta->callback)
647 meta->callback(priv, cmd, pkt); 649 meta->callback(priv, cmd, pkt);
648 650
651 spin_lock_irqsave(&priv->hcmd_lock, flags);
652
649 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); 653 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
650 654
651 if (!(meta->flags & CMD_ASYNC)) { 655 if (!(meta->flags & CMD_ASYNC)) {
@@ -654,5 +658,9 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
654 get_cmd_string(cmd->hdr.cmd)); 658 get_cmd_string(cmd->hdr.cmd));
655 wake_up_interruptible(&priv->wait_command_queue); 659 wake_up_interruptible(&priv->wait_command_queue);
656 } 660 }
661
662 /* Mark as unmapped */
657 meta->flags = 0; 663 meta->flags = 0;
664
665 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
658} 666}
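
The iwl-tx.c hunks all serve one invariant: CMD_MAPPED is set in meta->flags only while a DMA mapping is live, so every unmap path can key off that flag instead of guessing from CMD_SIZE_HUGE. A condensed sketch of that life cycle; the wrapper functions and their arguments are made up, but the field, flag and helper names follow the patch:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include "iwl-dev.h"	/* struct iwl_priv, iwl_cmd_meta, iwl_device_cmd (assumed) */

/* Map the host command and mark the meta entry as live (sketch). */
static int example_map_cmd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			   struct iwl_device_cmd *out_cmd,
			   u16 fix_size, u32 cmd_flags)
{
	dma_addr_t phys_addr;

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr)))
		return -ENOMEM;			/* nothing to undo yet */

	dma_unmap_addr_set(meta, mapping, phys_addr);
	dma_unmap_len_set(meta, len, fix_size);
	meta->flags = cmd_flags | CMD_MAPPED;	/* mapping is now live */
	return 0;
}

/* Tear down only entries that were actually mapped (sketch). */
static void example_unmap_cmd(struct iwl_priv *priv, struct iwl_cmd_meta *meta)
{
	if (!(meta->flags & CMD_MAPPED))
		return;

	pci_unmap_single(priv->pci_dev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);
	meta->flags = 0;			/* mapping no longer live */
}
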
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 9a57cf6a488f..5665a1a9b99e 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -1576,7 +1576,8 @@ static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
1576 IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len); 1576 IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
1577 1577
1578 __skb_queue_head_init(&list); 1578 __skb_queue_head_init(&list);
1579 ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0); 1579 ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0,
1580 true);
1580 1581
1581 while ((frame = __skb_dequeue(&list))) { 1582 while ((frame = __skb_dequeue(&list))) {
1582 ndev->stats.rx_packets++; 1583 ndev->stats.rx_packets++;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 5caa2ac14d61..5d637af2d7c3 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -6,6 +6,8 @@
6 * 6 *
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
9#include <linux/sched.h> 11#include <linux/sched.h>
10#include <linux/wait.h> 12#include <linux/wait.h>
11#include <linux/slab.h> 13#include <linux/slab.h>
@@ -122,8 +124,10 @@ static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
122} 124}
123 125
124 126
125/* Various firmware commands need the list of supported rates, but with 127/*
126 the hight-bit set for basic rates */ 128 * Various firmware commands need the list of supported rates, but with
 126 the high-bit set for basic rates */ 128 * the high-bit set for basic rates
130 */
127static int lbs_add_rates(u8 *rates) 131static int lbs_add_rates(u8 *rates)
128{ 132{
129 size_t i; 133 size_t i;
@@ -425,7 +429,7 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
425 return ie_len + 2; 429 return ie_len + 2;
426} 430}
427 431
428/*************************************************************************** 432/*
429 * Set Channel 433 * Set Channel
430 */ 434 */
431 435
@@ -452,7 +456,7 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
452 456
453 457
454 458
455/*************************************************************************** 459/*
456 * Scanning 460 * Scanning
457 */ 461 */
458 462
@@ -538,8 +542,10 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
538 goto done; 542 goto done;
539 } 543 }
540 544
541 /* Validity check: the TLV holds TSF values with 8 bytes each, so 545 /*
542 * the size in the TLV must match the nr_sets value */ 546 * Validity check: the TLV holds TSF values with 8 bytes each, so
547 * the size in the TLV must match the nr_sets value
548 */
543 i = get_unaligned_le16(tsfdesc); 549 i = get_unaligned_le16(tsfdesc);
544 tsfdesc += 2; 550 tsfdesc += 2;
545 if (i / 8 != scanresp->nr_sets) { 551 if (i / 8 != scanresp->nr_sets) {
@@ -581,8 +587,10 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
581 587
582 /* To find out the channel, we must parse the IEs */ 588 /* To find out the channel, we must parse the IEs */
583 ie = pos; 589 ie = pos;
584 /* 6+1+8+2+2: size of BSSID, RSSI, time stamp, beacon 590 /*
585 interval, capabilities */ 591 * 6+1+8+2+2: size of BSSID, RSSI, time stamp, beacon
592 * interval, capabilities
593 */
586 ielen = left = len - (6 + 1 + 8 + 2 + 2); 594 ielen = left = len - (6 + 1 + 8 + 2 + 2);
587 while (left >= 2) { 595 while (left >= 2) {
588 u8 id, elen; 596 u8 id, elen;
@@ -790,7 +798,7 @@ static int lbs_cfg_scan(struct wiphy *wiphy,
790 798
791 799
792 800
793/*************************************************************************** 801/*
794 * Events 802 * Events
795 */ 803 */
796 804
@@ -825,7 +833,7 @@ void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
825 833
826 834
827 835
828/*************************************************************************** 836/*
829 * Connect/disconnect 837 * Connect/disconnect
830 */ 838 */
831 839
@@ -950,8 +958,10 @@ static int lbs_enable_rsn(struct lbs_private *priv, int enable)
950 * Set WPA/WPA key material 958 * Set WPA/WPA key material
951 */ 959 */
952 960
953/* like "struct cmd_ds_802_11_key_material", but with cmd_header. Once we 961/*
954 * get rid of WEXT, this should go into host.h */ 962 * like "struct cmd_ds_802_11_key_material", but with cmd_header. Once we
963 * get rid of WEXT, this should go into host.h
964 */
955 965
956struct cmd_key_material { 966struct cmd_key_material {
957 struct cmd_header hdr; 967 struct cmd_header hdr;
@@ -1314,8 +1324,8 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
1314 sme->ssid, sme->ssid_len, 1324 sme->ssid, sme->ssid_len,
1315 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 1325 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
1316 if (!bss) { 1326 if (!bss) {
1317 lbs_pr_err("assoc: bss %pM not in scan results\n", 1327 wiphy_err(wiphy, "assoc: bss %pM not in scan results\n",
1318 sme->bssid); 1328 sme->bssid);
1319 ret = -ENOENT; 1329 ret = -ENOENT;
1320 goto done; 1330 goto done;
1321 } 1331 }
@@ -1372,8 +1382,8 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
1372 lbs_enable_rsn(priv, sme->crypto.cipher_group != 0); 1382 lbs_enable_rsn(priv, sme->crypto.cipher_group != 0);
1373 break; 1383 break;
1374 default: 1384 default:
1375 lbs_pr_err("unsupported cipher group 0x%x\n", 1385 wiphy_err(wiphy, "unsupported cipher group 0x%x\n",
1376 sme->crypto.cipher_group); 1386 sme->crypto.cipher_group);
1377 ret = -ENOTSUPP; 1387 ret = -ENOTSUPP;
1378 goto done; 1388 goto done;
1379 } 1389 }
@@ -1491,7 +1501,7 @@ static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
1491 params->key, params->key_len); 1501 params->key, params->key_len);
1492 break; 1502 break;
1493 default: 1503 default:
1494 lbs_pr_err("unhandled cipher 0x%x\n", params->cipher); 1504 wiphy_err(wiphy, "unhandled cipher 0x%x\n", params->cipher);
1495 ret = -ENOTSUPP; 1505 ret = -ENOTSUPP;
1496 break; 1506 break;
1497 } 1507 }
@@ -1536,7 +1546,7 @@ static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
1536} 1546}
1537 1547
1538 1548
1539/*************************************************************************** 1549/*
1540 * Get station 1550 * Get station
1541 */ 1551 */
1542 1552
@@ -1581,7 +1591,7 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
1581 1591
1582 1592
1583 1593
1584/*************************************************************************** 1594/*
1585 * "Site survey", here just current channel and noise level 1595 * "Site survey", here just current channel and noise level
1586 */ 1596 */
1587 1597
@@ -1614,7 +1624,7 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
1614 1624
1615 1625
1616 1626
1617/*************************************************************************** 1627/*
1618 * Change interface 1628 * Change interface
1619 */ 1629 */
1620 1630
@@ -1656,11 +1666,12 @@ static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev,
1656 1666
1657 1667
1658 1668
1659/*************************************************************************** 1669/*
1660 * IBSS (Ad-Hoc) 1670 * IBSS (Ad-Hoc)
1661 */ 1671 */
1662 1672
1663/* The firmware needs the following bits masked out of the beacon-derived 1673/*
1674 * The firmware needs the following bits masked out of the beacon-derived
1664 * capability field when associating/joining to a BSS: 1675 * capability field when associating/joining to a BSS:
1665 * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused) 1676 * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused)
1666 */ 1677 */
@@ -1999,7 +2010,7 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1999 2010
2000 2011
2001 2012
2002/*************************************************************************** 2013/*
2003 * Initialization 2014 * Initialization
2004 */ 2015 */
2005 2016
@@ -2118,13 +2129,13 @@ int lbs_cfg_register(struct lbs_private *priv)
2118 2129
2119 ret = wiphy_register(wdev->wiphy); 2130 ret = wiphy_register(wdev->wiphy);
2120 if (ret < 0) 2131 if (ret < 0)
2121 lbs_pr_err("cannot register wiphy device\n"); 2132 pr_err("cannot register wiphy device\n");
2122 2133
2123 priv->wiphy_registered = true; 2134 priv->wiphy_registered = true;
2124 2135
2125 ret = register_netdev(priv->dev); 2136 ret = register_netdev(priv->dev);
2126 if (ret) 2137 if (ret)
2127 lbs_pr_err("cannot register network device\n"); 2138 pr_err("cannot register network device\n");
2128 2139
2129 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); 2140 INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
2130 2141
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index f3ac62431a30..84566db486d2 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1,7 +1,7 @@
1/** 1/*
2 * This file contains the handling of command. 2 * This file contains the handling of command.
3 * It prepares command and sends it to firmware when it is ready. 3 * It prepares command and sends it to firmware when it is ready.
4 */ 4 */
5 5
6#include <linux/kfifo.h> 6#include <linux/kfifo.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
@@ -16,14 +16,14 @@
16#define CAL_RSSI(snr, nf) ((s32)((s32)(snr) + CAL_NF(nf))) 16#define CAL_RSSI(snr, nf) ((s32)((s32)(snr) + CAL_NF(nf)))
17 17
18/** 18/**
19 * @brief Simple callback that copies response back into command 19 * lbs_cmd_copyback - Simple callback that copies response back into command
20 * 20 *
21 * @param priv A pointer to struct lbs_private structure 21 * @priv: A pointer to &struct lbs_private structure
22 * @param extra A pointer to the original command structure for which 22 * @extra: A pointer to the original command structure for which
23 * 'resp' is a response 23 * 'resp' is a response
24 * @param resp A pointer to the command response 24 * @resp: A pointer to the command response
25 * 25 *
26 * @return 0 on success, error on failure 26 * returns: 0 on success, error on failure
27 */ 27 */
28int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra, 28int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
29 struct cmd_header *resp) 29 struct cmd_header *resp)
@@ -38,15 +38,15 @@ int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
38EXPORT_SYMBOL_GPL(lbs_cmd_copyback); 38EXPORT_SYMBOL_GPL(lbs_cmd_copyback);
39 39
40/** 40/**
41 * @brief Simple callback that ignores the result. Use this if 41 * lbs_cmd_async_callback - Simple callback that ignores the result.
42 * you just want to send a command to the hardware, but don't 42 * Use this if you just want to send a command to the hardware, but don't
43 * care for the result. 43 * care for the result.
44 * 44 *
45 * @param priv ignored 45 * @priv: ignored
46 * @param extra ignored 46 * @extra: ignored
47 * @param resp ignored 47 * @resp: ignored
48 * 48 *
49 * @return 0 for success 49 * returns: 0 for success
50 */ 50 */
51static int lbs_cmd_async_callback(struct lbs_private *priv, unsigned long extra, 51static int lbs_cmd_async_callback(struct lbs_private *priv, unsigned long extra,
52 struct cmd_header *resp) 52 struct cmd_header *resp)
@@ -56,10 +56,11 @@ static int lbs_cmd_async_callback(struct lbs_private *priv, unsigned long extra,
56 56
57 57
58/** 58/**
59 * @brief Checks whether a command is allowed in Power Save mode 59 * is_command_allowed_in_ps - tests if a command is allowed in Power Save mode
60 *
61 * @cmd: the command ID
60 * 62 *
61 * @param command the command ID 63 * returns: 1 if allowed, 0 if not allowed
62 * @return 1 if allowed, 0 if not allowed
63 */ 64 */
64static u8 is_command_allowed_in_ps(u16 cmd) 65static u8 is_command_allowed_in_ps(u16 cmd)
65{ 66{
@@ -75,11 +76,12 @@ static u8 is_command_allowed_in_ps(u16 cmd)
75} 76}
76 77
77/** 78/**
78 * @brief Updates the hardware details like MAC address and regulatory region 79 * lbs_update_hw_spec - Updates the hardware details like MAC address
80 * and regulatory region
79 * 81 *
80 * @param priv A pointer to struct lbs_private structure 82 * @priv: A pointer to &struct lbs_private structure
81 * 83 *
82 * @return 0 on success, error on failure 84 * returns: 0 on success, error on failure
83 */ 85 */
84int lbs_update_hw_spec(struct lbs_private *priv) 86int lbs_update_hw_spec(struct lbs_private *priv)
85{ 87{
@@ -108,7 +110,7 @@ int lbs_update_hw_spec(struct lbs_private *priv)
108 * CF card firmware 5.0.16p0: cap 0x00000303 110 * CF card firmware 5.0.16p0: cap 0x00000303
109 * USB dongle firmware 5.110.17p2: cap 0x00000303 111 * USB dongle firmware 5.110.17p2: cap 0x00000303
110 */ 112 */
111 lbs_pr_info("%pM, fw %u.%u.%up%u, cap 0x%08x\n", 113 netdev_info(priv->dev, "%pM, fw %u.%u.%up%u, cap 0x%08x\n",
112 cmd.permanentaddr, 114 cmd.permanentaddr,
113 priv->fwrelease >> 24 & 0xff, 115 priv->fwrelease >> 24 & 0xff,
114 priv->fwrelease >> 16 & 0xff, 116 priv->fwrelease >> 16 & 0xff,
@@ -139,7 +141,8 @@ int lbs_update_hw_spec(struct lbs_private *priv)
139 /* if it's unidentified region code, use the default (USA) */ 141 /* if it's unidentified region code, use the default (USA) */
140 if (i >= MRVDRV_MAX_REGION_CODE) { 142 if (i >= MRVDRV_MAX_REGION_CODE) {
141 priv->regioncode = 0x10; 143 priv->regioncode = 0x10;
142 lbs_pr_info("unidentified region code; using the default (USA)\n"); 144 netdev_info(priv->dev,
145 "unidentified region code; using the default (USA)\n");
143 } 146 }
144 147
145 if (priv->current_addr[0] == 0xff) 148 if (priv->current_addr[0] == 0xff)
@@ -209,7 +212,7 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
209 (uint8_t *)&cmd_config.wol_conf, 212 (uint8_t *)&cmd_config.wol_conf,
210 sizeof(struct wol_config)); 213 sizeof(struct wol_config));
211 } else { 214 } else {
212 lbs_pr_info("HOST_SLEEP_CFG failed %d\n", ret); 215 netdev_info(priv->dev, "HOST_SLEEP_CFG failed %d\n", ret);
213 } 216 }
214 217
215 return ret; 218 return ret;
@@ -217,14 +220,14 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
217EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg); 220EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg);
218 221
219/** 222/**
220 * @brief Sets the Power Save mode 223 * lbs_set_ps_mode - Sets the Power Save mode
221 * 224 *
222 * @param priv A pointer to struct lbs_private structure 225 * @priv: A pointer to &struct lbs_private structure
223 * @param cmd_action The Power Save operation (PS_MODE_ACTION_ENTER_PS or 226 * @cmd_action: The Power Save operation (PS_MODE_ACTION_ENTER_PS or
224 * PS_MODE_ACTION_EXIT_PS) 227 * PS_MODE_ACTION_EXIT_PS)
225 * @param block Whether to block on a response or not 228 * @block: Whether to block on a response or not
226 * 229 *
227 * @return 0 on success, error on failure 230 * returns: 0 on success, error on failure
228 */ 231 */
229int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block) 232int lbs_set_ps_mode(struct lbs_private *priv, u16 cmd_action, bool block)
230{ 233{
@@ -312,7 +315,7 @@ static int lbs_wait_for_ds_awake(struct lbs_private *priv)
312 if (priv->is_deep_sleep) { 315 if (priv->is_deep_sleep) {
313 if (!wait_event_interruptible_timeout(priv->ds_awake_q, 316 if (!wait_event_interruptible_timeout(priv->ds_awake_q,
314 !priv->is_deep_sleep, (10 * HZ))) { 317 !priv->is_deep_sleep, (10 * HZ))) {
315 lbs_pr_err("ds_awake_q: timer expired\n"); 318 netdev_err(priv->dev, "ds_awake_q: timer expired\n");
316 ret = -1; 319 ret = -1;
317 } 320 }
318 } 321 }
@@ -337,7 +340,7 @@ int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep)
337 netif_carrier_off(priv->dev); 340 netif_carrier_off(priv->dev);
338 } 341 }
339 } else { 342 } else {
340 lbs_pr_err("deep sleep: already enabled\n"); 343 netdev_err(priv->dev, "deep sleep: already enabled\n");
341 } 344 }
342 } else { 345 } else {
343 if (priv->is_deep_sleep) { 346 if (priv->is_deep_sleep) {
@@ -347,8 +350,8 @@ int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep)
347 if (!ret) { 350 if (!ret) {
348 ret = lbs_wait_for_ds_awake(priv); 351 ret = lbs_wait_for_ds_awake(priv);
349 if (ret) 352 if (ret)
350 lbs_pr_err("deep sleep: wakeup" 353 netdev_err(priv->dev,
351 "failed\n"); 354 "deep sleep: wakeup failed\n");
352 } 355 }
353 } 356 }
354 } 357 }
@@ -382,8 +385,9 @@ int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep)
382 ret = lbs_host_sleep_cfg(priv, priv->wol_criteria, 385 ret = lbs_host_sleep_cfg(priv, priv->wol_criteria,
383 (struct wol_config *)NULL); 386 (struct wol_config *)NULL);
384 if (ret) { 387 if (ret) {
385 lbs_pr_info("Host sleep configuration failed: " 388 netdev_info(priv->dev,
386 "%d\n", ret); 389 "Host sleep configuration failed: %d\n",
390 ret);
387 return ret; 391 return ret;
388 } 392 }
389 if (priv->psstate == PS_STATE_FULL_POWER) { 393 if (priv->psstate == PS_STATE_FULL_POWER) {
@@ -393,19 +397,21 @@ int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep)
393 sizeof(cmd), 397 sizeof(cmd),
394 lbs_ret_host_sleep_activate, 0); 398 lbs_ret_host_sleep_activate, 0);
395 if (ret) 399 if (ret)
396 lbs_pr_info("HOST_SLEEP_ACTIVATE " 400 netdev_info(priv->dev,
397 "failed: %d\n", ret); 401 "HOST_SLEEP_ACTIVATE failed: %d\n",
402 ret);
398 } 403 }
399 404
400 if (!wait_event_interruptible_timeout( 405 if (!wait_event_interruptible_timeout(
401 priv->host_sleep_q, 406 priv->host_sleep_q,
402 priv->is_host_sleep_activated, 407 priv->is_host_sleep_activated,
403 (10 * HZ))) { 408 (10 * HZ))) {
404 lbs_pr_err("host_sleep_q: timer expired\n"); 409 netdev_err(priv->dev,
410 "host_sleep_q: timer expired\n");
405 ret = -1; 411 ret = -1;
406 } 412 }
407 } else { 413 } else {
408 lbs_pr_err("host sleep: already enabled\n"); 414 netdev_err(priv->dev, "host sleep: already enabled\n");
409 } 415 }
410 } else { 416 } else {
411 if (priv->is_host_sleep_activated) 417 if (priv->is_host_sleep_activated)
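
Both the deep-sleep and host-sleep paths above park the caller on a waitqueue with a ten-second ceiling. A condensed sketch of that pattern; the wake-up site shown here is hypothetical (the real one lives in the command-response and event handlers):

/* Waiter: wait_event_interruptible_timeout() returns 0 on timeout,
 * a positive remaining-jiffies count once the condition is true,
 * or -ERESTARTSYS if a signal arrived. */
if (!wait_event_interruptible_timeout(priv->host_sleep_q,
                                      priv->is_host_sleep_activated,
                                      10 * HZ)) {
        netdev_err(priv->dev, "host_sleep_q: timer expired\n");
        ret = -1;
}

/* Waker (illustrative placement): set the condition, then wake. */
priv->is_host_sleep_activated = 1;
wake_up_interruptible(&priv->host_sleep_q);
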
@@ -417,13 +423,13 @@ int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep)
417} 423}
418 424
419/** 425/**
420 * @brief Set an SNMP MIB value 426 * lbs_set_snmp_mib - Set an SNMP MIB value
421 * 427 *
422 * @param priv A pointer to struct lbs_private structure 428 * @priv: A pointer to &struct lbs_private structure
423 * @param oid The OID to set in the firmware 429 * @oid: The OID to set in the firmware
424 * @param val Value to set the OID to 430 * @val: Value to set the OID to
425 * 431 *
426 * @return 0 on success, error on failure 432 * returns: 0 on success, error on failure
427 */ 433 */
428int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val) 434int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
429{ 435{
@@ -467,13 +473,13 @@ out:
467} 473}
468 474
469/** 475/**
470 * @brief Get an SNMP MIB value 476 * lbs_get_snmp_mib - Get an SNMP MIB value
471 * 477 *
472 * @param priv A pointer to struct lbs_private structure 478 * @priv: A pointer to &struct lbs_private structure
473 * @param oid The OID to retrieve from the firmware 479 * @oid: The OID to retrieve from the firmware
474 * @param out_val Location for the returned value 480 * @out_val: Location for the returned value
475 * 481 *
476 * @return 0 on success, error on failure 482 * returns: 0 on success, error on failure
477 */ 483 */
478int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val) 484int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
479{ 485{
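
A usage sketch for the two SNMP MIB helpers documented above. SNMP_MIB_OID_RTS_THRESHOLD is assumed to be one of the OID constants in host.h; MRVDRV_RTS_MAX_VALUE (2347) comes from defs.h later in this patch:

u16 rts;
int ret;

ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &rts);
if (!ret && rts != MRVDRV_RTS_MAX_VALUE)
        ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD,
                               MRVDRV_RTS_MAX_VALUE);
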
@@ -510,14 +516,14 @@ out:
510} 516}
511 517
512/** 518/**
513 * @brief Get the min, max, and current TX power 519 * lbs_get_tx_power - Get the min, max, and current TX power
514 * 520 *
515 * @param priv A pointer to struct lbs_private structure 521 * @priv: A pointer to &struct lbs_private structure
516 * @param curlevel Current power level in dBm 522 * @curlevel: Current power level in dBm
517 * @param minlevel Minimum supported power level in dBm (optional) 523 * @minlevel: Minimum supported power level in dBm (optional)
518 * @param maxlevel Maximum supported power level in dBm (optional) 524 * @maxlevel: Maximum supported power level in dBm (optional)
519 * 525 *
520 * @return 0 on success, error on failure 526 * returns: 0 on success, error on failure
521 */ 527 */
522int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, 528int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
523 s16 *maxlevel) 529 s16 *maxlevel)
@@ -545,12 +551,12 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
545} 551}
546 552
547/** 553/**
548 * @brief Set the TX power 554 * lbs_set_tx_power - Set the TX power
549 * 555 *
550 * @param priv A pointer to struct lbs_private structure 556 * @priv: A pointer to &struct lbs_private structure
551 * @param dbm The desired power level in dBm 557 * @dbm: The desired power level in dBm
552 * 558 *
553 * @return 0 on success, error on failure 559 * returns: 0 on success, error on failure
554 */ 560 */
555int lbs_set_tx_power(struct lbs_private *priv, s16 dbm) 561int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
556{ 562{
@@ -573,12 +579,13 @@ int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
573} 579}
574 580
575/** 581/**
576 * @brief Enable or disable monitor mode (only implemented on OLPC usb8388 FW) 582 * lbs_set_monitor_mode - Enable or disable monitor mode
583 * (only implemented on OLPC usb8388 FW)
577 * 584 *
578 * @param priv A pointer to struct lbs_private structure 585 * @priv: A pointer to &struct lbs_private structure
579 * @param enable 1 to enable monitor mode, 0 to disable 586 * @enable: 1 to enable monitor mode, 0 to disable
580 * 587 *
581 * @return 0 on success, error on failure 588 * returns: 0 on success, error on failure
582 */ 589 */
583int lbs_set_monitor_mode(struct lbs_private *priv, int enable) 590int lbs_set_monitor_mode(struct lbs_private *priv, int enable)
584{ 591{
@@ -604,11 +611,11 @@ int lbs_set_monitor_mode(struct lbs_private *priv, int enable)
604} 611}
605 612
606/** 613/**
607 * @brief Get the radio channel 614 * lbs_get_channel - Get the radio channel
608 * 615 *
609 * @param priv A pointer to struct lbs_private structure 616 * @priv: A pointer to &struct lbs_private structure
610 * 617 *
611 * @return The channel on success, error on failure 618 * returns: The channel on success, error on failure
612 */ 619 */
613static int lbs_get_channel(struct lbs_private *priv) 620static int lbs_get_channel(struct lbs_private *priv)
614{ 621{
@@ -650,12 +657,12 @@ int lbs_update_channel(struct lbs_private *priv)
650} 657}
651 658
652/** 659/**
653 * @brief Set the radio channel 660 * lbs_set_channel - Set the radio channel
654 * 661 *
655 * @param priv A pointer to struct lbs_private structure 662 * @priv: A pointer to &struct lbs_private structure
656 * @param channel The desired channel, or 0 to clear a locked channel 663 * @channel: The desired channel, or 0 to clear a locked channel
657 * 664 *
658 * @return 0 on success, error on failure 665 * returns: 0 on success, error on failure
659 */ 666 */
660int lbs_set_channel(struct lbs_private *priv, u8 channel) 667int lbs_set_channel(struct lbs_private *priv, u8 channel)
661{ 668{
@@ -686,12 +693,13 @@ out:
686} 693}
687 694
688/** 695/**
689 * @brief Get current RSSI and noise floor 696 * lbs_get_rssi - Get current RSSI and noise floor
690 * 697 *
691 * @param priv A pointer to struct lbs_private structure 698 * @priv: A pointer to &struct lbs_private structure
692 * @param rssi On successful return, signal level in mBm 699 * @rssi: On successful return, signal level in mBm
700 * @nf: On successful return, Noise floor
693 * 701 *
694 * @return The channel on success, error on failure 702 * returns: The channel on success, error on failure
695 */ 703 */
696int lbs_get_rssi(struct lbs_private *priv, s8 *rssi, s8 *nf) 704int lbs_get_rssi(struct lbs_private *priv, s8 *rssi, s8 *nf)
697{ 705{
@@ -719,13 +727,14 @@ int lbs_get_rssi(struct lbs_private *priv, s8 *rssi, s8 *nf)
719} 727}
720 728
721/** 729/**
722 * @brief Send regulatory and 802.11d domain information to the firmware 730 * lbs_set_11d_domain_info - Send regulatory and 802.11d domain information
731 * to the firmware
723 * 732 *
724 * @param priv pointer to struct lbs_private 733 * @priv: pointer to &struct lbs_private
725 * @param request cfg80211 regulatory request structure 734 * @request: cfg80211 regulatory request structure
726 * @param bands the device's supported bands and channels 735 * @bands: the device's supported bands and channels
727 * 736 *
728 * @return 0 on success, error code on failure 737 * returns: 0 on success, error code on failure
729*/ 738*/
730int lbs_set_11d_domain_info(struct lbs_private *priv, 739int lbs_set_11d_domain_info(struct lbs_private *priv,
731 struct regulatory_request *request, 740 struct regulatory_request *request,
@@ -842,15 +851,15 @@ int lbs_set_11d_domain_info(struct lbs_private *priv,
842} 851}
843 852
844/** 853/**
845 * @brief Read a MAC, Baseband, or RF register 854 * lbs_get_reg - Read a MAC, Baseband, or RF register
846 * 855 *
847 * @param priv pointer to struct lbs_private 856 * @priv: pointer to &struct lbs_private
848 * @param cmd register command, one of CMD_MAC_REG_ACCESS, 857 * @reg: register command, one of CMD_MAC_REG_ACCESS,
849 * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS 858 * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS
850 * @param offset byte offset of the register to get 859 * @offset: byte offset of the register to get
851 * @param value on success, the value of the register at 'offset' 860 * @value: on success, the value of the register at 'offset'
852 * 861 *
853 * @return 0 on success, error code on failure 862 * returns: 0 on success, error code on failure
854*/ 863*/
855int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value) 864int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value)
856{ 865{
@@ -886,15 +895,15 @@ out:
886} 895}
887 896
888/** 897/**
889 * @brief Write a MAC, Baseband, or RF register 898 * lbs_set_reg - Write a MAC, Baseband, or RF register
890 * 899 *
891 * @param priv pointer to struct lbs_private 900 * @priv: pointer to &struct lbs_private
892 * @param cmd register command, one of CMD_MAC_REG_ACCESS, 901 * @reg: register command, one of CMD_MAC_REG_ACCESS,
893 * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS 902 * CMD_BBP_REG_ACCESS, or CMD_RF_REG_ACCESS
894 * @param offset byte offset of the register to set 903 * @offset: byte offset of the register to set
895 * @param value the value to write to the register at 'offset' 904 * @value: the value to write to the register at 'offset'
896 * 905 *
897 * @return 0 on success, error code on failure 906 * returns: 0 on success, error code on failure
898*/ 907*/
899int lbs_set_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 value) 908int lbs_set_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 value)
900{ 909{
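
An illustrative read-modify-write built on the two register helpers documented above. CMD_MAC_REG_ACCESS is the register-bank selector the comments refer to; the offset 0x10 and the bit being set are made up for the example:

u32 val;
int ret;

ret = lbs_get_reg(priv, CMD_MAC_REG_ACCESS, 0x10, &val);
if (!ret)
        ret = lbs_set_reg(priv, CMD_MAC_REG_ACCESS, 0x10, val | BIT(0));
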
@@ -1002,7 +1011,8 @@ static void lbs_submit_command(struct lbs_private *priv,
1002 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize); 1011 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
1003 1012
1004 if (ret) { 1013 if (ret) {
1005 lbs_pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret); 1014 netdev_info(priv->dev, "DNLD_CMD: hw_host_to_card failed: %d\n",
1015 ret);
1006 /* Let the timer kick in and retry, and potentially reset 1016 /* Let the timer kick in and retry, and potentially reset
1007 the whole thing if the condition persists */ 1017 the whole thing if the condition persists */
1008 timeo = HZ/4; 1018 timeo = HZ/4;
@@ -1023,7 +1033,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1023 lbs_deb_leave(LBS_DEB_HOST); 1033 lbs_deb_leave(LBS_DEB_HOST);
1024} 1034}
1025 1035
1026/** 1036/*
1027 * This function inserts command node to cmdfreeq 1037 * This function inserts command node to cmdfreeq
1028 * after cleans it. Requires priv->driver_lock held. 1038 * after cleans it. Requires priv->driver_lock held.
1029 */ 1039 */
@@ -1125,11 +1135,12 @@ void lbs_set_mac_control(struct lbs_private *priv)
1125} 1135}
1126 1136
1127/** 1137/**
1128 * @brief This function allocates the command buffer and link 1138 * lbs_allocate_cmd_buffer - allocates the command buffer and links
1129 * it to command free queue. 1139 * it to command free queue
1140 *
1141 * @priv: A pointer to &struct lbs_private structure
1130 * 1142 *
1131 * @param priv A pointer to struct lbs_private structure 1143 * returns: 0 for success or -1 on error
1132 * @return 0 or -1
1133 */ 1144 */
1134int lbs_allocate_cmd_buffer(struct lbs_private *priv) 1145int lbs_allocate_cmd_buffer(struct lbs_private *priv)
1135{ 1146{
@@ -1171,10 +1182,11 @@ done:
1171} 1182}
1172 1183
1173/** 1184/**
1174 * @brief This function frees the command buffer. 1185 * lbs_free_cmd_buffer - free the command buffer
1175 * 1186 *
1176 * @param priv A pointer to struct lbs_private structure 1187 * @priv: A pointer to &struct lbs_private structure
1177 * @return 0 or -1 1188 *
1189 * returns: 0 for success
1178 */ 1190 */
1179int lbs_free_cmd_buffer(struct lbs_private *priv) 1191int lbs_free_cmd_buffer(struct lbs_private *priv)
1180{ 1192{
@@ -1211,11 +1223,13 @@ done:
1211} 1223}
1212 1224
1213/** 1225/**
1214 * @brief This function gets a free command node if available in 1226 * lbs_get_free_cmd_node - gets a free command node if available in
1215 * command free queue. 1227 * command free queue
1228 *
1229 * @priv: A pointer to &struct lbs_private structure
1216 * 1230 *
1217 * @param priv A pointer to struct lbs_private structure 1231 * returns: A pointer to &cmd_ctrl_node structure on success
1218 * @return cmd_ctrl_node A pointer to cmd_ctrl_node structure or NULL 1232 * or %NULL on error
1219 */ 1233 */
1220static struct cmd_ctrl_node *lbs_get_free_cmd_node(struct lbs_private *priv) 1234static struct cmd_ctrl_node *lbs_get_free_cmd_node(struct lbs_private *priv)
1221{ 1235{
@@ -1245,12 +1259,12 @@ static struct cmd_ctrl_node *lbs_get_free_cmd_node(struct lbs_private *priv)
1245} 1259}
1246 1260
1247/** 1261/**
1248 * @brief This function executes next command in command 1262 * lbs_execute_next_command - execute next command in command
1249 * pending queue. It will put firmware back to PS mode 1263 * pending queue. Will put firmware back to PS mode if applicable.
1250 * if applicable.
1251 * 1264 *
1252 * @param priv A pointer to struct lbs_private structure 1265 * @priv: A pointer to &struct lbs_private structure
1253 * @return 0 or -1 1266 *
1267 * returns: 0 on success or -1 on error
1254 */ 1268 */
1255int lbs_execute_next_command(struct lbs_private *priv) 1269int lbs_execute_next_command(struct lbs_private *priv)
1256{ 1270{
@@ -1267,7 +1281,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
1267 spin_lock_irqsave(&priv->driver_lock, flags); 1281 spin_lock_irqsave(&priv->driver_lock, flags);
1268 1282
1269 if (priv->cur_cmd) { 1283 if (priv->cur_cmd) {
1270 lbs_pr_alert( "EXEC_NEXT_CMD: already processing command!\n"); 1284 netdev_alert(priv->dev,
1285 "EXEC_NEXT_CMD: already processing command!\n");
1271 spin_unlock_irqrestore(&priv->driver_lock, flags); 1286 spin_unlock_irqrestore(&priv->driver_lock, flags);
1272 ret = -1; 1287 ret = -1;
1273 goto done; 1288 goto done;
@@ -1431,7 +1446,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv)
1431 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &confirm_sleep, 1446 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &confirm_sleep,
1432 sizeof(confirm_sleep)); 1447 sizeof(confirm_sleep));
1433 if (ret) { 1448 if (ret) {
1434 lbs_pr_alert("confirm_sleep failed\n"); 1449 netdev_alert(priv->dev, "confirm_sleep failed\n");
1435 goto out; 1450 goto out;
1436 } 1451 }
1437 1452
@@ -1456,12 +1471,12 @@ out:
1456} 1471}
1457 1472
1458/** 1473/**
1459 * @brief This function checks condition and prepares to 1474 * lbs_ps_confirm_sleep - checks condition and prepares to
1460 * send sleep confirm command to firmware if ok. 1475 * send sleep confirm command to firmware if ok
1476 *
1477 * @priv: A pointer to &struct lbs_private structure
1461 * 1478 *
1462 * @param priv A pointer to struct lbs_private structure 1479 * returns: n/a
1463 * @param psmode Power Saving mode
1464 * @return n/a
1465 */ 1480 */
1466void lbs_ps_confirm_sleep(struct lbs_private *priv) 1481void lbs_ps_confirm_sleep(struct lbs_private *priv)
1467{ 1482{
@@ -1501,16 +1516,16 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
1501 1516
1502 1517
1503/** 1518/**
1504 * @brief Configures the transmission power control functionality. 1519 * lbs_set_tpc_cfg - Configures the transmission power control functionality
1505 * 1520 *
1506 * @param priv A pointer to struct lbs_private structure 1521 * @priv: A pointer to &struct lbs_private structure
1507 * @param enable Transmission power control enable 1522 * @enable: Transmission power control enable
1508 * @param p0 Power level when link quality is good (dBm). 1523 * @p0: Power level when link quality is good (dBm).
1509 * @param p1 Power level when link quality is fair (dBm). 1524 * @p1: Power level when link quality is fair (dBm).
1510 * @param p2 Power level when link quality is poor (dBm). 1525 * @p2: Power level when link quality is poor (dBm).
1511 * @param usesnr Use Signal to Noise Ratio in TPC 1526 * @usesnr: Use Signal to Noise Ratio in TPC
1512 * 1527 *
1513 * @return 0 on success 1528 * returns: 0 on success
1514 */ 1529 */
1515int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, 1530int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
1516 int8_t p2, int usesnr) 1531 int8_t p2, int usesnr)
@@ -1533,15 +1548,15 @@ int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
1533} 1548}
1534 1549
1535/** 1550/**
1536 * @brief Configures the power adaptation settings. 1551 * lbs_set_power_adapt_cfg - Configures the power adaptation settings
1537 * 1552 *
1538 * @param priv A pointer to struct lbs_private structure 1553 * @priv: A pointer to &struct lbs_private structure
1539 * @param enable Power adaptation enable 1554 * @enable: Power adaptation enable
1540 * @param p0 Power level for 1, 2, 5.5 and 11 Mbps (dBm). 1555 * @p0: Power level for 1, 2, 5.5 and 11 Mbps (dBm).
1541 * @param p1 Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm). 1556 * @p1: Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm).
1542 * @param p2 Power level for 48 and 54 Mbps (dBm). 1557 * @p2: Power level for 48 and 54 Mbps (dBm).
1543 * 1558 *
1544 * @return 0 on Success 1559 * returns: 0 on Success
1545 */ 1560 */
1546 1561
1547int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 1562int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
@@ -1657,7 +1672,7 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
1657 spin_lock_irqsave(&priv->driver_lock, flags); 1672 spin_lock_irqsave(&priv->driver_lock, flags);
1658 ret = cmdnode->result; 1673 ret = cmdnode->result;
1659 if (ret) 1674 if (ret)
1660 lbs_pr_info("PREP_CMD: command 0x%04x failed: %d\n", 1675 netdev_info(priv->dev, "PREP_CMD: command 0x%04x failed: %d\n",
1661 command, ret); 1676 command, ret);
1662 1677
1663 __lbs_cleanup_and_insert_cmd(priv, cmdnode); 1678 __lbs_cleanup_and_insert_cmd(priv, cmdnode);
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 5e95da9dcc2e..207fc361db84 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -1,7 +1,8 @@
1/** 1/*
2 * This file contains the handling of command 2 * This file contains the handling of command
3 * responses as well as events generated by firmware. 3 * responses as well as events generated by firmware.
4 */ 4 */
5
5#include <linux/slab.h> 6#include <linux/slab.h>
6#include <linux/delay.h> 7#include <linux/delay.h>
7#include <linux/sched.h> 8#include <linux/sched.h>
@@ -12,12 +13,13 @@
12#include "cmd.h" 13#include "cmd.h"
13 14
14/** 15/**
15 * @brief This function handles disconnect event. it 16 * lbs_mac_event_disconnected - handles disconnect event. It
16 * reports disconnect to upper layer, clean tx/rx packets, 17 * reports disconnect to upper layer, clean tx/rx packets,
17 * reset link state etc. 18 * reset link state etc.
19 *
20 * @priv: A pointer to struct lbs_private structure
18 * 21 *
19 * @param priv A pointer to struct lbs_private structure 22 * returns: n/a
20 * @return n/a
21 */ 23 */
22void lbs_mac_event_disconnected(struct lbs_private *priv) 24void lbs_mac_event_disconnected(struct lbs_private *priv)
23{ 25{
@@ -84,15 +86,18 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
84 lbs_deb_hex(LBS_DEB_CMD, "CMD_RESP", (void *) resp, len); 86 lbs_deb_hex(LBS_DEB_CMD, "CMD_RESP", (void *) resp, len);
85 87
86 if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { 88 if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
87 lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n", 89 netdev_info(priv->dev,
88 le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum)); 90 "Received CMD_RESP with invalid sequence %d (expected %d)\n",
91 le16_to_cpu(resp->seqnum),
92 le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum));
89 spin_unlock_irqrestore(&priv->driver_lock, flags); 93 spin_unlock_irqrestore(&priv->driver_lock, flags);
90 ret = -1; 94 ret = -1;
91 goto done; 95 goto done;
92 } 96 }
93 if (respcmd != CMD_RET(curcmd) && 97 if (respcmd != CMD_RET(curcmd) &&
94 respcmd != CMD_RET_802_11_ASSOCIATE && curcmd != CMD_802_11_ASSOCIATE) { 98 respcmd != CMD_RET_802_11_ASSOCIATE && curcmd != CMD_802_11_ASSOCIATE) {
95 lbs_pr_info("Invalid CMD_RESP %x to command %x!\n", respcmd, curcmd); 99 netdev_info(priv->dev, "Invalid CMD_RESP %x to command %x!\n",
100 respcmd, curcmd);
96 spin_unlock_irqrestore(&priv->driver_lock, flags); 101 spin_unlock_irqrestore(&priv->driver_lock, flags);
97 ret = -1; 102 ret = -1;
98 goto done; 103 goto done;
@@ -101,7 +106,8 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
101 if (resp->result == cpu_to_le16(0x0004)) { 106 if (resp->result == cpu_to_le16(0x0004)) {
102 /* 0x0004 means -EAGAIN. Drop the response, let it time out 107 /* 0x0004 means -EAGAIN. Drop the response, let it time out
103 and be resubmitted */ 108 and be resubmitted */
104 lbs_pr_info("Firmware returns DEFER to command %x. Will let it time out...\n", 109 netdev_info(priv->dev,
110 "Firmware returns DEFER to command %x. Will let it time out...\n",
105 le16_to_cpu(resp->command)); 111 le16_to_cpu(resp->command));
106 spin_unlock_irqrestore(&priv->driver_lock, flags); 112 spin_unlock_irqrestore(&priv->driver_lock, flags);
107 ret = -1; 113 ret = -1;
@@ -313,28 +319,28 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
313 lbs_deb_cmd("EVENT: ADHOC beacon lost\n"); 319 lbs_deb_cmd("EVENT: ADHOC beacon lost\n");
314 break; 320 break;
315 case MACREG_INT_CODE_RSSI_LOW: 321 case MACREG_INT_CODE_RSSI_LOW:
316 lbs_pr_alert("EVENT: rssi low\n"); 322 netdev_alert(priv->dev, "EVENT: rssi low\n");
317 break; 323 break;
318 case MACREG_INT_CODE_SNR_LOW: 324 case MACREG_INT_CODE_SNR_LOW:
319 lbs_pr_alert("EVENT: snr low\n"); 325 netdev_alert(priv->dev, "EVENT: snr low\n");
320 break; 326 break;
321 case MACREG_INT_CODE_MAX_FAIL: 327 case MACREG_INT_CODE_MAX_FAIL:
322 lbs_pr_alert("EVENT: max fail\n"); 328 netdev_alert(priv->dev, "EVENT: max fail\n");
323 break; 329 break;
324 case MACREG_INT_CODE_RSSI_HIGH: 330 case MACREG_INT_CODE_RSSI_HIGH:
325 lbs_pr_alert("EVENT: rssi high\n"); 331 netdev_alert(priv->dev, "EVENT: rssi high\n");
326 break; 332 break;
327 case MACREG_INT_CODE_SNR_HIGH: 333 case MACREG_INT_CODE_SNR_HIGH:
328 lbs_pr_alert("EVENT: snr high\n"); 334 netdev_alert(priv->dev, "EVENT: snr high\n");
329 break; 335 break;
330 336
331 case MACREG_INT_CODE_MESH_AUTO_STARTED: 337 case MACREG_INT_CODE_MESH_AUTO_STARTED:
332 /* Ignore spurious autostart events */ 338 /* Ignore spurious autostart events */
333 lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n"); 339 netdev_info(priv->dev, "EVENT: MESH_AUTO_STARTED (ignoring)\n");
334 break; 340 break;
335 341
336 default: 342 default:
337 lbs_pr_alert("EVENT: unknown event id %d\n", event); 343 netdev_alert(priv->dev, "EVENT: unknown event id %d\n", event);
338 break; 344 break;
339 } 345 }
340 346
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index fbf3b0332bb7..23250f621761 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -151,13 +151,14 @@ static ssize_t lbs_host_sleep_write(struct file *file,
151 ret = lbs_set_host_sleep(priv, 0); 151 ret = lbs_set_host_sleep(priv, 0);
152 else if (host_sleep == 1) { 152 else if (host_sleep == 1) {
153 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { 153 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
154 lbs_pr_info("wake parameters not configured"); 154 netdev_info(priv->dev,
155 "wake parameters not configured\n");
155 ret = -EINVAL; 156 ret = -EINVAL;
156 goto out_unlock; 157 goto out_unlock;
157 } 158 }
158 ret = lbs_set_host_sleep(priv, 1); 159 ret = lbs_set_host_sleep(priv, 1);
159 } else { 160 } else {
160 lbs_pr_err("invalid option\n"); 161 netdev_err(priv->dev, "invalid option\n");
161 ret = -EINVAL; 162 ret = -EINVAL;
162 } 163 }
163 164
@@ -849,15 +850,14 @@ static struct debug_data items[] = {
849static int num_of_items = ARRAY_SIZE(items); 850static int num_of_items = ARRAY_SIZE(items);
850 851
851/** 852/**
852 * @brief proc read function 853 * lbs_debugfs_read - proc read function
853 * 854 *
854 * @param page pointer to buffer 855 * @file: file to read
855 * @param s read data starting position 856 * @userbuf: pointer to buffer
856 * @param off offset 857 * @count: number of bytes to read
857 * @param cnt counter 858 * @ppos: read data starting position
858 * @param eof end of file flag 859 *
859 * @param data data to output 860 * returns: amount of data read or negative error code
860 * @return number of output data
861 */ 861 */
862static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf, 862static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf,
863 size_t count, loff_t *ppos) 863 size_t count, loff_t *ppos)
@@ -897,13 +897,14 @@ static ssize_t lbs_debugfs_read(struct file *file, char __user *userbuf,
897} 897}
898 898
899/** 899/**
900 * @brief proc write function 900 * lbs_debugfs_write - proc write function
901 *
902 * @f: file pointer
903 * @buf: pointer to data buffer
904 * @cnt: data number to write
905 * @ppos: file position
901 * 906 *
902 * @param f file pointer 907 * returns: amount of data written
903 * @param buf pointer to data buffer
904 * @param cnt data number to write
905 * @param data data to write
906 * @return number of data
907 */ 908 */
908static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf, 909static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
909 size_t cnt, loff_t *ppos) 910 size_t cnt, loff_t *ppos)
@@ -966,11 +967,11 @@ static const struct file_operations lbs_debug_fops = {
966}; 967};
967 968
968/** 969/**
969 * @brief create debug proc file 970 * lbs_debug_init - create debug proc file
971 *
972 * @priv: pointer to &struct lbs_private
970 * 973 *
971 * @param priv pointer struct lbs_private 974 * returns: N/A
972 * @param dev pointer net_device
973 * @return N/A
974 */ 975 */
975static void lbs_debug_init(struct lbs_private *priv) 976static void lbs_debug_init(struct lbs_private *priv)
976{ 977{
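
For orientation only: lbs_debug_init(), whose body is outside this hunk, registers the lbs_debug_fops table above with debugfs along roughly these lines; the directory and file names here are placeholders, not the driver's actual ones:

struct dentry *dir;

dir = debugfs_create_dir("libertas_example", NULL);
debugfs_create_file("registers", 0644, dir, priv, &lbs_debug_fops);
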
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 2ae752d10065..da0b05bb89fe 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -1,8 +1,8 @@
1 1
2/** 2/*
3 * This file contains declaration referring to 3 * This file contains declaration referring to
4 * functions defined in other source files 4 * functions defined in other source files
5 */ 5 */
6 6
7#ifndef _LBS_DECL_H_ 7#ifndef _LBS_DECL_H_
8#define _LBS_DECL_H_ 8#define _LBS_DECL_H_
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index d00c728cec47..ab966f08024a 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -1,7 +1,7 @@
1/** 1/*
2 * This header file contains global constant/enum definitions, 2 * This header file contains global constant/enum definitions,
3 * global variable declaration. 3 * global variable declaration.
4 */ 4 */
5#ifndef _LBS_DEFS_H_ 5#ifndef _LBS_DEFS_H_
6#define _LBS_DEFS_H_ 6#define _LBS_DEFS_H_
7 7
@@ -89,13 +89,6 @@ do { if ((lbs_debug & (grp)) == (grp)) \
89#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args) 89#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args)
90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args) 90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
91 91
92#define lbs_pr_info(format, args...) \
93 printk(KERN_INFO DRV_NAME": " format, ## args)
94#define lbs_pr_err(format, args...) \
95 printk(KERN_ERR DRV_NAME": " format, ## args)
96#define lbs_pr_alert(format, args...) \
97 printk(KERN_ALERT DRV_NAME": " format, ## args)
98
99#ifdef DEBUG 92#ifdef DEBUG
100static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len) 93static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
101{ 94{
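
With the lbs_pr_* wrappers deleted above, the rest of the patch settles on two idioms. A condensed sketch of the rule of thumb it follows (the message strings are taken from hunks elsewhere in this patch):

/* Where a struct lbs_private (and thus a net_device) is in scope, the
 * netdev_* helpers are used so the interface is named in the log: */
netdev_err(priv->dev, "deep sleep: wakeup failed\n");

/* In probe and firmware-download paths that run before the net_device
 * is usable, plain pr_*() is used instead; the per-file
 *      #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 * keeps a module-name prefix comparable to the old DRV_NAME one. */
pr_err("failed to find firmware (%d)\n", ret);
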
@@ -123,19 +116,19 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
123 116
124 117
125 118
126/** Buffer Constants */ 119/* Buffer Constants */
127 120
128/* The size of SQ memory PPA, DPA are 8 DWORDs, that keep the physical 121/* The size of SQ memory PPA, DPA are 8 DWORDs, that keep the physical
129* addresses of TxPD buffers. Station has only 8 TxPD available, Whereas 122 * addresses of TxPD buffers. Station has only 8 TxPD available, Whereas
130* driver has more local TxPDs. Each TxPD on the host memory is associated 123 * driver has more local TxPDs. Each TxPD on the host memory is associated
131* with a Tx control node. The driver maintains 8 RxPD descriptors for 124 * with a Tx control node. The driver maintains 8 RxPD descriptors for
132* station firmware to store Rx packet information. 125 * station firmware to store Rx packet information.
133* 126 *
134* Current version of MAC has a 32x6 multicast address buffer. 127 * Current version of MAC has a 32x6 multicast address buffer.
135* 128 *
136* 802.11b can have up to 14 channels, the driver keeps the 129 * 802.11b can have up to 14 channels, the driver keeps the
137* BSSID(MAC address) of each APs or Ad hoc stations it has sensed. 130 * BSSID(MAC address) of each APs or Ad hoc stations it has sensed.
138*/ 131 */
139 132
140#define MRVDRV_MAX_MULTICAST_LIST_SIZE 32 133#define MRVDRV_MAX_MULTICAST_LIST_SIZE 32
141#define LBS_NUM_CMD_BUFFERS 10 134#define LBS_NUM_CMD_BUFFERS 10
@@ -166,7 +159,7 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
166#define WOL_RESULT_NOSPC_ERR 1 159#define WOL_RESULT_NOSPC_ERR 1
167#define WOL_RESULT_EEXIST_ERR 2 160#define WOL_RESULT_EEXIST_ERR 2
168 161
169/** Misc constants */ 162/* Misc constants */
170/* This section defines 802.11 specific contants */ 163/* This section defines 802.11 specific contants */
171 164
172#define MRVDRV_MAX_BSS_DESCRIPTS 16 165#define MRVDRV_MAX_BSS_DESCRIPTS 16
@@ -183,7 +176,8 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
183 176
184#define MARVELL_MESH_IE_LENGTH 9 177#define MARVELL_MESH_IE_LENGTH 9
185 178
186/* Values used to populate the struct mrvl_mesh_ie. The only time you need this 179/*
180 * Values used to populate the struct mrvl_mesh_ie. The only time you need this
187 * is when enabling the mesh using CMD_MESH_CONFIG. 181 * is when enabling the mesh using CMD_MESH_CONFIG.
188 */ 182 */
189#define MARVELL_MESH_IE_TYPE 4 183#define MARVELL_MESH_IE_TYPE 4
@@ -193,7 +187,7 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
193#define MARVELL_MESH_METRIC_ID 0 187#define MARVELL_MESH_METRIC_ID 0
194#define MARVELL_MESH_CAPABILITY 0 188#define MARVELL_MESH_CAPABILITY 0
195 189
196/** INT status Bit Definition*/ 190/* INT status Bit Definition */
197#define MRVDRV_TX_DNLD_RDY 0x0001 191#define MRVDRV_TX_DNLD_RDY 0x0001
198#define MRVDRV_RX_UPLD_RDY 0x0002 192#define MRVDRV_RX_UPLD_RDY 0x0002
199#define MRVDRV_CMD_DNLD_RDY 0x0004 193#define MRVDRV_CMD_DNLD_RDY 0x0004
@@ -208,59 +202,63 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
208#define TPC_DEFAULT_P1 10 202#define TPC_DEFAULT_P1 10
209#define TPC_DEFAULT_P2 13 203#define TPC_DEFAULT_P2 13
210 204
211/** TxPD status */ 205/* TxPD status */
212 206
213/* Station firmware use TxPD status field to report final Tx transmit 207/*
214* result, Bit masks are used to present combined situations. 208 * Station firmware use TxPD status field to report final Tx transmit
215*/ 209 * result, Bit masks are used to present combined situations.
210 */
216 211
217#define MRVDRV_TxPD_POWER_MGMT_NULL_PACKET 0x01 212#define MRVDRV_TxPD_POWER_MGMT_NULL_PACKET 0x01
218#define MRVDRV_TxPD_POWER_MGMT_LAST_PACKET 0x08 213#define MRVDRV_TxPD_POWER_MGMT_LAST_PACKET 0x08
219 214
220/** Tx mesh flag */ 215/* Tx mesh flag */
221/* Currently we are using normal WDS flag as mesh flag. 216/*
217 * Currently we are using normal WDS flag as mesh flag.
222 * TODO: change to proper mesh flag when MAC understands it. 218 * TODO: change to proper mesh flag when MAC understands it.
223 */ 219 */
224#define TxPD_CONTROL_WDS_FRAME (1<<17) 220#define TxPD_CONTROL_WDS_FRAME (1<<17)
225#define TxPD_MESH_FRAME TxPD_CONTROL_WDS_FRAME 221#define TxPD_MESH_FRAME TxPD_CONTROL_WDS_FRAME
226 222
227/** Mesh interface ID */ 223/* Mesh interface ID */
228#define MESH_IFACE_ID 0x0001 224#define MESH_IFACE_ID 0x0001
229/** Mesh id should be in bits 14-13-12 */ 225/* Mesh id should be in bits 14-13-12 */
230#define MESH_IFACE_BIT_OFFSET 0x000c 226#define MESH_IFACE_BIT_OFFSET 0x000c
231/** Mesh enable bit in FW capability */ 227/* Mesh enable bit in FW capability */
232#define MESH_CAPINFO_ENABLE_MASK (1<<16) 228#define MESH_CAPINFO_ENABLE_MASK (1<<16)
233 229
234/** FW definition from Marvell v4 */ 230/* FW definition from Marvell v4 */
235#define MRVL_FW_V4 (0x04) 231#define MRVL_FW_V4 (0x04)
236/** FW definition from Marvell v5 */ 232/* FW definition from Marvell v5 */
237#define MRVL_FW_V5 (0x05) 233#define MRVL_FW_V5 (0x05)
238/** FW definition from Marvell v10 */ 234/* FW definition from Marvell v10 */
239#define MRVL_FW_V10 (0x0a) 235#define MRVL_FW_V10 (0x0a)
240/** FW major revision definition */ 236/* FW major revision definition */
241#define MRVL_FW_MAJOR_REV(x) ((x)>>24) 237#define MRVL_FW_MAJOR_REV(x) ((x)>>24)
242 238
243/** RxPD status */ 239/* RxPD status */
244 240
245#define MRVDRV_RXPD_STATUS_OK 0x0001 241#define MRVDRV_RXPD_STATUS_OK 0x0001
246 242
247/** RxPD status - Received packet types */ 243/* RxPD status - Received packet types */
248/** Rx mesh flag */ 244/* Rx mesh flag */
249/* Currently we are using normal WDS flag as mesh flag. 245/*
246 * Currently we are using normal WDS flag as mesh flag.
250 * TODO: change to proper mesh flag when MAC understands it. 247 * TODO: change to proper mesh flag when MAC understands it.
251 */ 248 */
252#define RxPD_CONTROL_WDS_FRAME (0x40) 249#define RxPD_CONTROL_WDS_FRAME (0x40)
253#define RxPD_MESH_FRAME RxPD_CONTROL_WDS_FRAME 250#define RxPD_MESH_FRAME RxPD_CONTROL_WDS_FRAME
254 251
255/** RSSI-related defines */ 252/* RSSI-related defines */
256/* RSSI constants are used to implement 802.11 RSSI threshold 253/*
257* indication. if the Rx packet signal got too weak for 5 consecutive 254 * RSSI constants are used to implement 802.11 RSSI threshold
258* times, miniport driver (driver) will report this event to wrapper 255 * indication. if the Rx packet signal got too weak for 5 consecutive
259*/ 256 * times, miniport driver (driver) will report this event to wrapper
257 */
260 258
261#define MRVDRV_NF_DEFAULT_SCAN_VALUE (-96) 259#define MRVDRV_NF_DEFAULT_SCAN_VALUE (-96)
262 260
263/** RTS/FRAG related defines */ 261/* RTS/FRAG related defines */
264#define MRVDRV_RTS_MIN_VALUE 0 262#define MRVDRV_RTS_MIN_VALUE 0
265#define MRVDRV_RTS_MAX_VALUE 2347 263#define MRVDRV_RTS_MAX_VALUE 2347
266#define MRVDRV_FRAG_MIN_VALUE 256 264#define MRVDRV_FRAG_MIN_VALUE 256
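
A small illustration of how the firmware-revision constants above are used; priv->fwrelease is assumed to be the 32-bit version word reported by the firmware, with the major revision in its top byte as MRVL_FW_MAJOR_REV() expects:

/* True for v10 and newer firmware images (0x0a in the top byte). */
if (MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10)
        netdev_info(priv->dev, "v10+ firmware detected\n");
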
@@ -300,36 +298,36 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
300 298
301#define MAX_LEDS 8 299#define MAX_LEDS 8
302 300
303/** Global Variable Declaration */ 301/* Global Variable Declaration */
304extern const char lbs_driver_version[]; 302extern const char lbs_driver_version[];
305extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE]; 303extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE];
306 304
307 305
308/** ENUM definition*/ 306/* ENUM definition */
309/** SNRNF_TYPE */ 307/* SNRNF_TYPE */
310enum SNRNF_TYPE { 308enum SNRNF_TYPE {
311 TYPE_BEACON = 0, 309 TYPE_BEACON = 0,
312 TYPE_RXPD, 310 TYPE_RXPD,
313 MAX_TYPE_B 311 MAX_TYPE_B
314}; 312};
315 313
316/** SNRNF_DATA*/ 314/* SNRNF_DATA */
317enum SNRNF_DATA { 315enum SNRNF_DATA {
318 TYPE_NOAVG = 0, 316 TYPE_NOAVG = 0,
319 TYPE_AVG, 317 TYPE_AVG,
320 MAX_TYPE_AVG 318 MAX_TYPE_AVG
321}; 319};
322 320
323/** LBS_802_11_POWER_MODE */ 321/* LBS_802_11_POWER_MODE */
324enum LBS_802_11_POWER_MODE { 322enum LBS_802_11_POWER_MODE {
325 LBS802_11POWERMODECAM, 323 LBS802_11POWERMODECAM,
326 LBS802_11POWERMODEMAX_PSP, 324 LBS802_11POWERMODEMAX_PSP,
327 LBS802_11POWERMODEFAST_PSP, 325 LBS802_11POWERMODEFAST_PSP,
328 /*not a real mode, defined as an upper bound */ 326 /* not a real mode, defined as an upper bound */
329 LBS802_11POWEMODEMAX 327 LBS802_11POWEMODEMAX
330}; 328};
331 329
332/** PS_STATE */ 330/* PS_STATE */
333enum PS_STATE { 331enum PS_STATE {
334 PS_STATE_FULL_POWER, 332 PS_STATE_FULL_POWER,
335 PS_STATE_AWAKE, 333 PS_STATE_AWAKE,
@@ -337,7 +335,7 @@ enum PS_STATE {
337 PS_STATE_SLEEP 335 PS_STATE_SLEEP
338}; 336};
339 337
340/** DNLD_STATE */ 338/* DNLD_STATE */
341enum DNLD_STATE { 339enum DNLD_STATE {
342 DNLD_RES_RECEIVED, 340 DNLD_RES_RECEIVED,
343 DNLD_DATA_SENT, 341 DNLD_DATA_SENT,
@@ -345,19 +343,19 @@ enum DNLD_STATE {
345 DNLD_BOOTCMD_SENT, 343 DNLD_BOOTCMD_SENT,
346}; 344};
347 345
348/** LBS_MEDIA_STATE */ 346/* LBS_MEDIA_STATE */
349enum LBS_MEDIA_STATE { 347enum LBS_MEDIA_STATE {
350 LBS_CONNECTED, 348 LBS_CONNECTED,
351 LBS_DISCONNECTED 349 LBS_DISCONNECTED
352}; 350};
353 351
354/** LBS_802_11_PRIVACY_FILTER */ 352/* LBS_802_11_PRIVACY_FILTER */
355enum LBS_802_11_PRIVACY_FILTER { 353enum LBS_802_11_PRIVACY_FILTER {
356 LBS802_11PRIVFILTERACCEPTALL, 354 LBS802_11PRIVFILTERACCEPTALL,
357 LBS802_11PRIVFILTER8021XWEP 355 LBS802_11PRIVFILTER8021XWEP
358}; 356};
359 357
360/** mv_ms_type */ 358/* mv_ms_type */
361enum mv_ms_type { 359enum mv_ms_type {
362 MVMS_DAT = 0, 360 MVMS_DAT = 0,
363 MVMS_CMD = 1, 361 MVMS_CMD = 1,
@@ -365,14 +363,14 @@ enum mv_ms_type {
365 MVMS_EVENT 363 MVMS_EVENT
366}; 364};
367 365
368/** KEY_TYPE_ID */ 366/* KEY_TYPE_ID */
369enum KEY_TYPE_ID { 367enum KEY_TYPE_ID {
370 KEY_TYPE_ID_WEP = 0, 368 KEY_TYPE_ID_WEP = 0,
371 KEY_TYPE_ID_TKIP, 369 KEY_TYPE_ID_TKIP,
372 KEY_TYPE_ID_AES 370 KEY_TYPE_ID_AES
373}; 371};
374 372
375/** KEY_INFO_WPA (applies to both TKIP and AES/CCMP) */ 373/* KEY_INFO_WPA (applies to both TKIP and AES/CCMP) */
376enum KEY_INFO_WPA { 374enum KEY_INFO_WPA {
377 KEY_INFO_WPA_MCAST = 0x01, 375 KEY_INFO_WPA_MCAST = 0x01,
378 KEY_INFO_WPA_UNICAST = 0x02, 376 KEY_INFO_WPA_UNICAST = 0x02,
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index bc461eb39660..76d018beebf4 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -1,8 +1,8 @@
1/** 1/*
2 * This file contains definitions and data structures specific 2 * This file contains definitions and data structures specific
3 * to Marvell 802.11 NIC. It contains the Device Information 3 * to Marvell 802.11 NIC. It contains the Device Information
4 * structure struct lbs_private.. 4 * structure struct lbs_private..
5 */ 5 */
6#ifndef _LBS_DEV_H_ 6#ifndef _LBS_DEV_H_
7#define _LBS_DEV_H_ 7#define _LBS_DEV_H_
8 8
@@ -12,7 +12,7 @@
12 12
13#include <linux/kfifo.h> 13#include <linux/kfifo.h>
14 14
15/** sleep_params */ 15/* sleep_params */
16struct sleep_params { 16struct sleep_params {
17 uint16_t sp_error; 17 uint16_t sp_error;
18 uint16_t sp_offset; 18 uint16_t sp_offset;
@@ -23,7 +23,7 @@ struct sleep_params {
23}; 23};
24 24
25 25
26/** Private structure for the MV device */ 26/* Private structure for the MV device */
27struct lbs_private { 27struct lbs_private {
28 28
29 /* Basic networking */ 29 /* Basic networking */
@@ -125,12 +125,12 @@ struct lbs_private {
125 /* Events sent from hardware to driver */ 125 /* Events sent from hardware to driver */
126 struct kfifo event_fifo; 126 struct kfifo event_fifo;
127 127
128 /** thread to service interrupts */ 128 /* thread to service interrupts */
129 struct task_struct *main_thread; 129 struct task_struct *main_thread;
130 wait_queue_head_t waitq; 130 wait_queue_head_t waitq;
131 struct workqueue_struct *work_thread; 131 struct workqueue_struct *work_thread;
132 132
133 /** Encryption stuff */ 133 /* Encryption stuff */
134 u8 authtype_auto; 134 u8 authtype_auto;
135 u8 wep_tx_key; 135 u8 wep_tx_key;
136 u8 wep_key[4][WLAN_KEY_LEN_WEP104]; 136 u8 wep_key[4][WLAN_KEY_LEN_WEP104];
@@ -162,7 +162,7 @@ struct lbs_private {
162 s16 txpower_min; 162 s16 txpower_min;
163 s16 txpower_max; 163 s16 txpower_max;
164 164
165 /** Scanning */ 165 /* Scanning */
166 struct delayed_work scan_work; 166 struct delayed_work scan_work;
167 int scan_channel; 167 int scan_channel;
168 /* Queue of things waiting for scan completion */ 168 /* Queue of things waiting for scan completion */
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 50193aac679e..29dbce4a9f86 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -20,7 +20,8 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
20 strcpy(info->version, lbs_driver_version); 20 strcpy(info->version, lbs_driver_version);
21} 21}
22 22
23/* All 8388 parts have 16KiB EEPROM size at the time of writing. 23/*
24 * All 8388 parts have 16KiB EEPROM size at the time of writing.
24 * In case that changes this needs fixing. 25 * In case that changes this needs fixing.
25 */ 26 */
26#define LBS_EEPROM_LEN 16384 27#define LBS_EEPROM_LEN 16384
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 6cb6935ee4a3..2e2dbfa2ee50 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -1,7 +1,7 @@
1/** 1/*
2 * This file function prototypes, data structure 2 * This file function prototypes, data structure
3 * and definitions for all the host/station commands 3 * and definitions for all the host/station commands
4 */ 4 */
5 5
6#ifndef _LBS_HOST_H_ 6#ifndef _LBS_HOST_H_
7#define _LBS_HOST_H_ 7#define _LBS_HOST_H_
@@ -13,9 +13,10 @@
13 13
14#define CMD_OPTION_WAITFORRSP 0x0002 14#define CMD_OPTION_WAITFORRSP 0x0002
15 15
16/** Host command IDs */ 16/* Host command IDs */
17 17
18/* Return command are almost always the same as the host command, but with 18/*
19 * Return command are almost always the same as the host command, but with
19 * bit 15 set high. There are a few exceptions, though... 20 * bit 15 set high. There are a few exceptions, though...
20 */ 21 */
21#define CMD_RET(cmd) (0x8000 | cmd) 22#define CMD_RET(cmd) (0x8000 | cmd)
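
The bit-15 convention above is what the response matching in cmdresp.c relies on. A tiny worked example, using 0x0021 purely as an illustrative request code:

u16 curcmd  = 0x0021;           /* request code sent to the firmware */
u16 respcmd = CMD_RET(curcmd);  /* 0x8021: same code with bit 15 set */

/* lbs_process_command_response() accepts a response only when
 * respcmd == CMD_RET(curcmd), with a special case for ASSOCIATE. */
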
@@ -251,7 +252,7 @@ enum cmd_mesh_config_types {
251 CMD_TYPE_MESH_GET_MESH_IE, /* GET_DEFAULTS is superset of GET_MESHIE */ 252 CMD_TYPE_MESH_GET_MESH_IE, /* GET_DEFAULTS is superset of GET_MESHIE */
252}; 253};
253 254
254/** Card Event definition */ 255/* Card Event definition */
255#define MACREG_INT_CODE_TX_PPA_FREE 0 256#define MACREG_INT_CODE_TX_PPA_FREE 0
256#define MACREG_INT_CODE_TX_DMA_DONE 1 257#define MACREG_INT_CODE_TX_DMA_DONE 1
257#define MACREG_INT_CODE_LINK_LOST_W_SCAN 2 258#define MACREG_INT_CODE_LINK_LOST_W_SCAN 2
@@ -624,12 +625,14 @@ struct cmd_ds_802_11_rf_channel {
624struct cmd_ds_802_11_rssi { 625struct cmd_ds_802_11_rssi {
625 struct cmd_header hdr; 626 struct cmd_header hdr;
626 627
627 /* request: number of beacons (N) to average the SNR and NF over 628 /*
629 * request: number of beacons (N) to average the SNR and NF over
628 * response: SNR of most recent beacon 630 * response: SNR of most recent beacon
629 */ 631 */
630 __le16 n_or_snr; 632 __le16 n_or_snr;
631 633
632 /* The following fields are only set in the response. 634 /*
635 * The following fields are only set in the response.
633 * In the request these are reserved and should be set to 0. 636 * In the request these are reserved and should be set to 0.
634 */ 637 */
635 __le16 nf; /* most recent beacon noise floor */ 638 __le16 nf; /* most recent beacon noise floor */
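
A short sketch of how the dual-purpose field documented above is used on each side of the exchange; the locals and the averaging count of 8 are illustrative:

struct cmd_ds_802_11_rssi rssi;
u16 snr, nf;

/* request: ask the firmware to average SNR/NF over the last 8 beacons */
rssi.n_or_snr = cpu_to_le16(8);

/* ... after the firmware replies, the same field carries the SNR ... */
snr = le16_to_cpu(rssi.n_or_snr);  /* SNR of the most recent beacon */
nf  = le16_to_cpu(rssi.nf);        /* response-only noise floor */
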
@@ -680,14 +683,16 @@ struct cmd_ds_802_11_ps_mode {
680 683
681 __le16 action; 684 __le16 action;
682 685
683 /* Interval for keepalive in PS mode: 686 /*
687 * Interval for keepalive in PS mode:
684 * 0x0000 = don't change 688 * 0x0000 = don't change
685 * 0x001E = firmware default 689 * 0x001E = firmware default
686 * 0xFFFF = disable 690 * 0xFFFF = disable
687 */ 691 */
688 __le16 nullpktinterval; 692 __le16 nullpktinterval;
689 693
690 /* Number of DTIM intervals to wake up for: 694 /*
695 * Number of DTIM intervals to wake up for:
691 * 0 = don't change 696 * 0 = don't change
692 * 1 = firmware default 697 * 1 = firmware default
693 * 5 = max 698 * 5 = max
@@ -697,7 +702,8 @@ struct cmd_ds_802_11_ps_mode {
697 __le16 reserved; 702 __le16 reserved;
698 __le16 locallisteninterval; 703 __le16 locallisteninterval;
699 704
700 /* AdHoc awake period (FW v9+ only): 705 /*
706 * AdHoc awake period (FW v9+ only):
701 * 0 = don't change 707 * 0 = don't change
702 * 1 = always awake (IEEE standard behavior) 708 * 1 = always awake (IEEE standard behavior)
703 * 2 - 31 = sleep for (n - 1) periods and awake for 1 period 709 * 2 - 31 = sleep for (n - 1) periods and awake for 1 period
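
A hedged sketch of filling this request with the "firmware default" values the comments above describe; only fields visible in this hunk are touched, and leaving the rest zeroed means "don't change":

struct cmd_ds_802_11_ps_mode ps;

memset(&ps, 0, sizeof(ps));
ps.hdr.size = cpu_to_le16(sizeof(ps));
ps.action = cpu_to_le16(PS_MODE_ACTION_ENTER_PS);
ps.nullpktinterval = cpu_to_le16(0x001E);  /* firmware default keepalive */
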
@@ -771,7 +777,8 @@ struct adhoc_bssdesc {
771 __le16 capability; 777 __le16 capability;
772 u8 rates[MAX_RATES]; 778 u8 rates[MAX_RATES];
773 779
774 /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the 780 /*
781 * DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
775 * Adhoc join command and will cause a binary layout mismatch with 782 * Adhoc join command and will cause a binary layout mismatch with
776 * the firmware 783 * the firmware
777 */ 784 */
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 8712cb213f2f..63ed5798365c 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -21,6 +21,8 @@
21 21
22*/ 22*/
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/slab.h> 27#include <linux/slab.h>
26#include <linux/delay.h> 28#include <linux/delay.h>
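
The pr_fmt() definition added above is what lets the bare pr_err()/pr_info() calls later in this file keep a module-name prefix now that lbs_pr_* is gone. Conceptually:

/* pr_err(fmt, ...) is printk(KERN_ERR pr_fmt(fmt), ...), so with the
 * definition above this call: */
pr_err("error in request_irq\n");
/* ends up as: */
printk(KERN_ERR KBUILD_MODNAME ": " "error in request_irq\n");
/* i.e. prefixed with the module name, much like the old lbs_pr_err(). */
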
@@ -312,7 +314,8 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
312#define CF8385_MANFID 0x02df 314#define CF8385_MANFID 0x02df
313#define CF8385_CARDID 0x8103 315#define CF8385_CARDID 0x8103
314 316
315/* FIXME: just use the 'driver_info' field of 'struct pcmcia_device_id' when 317/*
318 * FIXME: just use the 'driver_info' field of 'struct pcmcia_device_id' when
316 * that gets fixed. Currently there's no way to access it from the probe hook. 319 * that gets fixed. Currently there's no way to access it from the probe hook.
317 */ 320 */
318static inline u32 get_model(u16 manf_id, u16 card_id) 321static inline u32 get_model(u16 manf_id, u16 card_id)
@@ -361,7 +364,7 @@ static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb)
361 if (status & IF_CS_BIT_COMMAND) 364 if (status & IF_CS_BIT_COMMAND)
362 break; 365 break;
363 if (++loops > 100) { 366 if (++loops > 100) {
364 lbs_pr_err("card not ready for commands\n"); 367 netdev_err(priv->dev, "card not ready for commands\n");
365 goto done; 368 goto done;
366 } 369 }
367 mdelay(1); 370 mdelay(1);
@@ -431,14 +434,16 @@ static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len)
431 /* is hardware ready? */ 434 /* is hardware ready? */
432 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS); 435 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS);
433 if ((status & IF_CS_BIT_RESP) == 0) { 436 if ((status & IF_CS_BIT_RESP) == 0) {
434 lbs_pr_err("no cmd response in card\n"); 437 netdev_err(priv->dev, "no cmd response in card\n");
435 *len = 0; 438 *len = 0;
436 goto out; 439 goto out;
437 } 440 }
438 441
439 *len = if_cs_read16(priv->card, IF_CS_RESP_LEN); 442 *len = if_cs_read16(priv->card, IF_CS_RESP_LEN);
440 if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) { 443 if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) {
441 lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len); 444 netdev_err(priv->dev,
445 "card cmd buffer has invalid # of bytes (%d)\n",
446 *len);
442 goto out; 447 goto out;
443 } 448 }
444 449
@@ -472,7 +477,9 @@ static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
472 477
473 len = if_cs_read16(priv->card, IF_CS_READ_LEN); 478 len = if_cs_read16(priv->card, IF_CS_READ_LEN);
474 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { 479 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
475 lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len); 480 netdev_err(priv->dev,
481 "card data buffer has invalid # of bytes (%d)\n",
482 len);
476 priv->dev->stats.rx_dropped++; 483 priv->dev->stats.rx_dropped++;
477 goto dat_err; 484 goto dat_err;
478 } 485 }
@@ -621,8 +628,10 @@ static int if_cs_prog_helper(struct if_cs_card *card, const struct firmware *fw)
621 if (remain < count) 628 if (remain < count)
622 count = remain; 629 count = remain;
623 630
624 /* "write the number of bytes to be sent to the I/O Command 631 /*
625 * write length register" */ 632 * "write the number of bytes to be sent to the I/O Command
633 * write length register"
634 */
626 if_cs_write16(card, IF_CS_CMD_LEN, count); 635 if_cs_write16(card, IF_CS_CMD_LEN, count);
627 636
628 /* "write this to I/O Command port register as 16 bit writes */ 637 /* "write this to I/O Command port register as 16 bit writes */
@@ -631,21 +640,27 @@ static int if_cs_prog_helper(struct if_cs_card *card, const struct firmware *fw)
631 &fw->data[sent], 640 &fw->data[sent],
632 count >> 1); 641 count >> 1);
633 642
634 /* "Assert the download over interrupt command in the Host 643 /*
635 * status register" */ 644 * "Assert the download over interrupt command in the Host
645 * status register"
646 */
636 if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND); 647 if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
637 648
638 /* "Assert the download over interrupt command in the Card 649 /*
639 * interrupt case register" */ 650 * "Assert the download over interrupt command in the Card
651 * interrupt case register"
652 */
640 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND); 653 if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
641 654
642 /* "The host polls the Card Status register ... for 50 ms before 655 /*
643 declaring a failure */ 656 * "The host polls the Card Status register ... for 50 ms before
657 * declaring a failure"
658 */
644 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS, 659 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
645 IF_CS_BIT_COMMAND); 660 IF_CS_BIT_COMMAND);
646 if (ret < 0) { 661 if (ret < 0) {
647 lbs_pr_err("can't download helper at 0x%x, ret %d\n", 662 pr_err("can't download helper at 0x%x, ret %d\n",
648 sent, ret); 663 sent, ret);
649 goto done; 664 goto done;
650 } 665 }
651 666
@@ -675,7 +690,7 @@ static int if_cs_prog_real(struct if_cs_card *card, const struct firmware *fw)
675 ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW, 690 ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW,
676 IF_CS_SQ_HELPER_OK); 691 IF_CS_SQ_HELPER_OK);
677 if (ret < 0) { 692 if (ret < 0) {
678 lbs_pr_err("helper firmware doesn't answer\n"); 693 pr_err("helper firmware doesn't answer\n");
679 goto done; 694 goto done;
680 } 695 }
681 696
@@ -683,13 +698,13 @@ static int if_cs_prog_real(struct if_cs_card *card, const struct firmware *fw)
683 len = if_cs_read16(card, IF_CS_SQ_READ_LOW); 698 len = if_cs_read16(card, IF_CS_SQ_READ_LOW);
684 if (len & 1) { 699 if (len & 1) {
685 retry++; 700 retry++;
686 lbs_pr_info("odd, need to retry this firmware block\n"); 701 pr_info("odd, need to retry this firmware block\n");
687 } else { 702 } else {
688 retry = 0; 703 retry = 0;
689 } 704 }
690 705
691 if (retry > 20) { 706 if (retry > 20) {
692 lbs_pr_err("could not download firmware\n"); 707 pr_err("could not download firmware\n");
693 ret = -ENODEV; 708 ret = -ENODEV;
694 goto done; 709 goto done;
695 } 710 }
@@ -709,14 +724,14 @@ static int if_cs_prog_real(struct if_cs_card *card, const struct firmware *fw)
709 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS, 724 ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
710 IF_CS_BIT_COMMAND); 725 IF_CS_BIT_COMMAND);
711 if (ret < 0) { 726 if (ret < 0) {
712 lbs_pr_err("can't download firmware at 0x%x\n", sent); 727 pr_err("can't download firmware at 0x%x\n", sent);
713 goto done; 728 goto done;
714 } 729 }
715 } 730 }
716 731
717 ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a); 732 ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a);
718 if (ret < 0) 733 if (ret < 0)
719 lbs_pr_err("firmware download failed\n"); 734 pr_err("firmware download failed\n");
720 735
721done: 736done:
722 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 737 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
@@ -750,7 +765,8 @@ static int if_cs_host_to_card(struct lbs_private *priv,
750 ret = if_cs_send_cmd(priv, buf, nb); 765 ret = if_cs_send_cmd(priv, buf, nb);
751 break; 766 break;
752 default: 767 default:
753 lbs_pr_err("%s: unsupported type %d\n", __func__, type); 768 netdev_err(priv->dev, "%s: unsupported type %d\n",
769 __func__, type);
754 } 770 }
755 771
756 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 772 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
@@ -779,7 +795,7 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
779 p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; 795 p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
780 796
781 if (p_dev->resource[1]->end) { 797 if (p_dev->resource[1]->end) {
782 lbs_pr_err("wrong CIS (check number of IO windows)\n"); 798 pr_err("wrong CIS (check number of IO windows)\n");
783 return -ENODEV; 799 return -ENODEV;
784 } 800 }
785 801
@@ -800,7 +816,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
800 816
801 card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL); 817 card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL);
802 if (!card) { 818 if (!card) {
803 lbs_pr_err("error in kzalloc\n"); 819 pr_err("error in kzalloc\n");
804 goto out; 820 goto out;
805 } 821 }
806 card->p_dev = p_dev; 822 card->p_dev = p_dev;
@@ -809,7 +825,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
809 p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; 825 p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
810 826
811 if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) { 827 if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) {
812 lbs_pr_err("error in pcmcia_loop_config\n"); 828 pr_err("error in pcmcia_loop_config\n");
813 goto out1; 829 goto out1;
814 } 830 }
815 831
@@ -825,14 +841,14 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
825 card->iobase = ioport_map(p_dev->resource[0]->start, 841 card->iobase = ioport_map(p_dev->resource[0]->start,
826 resource_size(p_dev->resource[0])); 842 resource_size(p_dev->resource[0]));
827 if (!card->iobase) { 843 if (!card->iobase) {
828 lbs_pr_err("error in ioport_map\n"); 844 pr_err("error in ioport_map\n");
829 ret = -EIO; 845 ret = -EIO;
830 goto out1; 846 goto out1;
831 } 847 }
832 848
833 ret = pcmcia_enable_device(p_dev); 849 ret = pcmcia_enable_device(p_dev);
834 if (ret) { 850 if (ret) {
835 lbs_pr_err("error in pcmcia_enable_device\n"); 851 pr_err("error in pcmcia_enable_device\n");
836 goto out2; 852 goto out2;
837 } 853 }
838 854
@@ -841,14 +857,14 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
841 857
842 /* 858 /*
843 * Most of the libertas cards can do unaligned register access, but some 859 * Most of the libertas cards can do unaligned register access, but some
844 * weird ones can not. That's especially true for the CF8305 card. 860 * weird ones cannot. That's especially true for the CF8305 card.
845 */ 861 */
846 card->align_regs = 0; 862 card->align_regs = 0;
847 863
848 card->model = get_model(p_dev->manf_id, p_dev->card_id); 864 card->model = get_model(p_dev->manf_id, p_dev->card_id);
849 if (card->model == MODEL_UNKNOWN) { 865 if (card->model == MODEL_UNKNOWN) {
850 lbs_pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", 866 pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n",
851 p_dev->manf_id, p_dev->card_id); 867 p_dev->manf_id, p_dev->card_id);
852 goto out2; 868 goto out2;
853 } 869 }
854 870
@@ -857,20 +873,20 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
857 if (card->model == MODEL_8305) { 873 if (card->model == MODEL_8305) {
858 card->align_regs = 1; 874 card->align_regs = 1;
859 if (prod_id < IF_CS_CF8305_B1_REV) { 875 if (prod_id < IF_CS_CF8305_B1_REV) {
860 lbs_pr_err("8305 rev B0 and older are not supported\n"); 876 pr_err("8305 rev B0 and older are not supported\n");
861 ret = -ENODEV; 877 ret = -ENODEV;
862 goto out2; 878 goto out2;
863 } 879 }
864 } 880 }
865 881
866 if ((card->model == MODEL_8381) && prod_id < IF_CS_CF8381_B3_REV) { 882 if ((card->model == MODEL_8381) && prod_id < IF_CS_CF8381_B3_REV) {
867 lbs_pr_err("8381 rev B2 and older are not supported\n"); 883 pr_err("8381 rev B2 and older are not supported\n");
868 ret = -ENODEV; 884 ret = -ENODEV;
869 goto out2; 885 goto out2;
870 } 886 }
871 887
872 if ((card->model == MODEL_8385) && prod_id < IF_CS_CF8385_B1_REV) { 888 if ((card->model == MODEL_8385) && prod_id < IF_CS_CF8385_B1_REV) {
873 lbs_pr_err("8385 rev B0 and older are not supported\n"); 889 pr_err("8385 rev B0 and older are not supported\n");
874 ret = -ENODEV; 890 ret = -ENODEV;
875 goto out2; 891 goto out2;
876 } 892 }
@@ -878,7 +894,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
878 ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model, 894 ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model,
879 &fw_table[0], &helper, &mainfw); 895 &fw_table[0], &helper, &mainfw);
880 if (ret) { 896 if (ret) {
881 lbs_pr_err("failed to find firmware (%d)\n", ret); 897 pr_err("failed to find firmware (%d)\n", ret);
882 goto out2; 898 goto out2;
883 } 899 }
884 900
@@ -909,18 +925,20 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
909 ret = request_irq(p_dev->irq, if_cs_interrupt, 925 ret = request_irq(p_dev->irq, if_cs_interrupt,
910 IRQF_SHARED, DRV_NAME, card); 926 IRQF_SHARED, DRV_NAME, card);
911 if (ret) { 927 if (ret) {
912 lbs_pr_err("error in request_irq\n"); 928 pr_err("error in request_irq\n");
913 goto out3; 929 goto out3;
914 } 930 }
915 931
916 /* Clear any interrupt cause that happened while sending 932 /*
917 * firmware/initializing card */ 933 * Clear any interrupt cause that happened while sending
934 * firmware/initializing card
935 */
918 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK); 936 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
919 if_cs_enable_ints(card); 937 if_cs_enable_ints(card);
920 938
921 /* And finally bring the card up */ 939 /* And finally bring the card up */
922 if (lbs_start_card(priv) != 0) { 940 if (lbs_start_card(priv) != 0) {
923 lbs_pr_err("could not activate card\n"); 941 pr_err("could not activate card\n");
924 goto out3; 942 goto out3;
925 } 943 }
926 944
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index b4de0ca10feb..a7b5cb0c2753 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -26,6 +26,8 @@
26 * if_sdio_card_to_host() to pad the data. 26 * if_sdio_card_to_host() to pad the data.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/kernel.h> 31#include <linux/kernel.h>
30#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
31#include <linux/slab.h> 33#include <linux/slab.h>
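
The new pr_fmt() define is what lets the bare pr_err()/pr_info() calls in the rest of this file print with a module-name prefix. A minimal, self-contained sketch of the mechanism (module and message names are illustrative, not from the patch):

/* Must come before linux/kernel.h (which pulls in printk.h): every
 * pr_*() call below then expands to
 * printk(KERN_<level> KBUILD_MODNAME ": " fmt, ...). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init prfmt_demo_init(void)
{
	pr_info("loaded\n");	/* logged as "<modname>: loaded" */
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);
MODULE_LICENSE("GPL");
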
@@ -409,7 +411,7 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
409 411
410out: 412out:
411 if (ret) 413 if (ret)
412 lbs_pr_err("problem fetching packet from firmware\n"); 414 pr_err("problem fetching packet from firmware\n");
413 415
414 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 416 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
415 417
@@ -446,7 +448,7 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
446 } 448 }
447 449
448 if (ret) 450 if (ret)
449 lbs_pr_err("error %d sending packet to firmware\n", ret); 451 pr_err("error %d sending packet to firmware\n", ret);
450 452
451 sdio_release_host(card->func); 453 sdio_release_host(card->func);
452 454
@@ -555,7 +557,7 @@ release:
555 557
556out: 558out:
557 if (ret) 559 if (ret)
558 lbs_pr_err("failed to load helper firmware\n"); 560 pr_err("failed to load helper firmware\n");
559 561
560 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 562 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
561 return ret; 563 return ret;
@@ -669,7 +671,7 @@ release:
669 671
670out: 672out:
671 if (ret) 673 if (ret)
672 lbs_pr_err("failed to load firmware\n"); 674 pr_err("failed to load firmware\n");
673 675
674 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 676 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
675 return ret; 677 return ret;
@@ -723,7 +725,7 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
723 ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name, 725 ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
724 card->model, &fw_table[0], &helper, &mainfw); 726 card->model, &fw_table[0], &helper, &mainfw);
725 if (ret) { 727 if (ret) {
726 lbs_pr_err("failed to find firmware (%d)\n", ret); 728 pr_err("failed to find firmware (%d)\n", ret);
727 goto out; 729 goto out;
728 } 730 }
729 731
@@ -849,7 +851,7 @@ static int if_sdio_enter_deep_sleep(struct lbs_private *priv)
849 ret = __lbs_cmd(priv, CMD_802_11_DEEP_SLEEP, &cmd, sizeof(cmd), 851 ret = __lbs_cmd(priv, CMD_802_11_DEEP_SLEEP, &cmd, sizeof(cmd),
850 lbs_cmd_copyback, (unsigned long) &cmd); 852 lbs_cmd_copyback, (unsigned long) &cmd);
851 if (ret) 853 if (ret)
852 lbs_pr_err("DEEP_SLEEP cmd failed\n"); 854 netdev_err(priv->dev, "DEEP_SLEEP cmd failed\n");
853 855
854 mdelay(200); 856 mdelay(200);
855 return ret; 857 return ret;
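
The conversions in this file pick the logging helper by context: pr_err() where no device is registered yet (probe and firmware-load paths), dev_err()/dev_info() once a struct device is at hand (the PM callbacks), and netdev_err()/netdev_alert() once priv->dev, the net_device, exists. A rough sketch of that decision with an illustrative helper (not part of the patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/printk.h>

/* Illustrative helper: log through the most specific object available,
 * so the message is prefixed with the interface, the device, or at
 * least the module name. */
static void report_failure(struct device *dev, struct net_device *ndev,
			   int err)
{
	if (ndev)
		netdev_err(ndev, "operation failed (%d)\n", err);
	else if (dev)
		dev_err(dev, "operation failed (%d)\n", err);
	else
		pr_err("operation failed (%d)\n", err);
}
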
@@ -865,7 +867,7 @@ static int if_sdio_exit_deep_sleep(struct lbs_private *priv)
865 867
866 sdio_writeb(card->func, HOST_POWER_UP, CONFIGURATION_REG, &ret); 868 sdio_writeb(card->func, HOST_POWER_UP, CONFIGURATION_REG, &ret);
867 if (ret) 869 if (ret)
868 lbs_pr_err("sdio_writeb failed!\n"); 870 netdev_err(priv->dev, "sdio_writeb failed!\n");
869 871
870 sdio_release_host(card->func); 872 sdio_release_host(card->func);
871 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 873 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
@@ -882,7 +884,7 @@ static int if_sdio_reset_deep_sleep_wakeup(struct lbs_private *priv)
882 884
883 sdio_writeb(card->func, 0, CONFIGURATION_REG, &ret); 885 sdio_writeb(card->func, 0, CONFIGURATION_REG, &ret);
884 if (ret) 886 if (ret)
885 lbs_pr_err("sdio_writeb failed!\n"); 887 netdev_err(priv->dev, "sdio_writeb failed!\n");
886 888
887 sdio_release_host(card->func); 889 sdio_release_host(card->func);
888 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 890 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
@@ -961,7 +963,7 @@ static int if_sdio_probe(struct sdio_func *func,
961 } 963 }
962 964
963 if (i == func->card->num_info) { 965 if (i == func->card->num_info) {
964 lbs_pr_err("unable to identify card model\n"); 966 pr_err("unable to identify card model\n");
965 return -ENODEV; 967 return -ENODEV;
966 } 968 }
967 969
@@ -995,7 +997,7 @@ static int if_sdio_probe(struct sdio_func *func,
995 break; 997 break;
996 } 998 }
997 if (i == ARRAY_SIZE(fw_table)) { 999 if (i == ARRAY_SIZE(fw_table)) {
998 lbs_pr_err("unknown card model 0x%x\n", card->model); 1000 pr_err("unknown card model 0x%x\n", card->model);
999 ret = -ENODEV; 1001 ret = -ENODEV;
1000 goto free; 1002 goto free;
1001 } 1003 }
@@ -1101,7 +1103,7 @@ static int if_sdio_probe(struct sdio_func *func,
1101 lbs_deb_sdio("send function INIT command\n"); 1103 lbs_deb_sdio("send function INIT command\n");
1102 if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd), 1104 if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
1103 lbs_cmd_copyback, (unsigned long) &cmd)) 1105 lbs_cmd_copyback, (unsigned long) &cmd))
1104 lbs_pr_alert("CMD_FUNC_INIT cmd failed\n"); 1106 netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
1105 } 1107 }
1106 1108
1107 ret = lbs_start_card(priv); 1109 ret = lbs_start_card(priv);
@@ -1163,7 +1165,7 @@ static void if_sdio_remove(struct sdio_func *func)
1163 if (__lbs_cmd(card->priv, CMD_FUNC_SHUTDOWN, 1165 if (__lbs_cmd(card->priv, CMD_FUNC_SHUTDOWN,
1164 &cmd, sizeof(cmd), lbs_cmd_copyback, 1166 &cmd, sizeof(cmd), lbs_cmd_copyback,
1165 (unsigned long) &cmd)) 1167 (unsigned long) &cmd))
1166 lbs_pr_alert("CMD_FUNC_SHUTDOWN cmd failed\n"); 1168 pr_alert("CMD_FUNC_SHUTDOWN cmd failed\n");
1167 } 1169 }
1168 1170
1169 1171
@@ -1202,20 +1204,19 @@ static int if_sdio_suspend(struct device *dev)
1202 1204
1203 mmc_pm_flag_t flags = sdio_get_host_pm_caps(func); 1205 mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
1204 1206
1205 lbs_pr_info("%s: suspend: PM flags = 0x%x\n", 1207 dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
1206 sdio_func_id(func), flags); 1208 sdio_func_id(func), flags);
1207 1209
1208 /* If we aren't being asked to wake on anything, we should bail out 1210 /* If we aren't being asked to wake on anything, we should bail out
1209 * and let the SD stack power down the card. 1211 * and let the SD stack power down the card.
1210 */ 1212 */
1211 if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) { 1213 if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1212 lbs_pr_info("Suspend without wake params -- " 1214 dev_info(dev, "Suspend without wake params -- powering down card\n");
1213 "powering down card.");
1214 return -ENOSYS; 1215 return -ENOSYS;
1215 } 1216 }
1216 1217
1217 if (!(flags & MMC_PM_KEEP_POWER)) { 1218 if (!(flags & MMC_PM_KEEP_POWER)) {
1218 lbs_pr_err("%s: cannot remain alive while host is suspended\n", 1219 dev_err(dev, "%s: cannot remain alive while host is suspended\n",
1219 sdio_func_id(func)); 1220 sdio_func_id(func));
1220 return -ENOSYS; 1221 return -ENOSYS;
1221 } 1222 }
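
The suspend hunk above checks sdio_get_host_pm_caps() and refuses to stay powered (-ENOSYS) when the host controller cannot keep the card's supply up. The usual companion step, not visible in this hunk, is to request that capability explicitly; a sketch using the SDIO core API (only the API names are the real interface, the wrapper is illustrative):

#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

static int example_sdio_suspend(struct sdio_func *func)
{
	mmc_pm_flag_t caps = sdio_get_host_pm_caps(func);

	/* Host cannot keep VDD up: bail out and let the MMC core power
	 * the card down (the driver above returns -ENOSYS here too). */
	if (!(caps & MMC_PM_KEEP_POWER))
		return -ENOSYS;

	/* Ask the host to keep the card powered across suspend so the
	 * downloaded firmware state survives. */
	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}
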
@@ -1237,7 +1238,7 @@ static int if_sdio_resume(struct device *dev)
1237 struct if_sdio_card *card = sdio_get_drvdata(func); 1238 struct if_sdio_card *card = sdio_get_drvdata(func);
1238 int ret; 1239 int ret;
1239 1240
1240 lbs_pr_info("%s: resume: we're back\n", sdio_func_id(func)); 1241 dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
1241 1242
1242 ret = lbs_resume(card->priv); 1243 ret = lbs_resume(card->priv);
1243 1244
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index f6c2cd665f49..463352c890d7 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -17,6 +17,8 @@
17 * (at your option) any later version. 17 * (at your option) any later version.
18 */ 18 */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
21#include <linux/firmware.h> 23#include <linux/firmware.h>
22#include <linux/jiffies.h> 24#include <linux/jiffies.h>
@@ -57,6 +59,7 @@ struct if_spi_card {
57 /* Handles all SPI communication (except for FW load) */ 59 /* Handles all SPI communication (except for FW load) */
58 struct workqueue_struct *workqueue; 60 struct workqueue_struct *workqueue;
59 struct work_struct packet_work; 61 struct work_struct packet_work;
62 struct work_struct resume_work;
60 63
61 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; 64 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
62 65
@@ -68,6 +71,9 @@ struct if_spi_card {
68 71
69 /* Protects cmd_packet_list and data_packet_list */ 72 /* Protects cmd_packet_list and data_packet_list */
70 spinlock_t buffer_lock; 73 spinlock_t buffer_lock;
74
 75 /* True if card is suspended */
76 u8 suspended;
71}; 77};
72 78
73static void free_if_spi_card(struct if_spi_card *card) 79static void free_if_spi_card(struct if_spi_card *card)
@@ -139,8 +145,10 @@ static void spu_transaction_finish(struct if_spi_card *card)
139 card->prev_xfer_time = jiffies; 145 card->prev_xfer_time = jiffies;
140} 146}
141 147
142/* Write out a byte buffer to an SPI register, 148/*
143 * using a series of 16-bit transfers. */ 149 * Write out a byte buffer to an SPI register,
150 * using a series of 16-bit transfers.
151 */
144static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) 152static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
145{ 153{
146 int err = 0; 154 int err = 0;
@@ -204,8 +212,10 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
204 struct spi_transfer dummy_trans; 212 struct spi_transfer dummy_trans;
205 struct spi_transfer data_trans; 213 struct spi_transfer data_trans;
206 214
207 /* You must take an even number of bytes from the SPU, even if you 215 /*
208 * don't care about the last one. */ 216 * You must take an even number of bytes from the SPU, even if you
217 * don't care about the last one.
218 */
209 BUG_ON(len & 0x1); 219 BUG_ON(len & 0x1);
210 220
211 spu_transaction_init(card); 221 spu_transaction_init(card);
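
The even-length rule in the comment above pushes a small obligation onto callers: odd-sized reads have to be rounded up to the next 16-bit boundary and the pad byte discarded. A one-line illustration of that rounding (the wrapper name is hypothetical; spu_read() is the driver function in this hunk):

#include <linux/kernel.h>

/* Hypothetical wrapper: the caller's buffer must have room for the
 * possible pad byte added by ALIGN(len, 2). */
static int spu_read_even(struct if_spi_card *card, u16 reg, u8 *buf, int len)
{
	return spu_read(card, reg, buf, ALIGN(len, 2));
}
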
@@ -254,8 +264,10 @@ static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
254 return ret; 264 return ret;
255} 265}
256 266
257/* Read 32 bits from an SPI register. 267/*
258 * The low 16 bits are read first. */ 268 * Read 32 bits from an SPI register.
269 * The low 16 bits are read first.
270 */
259static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val) 271static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
260{ 272{
261 __le32 buf; 273 __le32 buf;
@@ -267,13 +279,15 @@ static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
267 return err; 279 return err;
268} 280}
269 281
270/* Keep reading 16 bits from an SPI register until you get the correct result. 282/*
283 * Keep reading 16 bits from an SPI register until you get the correct result.
271 * 284 *
272 * If mask = 0, the correct result is any non-zero number. 285 * If mask = 0, the correct result is any non-zero number.
273 * If mask != 0, the correct result is any number where 286 * If mask != 0, the correct result is any number where
274 * number & target_mask == target 287 * number & target_mask == target
275 * 288 *
276 * Returns -ETIMEDOUT if a second passes without the correct result. */ 289 * Returns -ETIMEDOUT if a second passes without the correct result.
290 */
277static int spu_wait_for_u16(struct if_spi_card *card, u16 reg, 291static int spu_wait_for_u16(struct if_spi_card *card, u16 reg,
278 u16 target_mask, u16 target) 292 u16 target_mask, u16 target)
279{ 293{
@@ -293,16 +307,17 @@ static int spu_wait_for_u16(struct if_spi_card *card, u16 reg,
293 } 307 }
294 udelay(100); 308 udelay(100);
295 if (time_after(jiffies, timeout)) { 309 if (time_after(jiffies, timeout)) {
296 lbs_pr_err("%s: timeout with val=%02x, " 310 pr_err("%s: timeout with val=%02x, target_mask=%02x, target=%02x\n",
297 "target_mask=%02x, target=%02x\n",
298 __func__, val, target_mask, target); 311 __func__, val, target_mask, target);
299 return -ETIMEDOUT; 312 return -ETIMEDOUT;
300 } 313 }
301 } 314 }
302} 315}
303 316
304/* Read 16 bits from an SPI register until you receive a specific value. 317/*
 305 * Returns -ETIMEDOUT if 4 tries pass without success. */ 318 * Read 16 bits from an SPI register until you receive a specific value.
 319 * Returns -ETIMEDOUT if 4 tries pass without success.
320 */
306static int spu_wait_for_u32(struct if_spi_card *card, u32 reg, u32 target) 321static int spu_wait_for_u32(struct if_spi_card *card, u32 reg, u32 target)
307{ 322{
308 int err, try; 323 int err, try;
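
The comments above describe spu_wait_for_u16()'s contract: re-read the register until the masked value matches (or, with a zero mask, until it is non-zero), giving up after a second. A compact sketch of that poll-with-timeout loop, using the same jiffies/udelay primitives (a sketch, not the driver's exact code):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int poll_reg_until(struct if_spi_card *card, u16 reg,
			  u16 mask, u16 target)
{
	unsigned long timeout = jiffies + HZ;	/* give up after ~1 second */
	u16 val;
	int err;

	for (;;) {
		err = spu_read_u16(card, reg, &val);
		if (err)
			return err;

		/* mask == 0 means "any non-zero value is success" */
		if (mask ? (val & mask) == target : val != 0)
			return 0;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		udelay(100);	/* same back-off the driver uses */
	}
}
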
@@ -324,8 +339,10 @@ static int spu_set_interrupt_mode(struct if_spi_card *card,
324{ 339{
325 int err = 0; 340 int err = 0;
326 341
327 /* We can suppress a host interrupt by clearing the appropriate 342 /*
328 * bit in the "host interrupt status mask" register */ 343 * We can suppress a host interrupt by clearing the appropriate
344 * bit in the "host interrupt status mask" register
345 */
329 if (suppress_host_int) { 346 if (suppress_host_int) {
330 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0); 347 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0);
331 if (err) 348 if (err)
@@ -341,10 +358,12 @@ static int spu_set_interrupt_mode(struct if_spi_card *card,
341 return err; 358 return err;
342 } 359 }
343 360
344 /* If auto-interrupts are on, the completion of certain transactions 361 /*
362 * If auto-interrupts are on, the completion of certain transactions
345 * will trigger an interrupt automatically. If auto-interrupts 363 * will trigger an interrupt automatically. If auto-interrupts
346 * are off, we need to set the "Card Interrupt Cause" register to 364 * are off, we need to set the "Card Interrupt Cause" register to
347 * trigger a card interrupt. */ 365 * trigger a card interrupt.
366 */
348 if (auto_int) { 367 if (auto_int) {
349 err = spu_write_u16(card, IF_SPI_HOST_INT_CTRL_REG, 368 err = spu_write_u16(card, IF_SPI_HOST_INT_CTRL_REG,
350 IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO | 369 IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO |
@@ -387,7 +406,7 @@ static int spu_set_bus_mode(struct if_spi_card *card, u16 mode)
387 if (err) 406 if (err)
388 return err; 407 return err;
389 if ((rval & 0xF) != mode) { 408 if ((rval & 0xF) != mode) {
390 lbs_pr_err("Can't read bus mode register.\n"); 409 pr_err("Can't read bus mode register\n");
391 return -EIO; 410 return -EIO;
392 } 411 }
393 return 0; 412 return 0;
@@ -398,8 +417,10 @@ static int spu_init(struct if_spi_card *card, int use_dummy_writes)
398 int err = 0; 417 int err = 0;
399 u32 delay; 418 u32 delay;
400 419
401 /* We have to start up in timed delay mode so that we can safely 420 /*
402 * read the Delay Read Register. */ 421 * We have to start up in timed delay mode so that we can safely
422 * read the Delay Read Register.
423 */
403 card->use_dummy_writes = 0; 424 card->use_dummy_writes = 0;
404 err = spu_set_bus_mode(card, 425 err = spu_set_bus_mode(card,
405 IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING | 426 IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING |
@@ -455,8 +476,10 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card,
455 476
456 /* Load helper firmware image */ 477 /* Load helper firmware image */
457 while (bytes_remaining > 0) { 478 while (bytes_remaining > 0) {
458 /* Scratch pad 1 should contain the number of bytes we 479 /*
459 * want to download to the firmware */ 480 * Scratch pad 1 should contain the number of bytes we
481 * want to download to the firmware
482 */
460 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, 483 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG,
461 HELPER_FW_LOAD_CHUNK_SZ); 484 HELPER_FW_LOAD_CHUNK_SZ);
462 if (err) 485 if (err)
@@ -468,8 +491,10 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card,
468 if (err) 491 if (err)
469 goto out; 492 goto out;
470 493
471 /* Feed the data into the command read/write port reg 494 /*
472 * in chunks of 64 bytes */ 495 * Feed the data into the command read/write port reg
496 * in chunks of 64 bytes
497 */
473 memset(temp, 0, sizeof(temp)); 498 memset(temp, 0, sizeof(temp));
474 memcpy(temp, fw, 499 memcpy(temp, fw,
475 min(bytes_remaining, HELPER_FW_LOAD_CHUNK_SZ)); 500 min(bytes_remaining, HELPER_FW_LOAD_CHUNK_SZ));
@@ -491,9 +516,11 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card,
491 fw += HELPER_FW_LOAD_CHUNK_SZ; 516 fw += HELPER_FW_LOAD_CHUNK_SZ;
492 } 517 }
493 518
494 /* Once the helper / single stage firmware download is complete, 519 /*
520 * Once the helper / single stage firmware download is complete,
495 * write 0 to scratch pad 1 and interrupt the 521 * write 0 to scratch pad 1 and interrupt the
496 * bootloader. This completes the helper download. */ 522 * bootloader. This completes the helper download.
523 */
497 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK); 524 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK);
498 if (err) 525 if (err)
499 goto out; 526 goto out;
@@ -508,26 +535,30 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card,
508 535
509out: 536out:
510 if (err) 537 if (err)
511 lbs_pr_err("failed to load helper firmware (err=%d)\n", err); 538 pr_err("failed to load helper firmware (err=%d)\n", err);
512 lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err); 539 lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
513 return err; 540 return err;
514} 541}
515 542
516/* Returns the length of the next packet the firmware expects us to send 543/*
517 * Sets crc_err if the previous transfer had a CRC error. */ 544 * Returns the length of the next packet the firmware expects us to send.
545 * Sets crc_err if the previous transfer had a CRC error.
546 */
518static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card, 547static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card,
519 int *crc_err) 548 int *crc_err)
520{ 549{
521 u16 len; 550 u16 len;
522 int err = 0; 551 int err = 0;
523 552
524 /* wait until the host interrupt status register indicates 553 /*
525 * that we are ready to download */ 554 * wait until the host interrupt status register indicates
555 * that we are ready to download
556 */
526 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG, 557 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
527 IF_SPI_HIST_CMD_DOWNLOAD_RDY, 558 IF_SPI_HIST_CMD_DOWNLOAD_RDY,
528 IF_SPI_HIST_CMD_DOWNLOAD_RDY); 559 IF_SPI_HIST_CMD_DOWNLOAD_RDY);
529 if (err) { 560 if (err) {
530 lbs_pr_err("timed out waiting for host_int_status\n"); 561 pr_err("timed out waiting for host_int_status\n");
531 return err; 562 return err;
532 } 563 }
533 564
@@ -537,9 +568,8 @@ static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card,
537 return err; 568 return err;
538 569
539 if (len > IF_SPI_CMD_BUF_SIZE) { 570 if (len > IF_SPI_CMD_BUF_SIZE) {
540 lbs_pr_err("firmware load device requested a larger " 571 pr_err("firmware load device requested a larger transfer than we are prepared to handle (len = %d)\n",
541 "tranfer than we are prepared to " 572 len);
542 "handle. (len = %d)\n", len);
543 return -EIO; 573 return -EIO;
544 } 574 }
545 if (len & 0x1) { 575 if (len & 0x1) {
@@ -555,6 +585,7 @@ static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card,
555static int if_spi_prog_main_firmware(struct if_spi_card *card, 585static int if_spi_prog_main_firmware(struct if_spi_card *card,
556 const struct firmware *firmware) 586 const struct firmware *firmware)
557{ 587{
588 struct lbs_private *priv = card->priv;
558 int len, prev_len; 589 int len, prev_len;
559 int bytes, crc_err = 0, err = 0; 590 int bytes, crc_err = 0, err = 0;
560 const u8 *fw; 591 const u8 *fw;
@@ -568,8 +599,9 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card,
568 599
569 err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0); 600 err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0);
570 if (err) { 601 if (err) {
571 lbs_pr_err("%s: timed out waiting for initial " 602 netdev_err(priv->dev,
572 "scratch reg = 0\n", __func__); 603 "%s: timed out waiting for initial scratch reg = 0\n",
604 __func__);
573 goto out; 605 goto out;
574 } 606 }
575 607
@@ -583,17 +615,18 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card,
583 goto out; 615 goto out;
584 } 616 }
585 if (bytes < 0) { 617 if (bytes < 0) {
586 /* If there are no more bytes left, we would normally 618 /*
587 * expect to have terminated with len = 0 */ 619 * If there are no more bytes left, we would normally
588 lbs_pr_err("Firmware load wants more bytes " 620 * expect to have terminated with len = 0
589 "than we have to offer.\n"); 621 */
622 netdev_err(priv->dev,
623 "Firmware load wants more bytes than we have to offer.\n");
590 break; 624 break;
591 } 625 }
592 if (crc_err) { 626 if (crc_err) {
593 /* Previous transfer failed. */ 627 /* Previous transfer failed. */
594 if (++num_crc_errs > MAX_MAIN_FW_LOAD_CRC_ERR) { 628 if (++num_crc_errs > MAX_MAIN_FW_LOAD_CRC_ERR) {
595 lbs_pr_err("Too many CRC errors encountered " 629 pr_err("Too many CRC errors encountered in firmware load.\n");
596 "in firmware load.\n");
597 err = -EIO; 630 err = -EIO;
598 goto out; 631 goto out;
599 } 632 }
@@ -622,21 +655,20 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card,
622 prev_len = len; 655 prev_len = len;
623 } 656 }
624 if (bytes > prev_len) { 657 if (bytes > prev_len) {
625 lbs_pr_err("firmware load wants fewer bytes than " 658 pr_err("firmware load wants fewer bytes than we have to offer\n");
626 "we have to offer.\n");
627 } 659 }
628 660
629 /* Confirm firmware download */ 661 /* Confirm firmware download */
630 err = spu_wait_for_u32(card, IF_SPI_SCRATCH_4_REG, 662 err = spu_wait_for_u32(card, IF_SPI_SCRATCH_4_REG,
631 SUCCESSFUL_FW_DOWNLOAD_MAGIC); 663 SUCCESSFUL_FW_DOWNLOAD_MAGIC);
632 if (err) { 664 if (err) {
633 lbs_pr_err("failed to confirm the firmware download\n"); 665 pr_err("failed to confirm the firmware download\n");
634 goto out; 666 goto out;
635 } 667 }
636 668
637out: 669out:
638 if (err) 670 if (err)
639 lbs_pr_err("failed to load firmware (err=%d)\n", err); 671 pr_err("failed to load firmware (err=%d)\n", err);
640 lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err); 672 lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
641 return err; 673 return err;
642} 674}
@@ -656,14 +688,18 @@ static int if_spi_c2h_cmd(struct if_spi_card *card)
656 u16 len; 688 u16 len;
657 u8 i; 689 u8 i;
658 690
659 /* We need a buffer big enough to handle whatever people send to 691 /*
660 * hw_host_to_card */ 692 * We need a buffer big enough to handle whatever people send to
693 * hw_host_to_card
694 */
661 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_CMD_BUFFER_SIZE); 695 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_CMD_BUFFER_SIZE);
662 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_UPLD_SIZE); 696 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_UPLD_SIZE);
663 697
664 /* It's just annoying if the buffer size isn't a multiple of 4, because 698 /*
665 * then we might have len < IF_SPI_CMD_BUF_SIZE but 699 * It's just annoying if the buffer size isn't a multiple of 4, because
666 * ALIGN(len, 4) > IF_SPI_CMD_BUF_SIZE */ 700 * then we might have len < IF_SPI_CMD_BUF_SIZE but
701 * ALIGN(len, 4) > IF_SPI_CMD_BUF_SIZE
702 */
667 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE % 4 != 0); 703 BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE % 4 != 0);
668 704
669 lbs_deb_enter(LBS_DEB_SPI); 705 lbs_deb_enter(LBS_DEB_SPI);
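
The "multiple of 4" BUILD_BUG_ON above guards against a subtle off-by-alignment overrun; with hypothetical numbers, the hazard looks like this:

#include <linux/kernel.h>

/* Hypothetical sizes only: a 1021-byte response passes a plain length
 * check against a 1022-byte buffer, yet the 4-byte-aligned transfer
 * length (1024) runs two bytes past the end. Keeping the buffer size a
 * multiple of 4 guarantees ALIGN(len, 4) <= buffer size whenever len fits. */
static void align_overrun_example(void)
{
	size_t buf_size = 1022;			/* not a multiple of 4 */
	size_t len = 1021;			/* len <= buf_size */

	if (ALIGN(len, 4) > buf_size)
		pr_warn("aligned transfer would overrun the buffer\n");
}
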
@@ -673,13 +709,13 @@ static int if_spi_c2h_cmd(struct if_spi_card *card)
673 if (err) 709 if (err)
674 goto out; 710 goto out;
675 if (!len) { 711 if (!len) {
676 lbs_pr_err("%s: error: card has no data for host\n", 712 netdev_err(priv->dev, "%s: error: card has no data for host\n",
677 __func__); 713 __func__);
678 err = -EINVAL; 714 err = -EINVAL;
679 goto out; 715 goto out;
680 } else if (len > IF_SPI_CMD_BUF_SIZE) { 716 } else if (len > IF_SPI_CMD_BUF_SIZE) {
681 lbs_pr_err("%s: error: response packet too large: " 717 netdev_err(priv->dev,
682 "%d bytes, but maximum is %d\n", 718 "%s: error: response packet too large: %d bytes, but maximum is %d\n",
683 __func__, len, IF_SPI_CMD_BUF_SIZE); 719 __func__, len, IF_SPI_CMD_BUF_SIZE);
684 err = -EINVAL; 720 err = -EINVAL;
685 goto out; 721 goto out;
@@ -701,7 +737,7 @@ static int if_spi_c2h_cmd(struct if_spi_card *card)
701 737
702out: 738out:
703 if (err) 739 if (err)
704 lbs_pr_err("%s: err=%d\n", __func__, err); 740 netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
705 lbs_deb_leave(LBS_DEB_SPI); 741 lbs_deb_leave(LBS_DEB_SPI);
706 return err; 742 return err;
707} 743}
@@ -709,6 +745,7 @@ out:
709/* Move data from the card to the host */ 745/* Move data from the card to the host */
710static int if_spi_c2h_data(struct if_spi_card *card) 746static int if_spi_c2h_data(struct if_spi_card *card)
711{ 747{
748 struct lbs_private *priv = card->priv;
712 struct sk_buff *skb; 749 struct sk_buff *skb;
713 char *data; 750 char *data;
714 u16 len; 751 u16 len;
@@ -721,13 +758,13 @@ static int if_spi_c2h_data(struct if_spi_card *card)
721 if (err) 758 if (err)
722 goto out; 759 goto out;
723 if (!len) { 760 if (!len) {
724 lbs_pr_err("%s: error: card has no data for host\n", 761 netdev_err(priv->dev, "%s: error: card has no data for host\n",
725 __func__); 762 __func__);
726 err = -EINVAL; 763 err = -EINVAL;
727 goto out; 764 goto out;
728 } else if (len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { 765 } else if (len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
729 lbs_pr_err("%s: error: card has %d bytes of data, but " 766 netdev_err(priv->dev,
730 "our maximum skb size is %zu\n", 767 "%s: error: card has %d bytes of data, but our maximum skb size is %zu\n",
731 __func__, len, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE); 768 __func__, len, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
732 err = -EINVAL; 769 err = -EINVAL;
733 goto out; 770 goto out;
@@ -759,7 +796,7 @@ free_skb:
759 dev_kfree_skb(skb); 796 dev_kfree_skb(skb);
760out: 797out:
761 if (err) 798 if (err)
762 lbs_pr_err("%s: err=%d\n", __func__, err); 799 netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
763 lbs_deb_leave(LBS_DEB_SPI); 800 lbs_deb_leave(LBS_DEB_SPI);
764 return err; 801 return err;
765} 802}
@@ -768,6 +805,7 @@ out:
768static void if_spi_h2c(struct if_spi_card *card, 805static void if_spi_h2c(struct if_spi_card *card,
769 struct if_spi_packet *packet, int type) 806 struct if_spi_packet *packet, int type)
770{ 807{
808 struct lbs_private *priv = card->priv;
771 int err = 0; 809 int err = 0;
772 u16 int_type, port_reg; 810 u16 int_type, port_reg;
773 811
@@ -781,7 +819,8 @@ static void if_spi_h2c(struct if_spi_card *card,
781 port_reg = IF_SPI_CMD_RDWRPORT_REG; 819 port_reg = IF_SPI_CMD_RDWRPORT_REG;
782 break; 820 break;
783 default: 821 default:
784 lbs_pr_err("can't transfer buffer of type %d\n", type); 822 netdev_err(priv->dev, "can't transfer buffer of type %d\n",
823 type);
785 err = -EINVAL; 824 err = -EINVAL;
786 goto out; 825 goto out;
787 } 826 }
@@ -795,7 +834,7 @@ out:
795 kfree(packet); 834 kfree(packet);
796 835
797 if (err) 836 if (err)
798 lbs_pr_err("%s: error %d\n", __func__, err); 837 netdev_err(priv->dev, "%s: error %d\n", __func__, err);
799} 838}
800 839
801/* Inform the host about a card event */ 840/* Inform the host about a card event */
@@ -819,7 +858,7 @@ static void if_spi_e2h(struct if_spi_card *card)
819 lbs_queue_event(priv, cause & 0xff); 858 lbs_queue_event(priv, cause & 0xff);
820out: 859out:
821 if (err) 860 if (err)
822 lbs_pr_err("%s: error %d\n", __func__, err); 861 netdev_err(priv->dev, "%s: error %d\n", __func__, err);
823} 862}
824 863
825static void if_spi_host_to_card_worker(struct work_struct *work) 864static void if_spi_host_to_card_worker(struct work_struct *work)
@@ -829,17 +868,21 @@ static void if_spi_host_to_card_worker(struct work_struct *work)
829 u16 hiStatus; 868 u16 hiStatus;
830 unsigned long flags; 869 unsigned long flags;
831 struct if_spi_packet *packet; 870 struct if_spi_packet *packet;
871 struct lbs_private *priv;
832 872
833 card = container_of(work, struct if_spi_card, packet_work); 873 card = container_of(work, struct if_spi_card, packet_work);
874 priv = card->priv;
834 875
835 lbs_deb_enter(LBS_DEB_SPI); 876 lbs_deb_enter(LBS_DEB_SPI);
836 877
837 /* Read the host interrupt status register to see what we 878 /*
838 * can do. */ 879 * Read the host interrupt status register to see what we
880 * can do.
881 */
839 err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG, 882 err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
840 &hiStatus); 883 &hiStatus);
841 if (err) { 884 if (err) {
842 lbs_pr_err("I/O error\n"); 885 netdev_err(priv->dev, "I/O error\n");
843 goto err; 886 goto err;
844 } 887 }
845 888
@@ -854,12 +897,15 @@ static void if_spi_host_to_card_worker(struct work_struct *work)
854 goto err; 897 goto err;
855 } 898 }
856 899
857 /* workaround: in PS mode, the card does not set the Command 900 /*
858 * Download Ready bit, but it sets TX Download Ready. */ 901 * workaround: in PS mode, the card does not set the Command
902 * Download Ready bit, but it sets TX Download Ready.
903 */
859 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || 904 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
860 (card->priv->psstate != PS_STATE_FULL_POWER && 905 (card->priv->psstate != PS_STATE_FULL_POWER &&
861 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { 906 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
862 /* This means two things. First of all, 907 /*
908 * This means two things. First of all,
863 * if there was a previous command sent, the card has 909 * if there was a previous command sent, the card has
864 * successfully received it. 910 * successfully received it.
865 * Secondly, it is now ready to download another 911 * Secondly, it is now ready to download another
@@ -867,8 +913,7 @@ static void if_spi_host_to_card_worker(struct work_struct *work)
867 */ 913 */
868 lbs_host_to_card_done(card->priv); 914 lbs_host_to_card_done(card->priv);
869 915
870 /* Do we have any command packets from the host to 916 /* Do we have any command packets from the host to send? */
871 * send? */
872 packet = NULL; 917 packet = NULL;
873 spin_lock_irqsave(&card->buffer_lock, flags); 918 spin_lock_irqsave(&card->buffer_lock, flags);
874 if (!list_empty(&card->cmd_packet_list)) { 919 if (!list_empty(&card->cmd_packet_list)) {
@@ -882,8 +927,7 @@ static void if_spi_host_to_card_worker(struct work_struct *work)
882 if_spi_h2c(card, packet, MVMS_CMD); 927 if_spi_h2c(card, packet, MVMS_CMD);
883 } 928 }
884 if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) { 929 if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
885 /* Do we have any data packets from the host to 930 /* Do we have any data packets from the host to send? */
886 * send? */
887 packet = NULL; 931 packet = NULL;
888 spin_lock_irqsave(&card->buffer_lock, flags); 932 spin_lock_irqsave(&card->buffer_lock, flags);
889 if (!list_empty(&card->data_packet_list)) { 933 if (!list_empty(&card->data_packet_list)) {
@@ -901,7 +945,7 @@ static void if_spi_host_to_card_worker(struct work_struct *work)
901 945
902err: 946err:
903 if (err) 947 if (err)
904 lbs_pr_err("%s: got error %d\n", __func__, err); 948 netdev_err(priv->dev, "%s: got error %d\n", __func__, err);
905 949
906 lbs_deb_leave(LBS_DEB_SPI); 950 lbs_deb_leave(LBS_DEB_SPI);
907} 951}
@@ -910,7 +954,8 @@ err:
910 * Host to Card 954 * Host to Card
911 * 955 *
912 * Called from Libertas to transfer some data to the WLAN device 956 * Called from Libertas to transfer some data to the WLAN device
913 * We can't sleep here. */ 957 * We can't sleep here.
958 */
914static int if_spi_host_to_card(struct lbs_private *priv, 959static int if_spi_host_to_card(struct lbs_private *priv,
915 u8 type, u8 *buf, u16 nb) 960 u8 type, u8 *buf, u16 nb)
916{ 961{
@@ -923,7 +968,8 @@ static int if_spi_host_to_card(struct lbs_private *priv,
923 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb); 968 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
924 969
925 if (nb == 0) { 970 if (nb == 0) {
926 lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb); 971 netdev_err(priv->dev, "%s: invalid size requested: %d\n",
972 __func__, nb);
927 err = -EINVAL; 973 err = -EINVAL;
928 goto out; 974 goto out;
929 } 975 }
@@ -951,7 +997,8 @@ static int if_spi_host_to_card(struct lbs_private *priv,
951 spin_unlock_irqrestore(&card->buffer_lock, flags); 997 spin_unlock_irqrestore(&card->buffer_lock, flags);
952 break; 998 break;
953 default: 999 default:
954 lbs_pr_err("can't transfer buffer of type %d", type); 1000 netdev_err(priv->dev, "can't transfer buffer of type %d\n",
1001 type);
955 err = -EINVAL; 1002 err = -EINVAL;
956 break; 1003 break;
957 } 1004 }
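
The "we can't sleep here" note above explains the shape of this function: the host-to-card hook runs in atomic context, so it only queues the packet under buffer_lock and kicks the workqueue, and the worker shown earlier performs the (sleeping) SPI transfers. A sketch of that hand-off, assuming the packet's embedded list_head is named list as elsewhere in this driver:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Illustrative only: queue one data packet without sleeping, then let
 * if_spi_host_to_card_worker() drain the list in process context. */
static void queue_data_packet(struct if_spi_card *card,
			      struct if_spi_packet *packet)
{
	unsigned long flags;

	spin_lock_irqsave(&card->buffer_lock, flags);
	list_add_tail(&packet->list, &card->data_packet_list);
	spin_unlock_irqrestore(&card->buffer_lock, flags);

	queue_work(card->workqueue, &card->packet_work);
}
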
@@ -984,6 +1031,7 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
984 1031
985static int if_spi_init_card(struct if_spi_card *card) 1032static int if_spi_init_card(struct if_spi_card *card)
986{ 1033{
1034 struct lbs_private *priv = card->priv;
987 struct spi_device *spi = card->spi; 1035 struct spi_device *spi = card->spi;
988 int err, i; 1036 int err, i;
989 u32 scratch; 1037 u32 scratch;
@@ -1012,8 +1060,8 @@ static int if_spi_init_card(struct if_spi_card *card)
1012 break; 1060 break;
1013 } 1061 }
1014 if (i == ARRAY_SIZE(fw_table)) { 1062 if (i == ARRAY_SIZE(fw_table)) {
1015 lbs_pr_err("Unsupported chip_id: 0x%02x\n", 1063 netdev_err(priv->dev, "Unsupported chip_id: 0x%02x\n",
1016 card->card_id); 1064 card->card_id);
1017 err = -ENODEV; 1065 err = -ENODEV;
1018 goto out; 1066 goto out;
1019 } 1067 }
@@ -1022,7 +1070,8 @@ static int if_spi_init_card(struct if_spi_card *card)
1022 card->card_id, &fw_table[0], &helper, 1070 card->card_id, &fw_table[0], &helper,
1023 &mainfw); 1071 &mainfw);
1024 if (err) { 1072 if (err) {
1025 lbs_pr_err("failed to find firmware (%d)\n", err); 1073 netdev_err(priv->dev, "failed to find firmware (%d)\n",
1074 err);
1026 goto out; 1075 goto out;
1027 } 1076 }
1028 1077
@@ -1057,6 +1106,28 @@ out:
1057 return err; 1106 return err;
1058} 1107}
1059 1108
1109static void if_spi_resume_worker(struct work_struct *work)
1110{
1111 struct if_spi_card *card;
1112
1113 card = container_of(work, struct if_spi_card, resume_work);
1114
1115 if (card->suspended) {
1116 if (card->pdata->setup)
1117 card->pdata->setup(card->spi);
1118
1119 /* Init card ... */
1120 if_spi_init_card(card);
1121
1122 enable_irq(card->spi->irq);
1123
1124 /* And resume it ... */
1125 lbs_resume(card->priv);
1126
1127 card->suspended = 0;
1128 }
1129}
1130
1060static int __devinit if_spi_probe(struct spi_device *spi) 1131static int __devinit if_spi_probe(struct spi_device *spi)
1061{ 1132{
1062 struct if_spi_card *card; 1133 struct if_spi_card *card;
@@ -1099,14 +1170,17 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1099 if (err) 1170 if (err)
1100 goto free_card; 1171 goto free_card;
1101 1172
1102 /* Register our card with libertas. 1173 /*
1103 * This will call alloc_etherdev */ 1174 * Register our card with libertas.
1175 * This will call alloc_etherdev.
1176 */
1104 priv = lbs_add_card(card, &spi->dev); 1177 priv = lbs_add_card(card, &spi->dev);
1105 if (!priv) { 1178 if (!priv) {
1106 err = -ENOMEM; 1179 err = -ENOMEM;
1107 goto free_card; 1180 goto free_card;
1108 } 1181 }
1109 card->priv = priv; 1182 card->priv = priv;
1183 priv->setup_fw_on_resume = 1;
1110 priv->card = card; 1184 priv->card = card;
1111 priv->hw_host_to_card = if_spi_host_to_card; 1185 priv->hw_host_to_card = if_spi_host_to_card;
1112 priv->enter_deep_sleep = NULL; 1186 priv->enter_deep_sleep = NULL;
@@ -1117,17 +1191,20 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1117 /* Initialize interrupt handling stuff. */ 1191 /* Initialize interrupt handling stuff. */
1118 card->workqueue = create_workqueue("libertas_spi"); 1192 card->workqueue = create_workqueue("libertas_spi");
1119 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); 1193 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
1194 INIT_WORK(&card->resume_work, if_spi_resume_worker);
1120 1195
1121 err = request_irq(spi->irq, if_spi_host_interrupt, 1196 err = request_irq(spi->irq, if_spi_host_interrupt,
1122 IRQF_TRIGGER_FALLING, "libertas_spi", card); 1197 IRQF_TRIGGER_FALLING, "libertas_spi", card);
1123 if (err) { 1198 if (err) {
1124 lbs_pr_err("can't get host irq line-- request_irq failed\n"); 1199 pr_err("can't get host irq line-- request_irq failed\n");
1125 goto terminate_workqueue; 1200 goto terminate_workqueue;
1126 } 1201 }
1127 1202
1128 /* Start the card. 1203 /*
1204 * Start the card.
1129 * This will call register_netdev, and we'll start 1205 * This will call register_netdev, and we'll start
1130 * getting interrupts... */ 1206 * getting interrupts...
1207 */
1131 err = lbs_start_card(priv); 1208 err = lbs_start_card(priv);
1132 if (err) 1209 if (err)
1133 goto release_irq; 1210 goto release_irq;
@@ -1161,6 +1238,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1161 lbs_deb_spi("libertas_spi_remove\n"); 1238 lbs_deb_spi("libertas_spi_remove\n");
1162 lbs_deb_enter(LBS_DEB_SPI); 1239 lbs_deb_enter(LBS_DEB_SPI);
1163 1240
1241 cancel_work_sync(&card->resume_work);
1242
1164 lbs_stop_card(priv); 1243 lbs_stop_card(priv);
1165 lbs_remove_card(priv); /* will call free_netdev */ 1244 lbs_remove_card(priv); /* will call free_netdev */
1166 1245
@@ -1174,6 +1253,40 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1174 return 0; 1253 return 0;
1175} 1254}
1176 1255
1256static int if_spi_suspend(struct device *dev)
1257{
1258 struct spi_device *spi = to_spi_device(dev);
1259 struct if_spi_card *card = spi_get_drvdata(spi);
1260
1261 if (!card->suspended) {
1262 lbs_suspend(card->priv);
1263 flush_workqueue(card->workqueue);
1264 disable_irq(spi->irq);
1265
1266 if (card->pdata->teardown)
1267 card->pdata->teardown(spi);
1268 card->suspended = 1;
1269 }
1270
1271 return 0;
1272}
1273
1274static int if_spi_resume(struct device *dev)
1275{
1276 struct spi_device *spi = to_spi_device(dev);
1277 struct if_spi_card *card = spi_get_drvdata(spi);
1278
1279 /* Schedule delayed work */
1280 schedule_work(&card->resume_work);
1281
1282 return 0;
1283}
1284
1285static const struct dev_pm_ops if_spi_pm_ops = {
1286 .suspend = if_spi_suspend,
1287 .resume = if_spi_resume,
1288};
1289
1177static struct spi_driver libertas_spi_driver = { 1290static struct spi_driver libertas_spi_driver = {
1178 .probe = if_spi_probe, 1291 .probe = if_spi_probe,
1179 .remove = __devexit_p(libertas_spi_remove), 1292 .remove = __devexit_p(libertas_spi_remove),
@@ -1181,6 +1294,7 @@ static struct spi_driver libertas_spi_driver = {
1181 .name = "libertas_spi", 1294 .name = "libertas_spi",
1182 .bus = &spi_bus_type, 1295 .bus = &spi_bus_type,
1183 .owner = THIS_MODULE, 1296 .owner = THIS_MODULE,
1297 .pm = &if_spi_pm_ops,
1184 }, 1298 },
1185}; 1299};
1186 1300
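
Two notes on the new PM hooks: the resume callback deliberately does nothing but schedule resume_work, presumably so the slow firmware reload in if_spi_init_card() runs in process context instead of stalling the PM core; and on kernels that already provide the SIMPLE_DEV_PM_OPS() helper, the dev_pm_ops table above can be written more compactly (equivalent sketch, assuming that helper is available):

/* Fills in suspend/resume (and the matching hibernate callbacks) from
 * the same two functions defined above. */
static SIMPLE_DEV_PM_OPS(if_spi_pm_ops, if_spi_suspend, if_spi_resume);
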
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
index d2ac1dcd7e2e..e450e31fd11d 100644
--- a/drivers/net/wireless/libertas/if_spi.h
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -86,34 +86,34 @@
86#define IF_SPI_DEVICEID_CTRL_REG_TO_CARD_REV(dc) (dc & 0x000000ff) 86#define IF_SPI_DEVICEID_CTRL_REG_TO_CARD_REV(dc) (dc & 0x000000ff)
87 87
88/***************** IF_SPI_HOST_INT_CTRL_REG *****************/ 88/***************** IF_SPI_HOST_INT_CTRL_REG *****************/
89/** Host Interrupt Control bit : Wake up */ 89/* Host Interrupt Control bit : Wake up */
90#define IF_SPI_HICT_WAKE_UP (1<<0) 90#define IF_SPI_HICT_WAKE_UP (1<<0)
91/** Host Interrupt Control bit : WLAN ready */ 91/* Host Interrupt Control bit : WLAN ready */
92#define IF_SPI_HICT_WLAN_READY (1<<1) 92#define IF_SPI_HICT_WLAN_READY (1<<1)
93/*#define IF_SPI_HICT_FIFO_FIRST_HALF_EMPTY (1<<2) */ 93/*#define IF_SPI_HICT_FIFO_FIRST_HALF_EMPTY (1<<2) */
94/*#define IF_SPI_HICT_FIFO_SECOND_HALF_EMPTY (1<<3) */ 94/*#define IF_SPI_HICT_FIFO_SECOND_HALF_EMPTY (1<<3) */
95/*#define IF_SPI_HICT_IRQSRC_WLAN (1<<4) */ 95/*#define IF_SPI_HICT_IRQSRC_WLAN (1<<4) */
96/** Host Interrupt Control bit : Tx auto download */ 96/* Host Interrupt Control bit : Tx auto download */
97#define IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO (1<<5) 97#define IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO (1<<5)
98/** Host Interrupt Control bit : Rx auto upload */ 98/* Host Interrupt Control bit : Rx auto upload */
99#define IF_SPI_HICT_RX_UPLOAD_OVER_AUTO (1<<6) 99#define IF_SPI_HICT_RX_UPLOAD_OVER_AUTO (1<<6)
100/** Host Interrupt Control bit : Command auto download */ 100/* Host Interrupt Control bit : Command auto download */
101#define IF_SPI_HICT_CMD_DOWNLOAD_OVER_AUTO (1<<7) 101#define IF_SPI_HICT_CMD_DOWNLOAD_OVER_AUTO (1<<7)
102/** Host Interrupt Control bit : Command auto upload */ 102/* Host Interrupt Control bit : Command auto upload */
103#define IF_SPI_HICT_CMD_UPLOAD_OVER_AUTO (1<<8) 103#define IF_SPI_HICT_CMD_UPLOAD_OVER_AUTO (1<<8)
104 104
105/***************** IF_SPI_CARD_INT_CAUSE_REG *****************/ 105/***************** IF_SPI_CARD_INT_CAUSE_REG *****************/
106/** Card Interrupt Case bit : Tx download over */ 106/* Card Interrupt Case bit : Tx download over */
107#define IF_SPI_CIC_TX_DOWNLOAD_OVER (1<<0) 107#define IF_SPI_CIC_TX_DOWNLOAD_OVER (1<<0)
108/** Card Interrupt Case bit : Rx upload over */ 108/* Card Interrupt Case bit : Rx upload over */
109#define IF_SPI_CIC_RX_UPLOAD_OVER (1<<1) 109#define IF_SPI_CIC_RX_UPLOAD_OVER (1<<1)
110/** Card Interrupt Case bit : Command download over */ 110/* Card Interrupt Case bit : Command download over */
111#define IF_SPI_CIC_CMD_DOWNLOAD_OVER (1<<2) 111#define IF_SPI_CIC_CMD_DOWNLOAD_OVER (1<<2)
112/** Card Interrupt Case bit : Host event */ 112/* Card Interrupt Case bit : Host event */
113#define IF_SPI_CIC_HOST_EVENT (1<<3) 113#define IF_SPI_CIC_HOST_EVENT (1<<3)
114/** Card Interrupt Case bit : Command upload over */ 114/* Card Interrupt Case bit : Command upload over */
115#define IF_SPI_CIC_CMD_UPLOAD_OVER (1<<4) 115#define IF_SPI_CIC_CMD_UPLOAD_OVER (1<<4)
116/** Card Interrupt Case bit : Power down */ 116/* Card Interrupt Case bit : Power down */
117#define IF_SPI_CIC_POWER_DOWN (1<<5) 117#define IF_SPI_CIC_POWER_DOWN (1<<5)
118 118
119/***************** IF_SPI_CARD_INT_STATUS_REG *****************/ 119/***************** IF_SPI_CARD_INT_STATUS_REG *****************/
@@ -138,51 +138,51 @@
138#define IF_SPI_HICU_CMD_RD_FIFO_UNDERFLOW (1<<10) 138#define IF_SPI_HICU_CMD_RD_FIFO_UNDERFLOW (1<<10)
139 139
140/***************** IF_SPI_HOST_INT_STATUS_REG *****************/ 140/***************** IF_SPI_HOST_INT_STATUS_REG *****************/
141/** Host Interrupt Status bit : Tx download ready */ 141/* Host Interrupt Status bit : Tx download ready */
142#define IF_SPI_HIST_TX_DOWNLOAD_RDY (1<<0) 142#define IF_SPI_HIST_TX_DOWNLOAD_RDY (1<<0)
143/** Host Interrupt Status bit : Rx upload ready */ 143/* Host Interrupt Status bit : Rx upload ready */
144#define IF_SPI_HIST_RX_UPLOAD_RDY (1<<1) 144#define IF_SPI_HIST_RX_UPLOAD_RDY (1<<1)
145/** Host Interrupt Status bit : Command download ready */ 145/* Host Interrupt Status bit : Command download ready */
146#define IF_SPI_HIST_CMD_DOWNLOAD_RDY (1<<2) 146#define IF_SPI_HIST_CMD_DOWNLOAD_RDY (1<<2)
147/** Host Interrupt Status bit : Card event */ 147/* Host Interrupt Status bit : Card event */
148#define IF_SPI_HIST_CARD_EVENT (1<<3) 148#define IF_SPI_HIST_CARD_EVENT (1<<3)
149/** Host Interrupt Status bit : Command upload ready */ 149/* Host Interrupt Status bit : Command upload ready */
150#define IF_SPI_HIST_CMD_UPLOAD_RDY (1<<4) 150#define IF_SPI_HIST_CMD_UPLOAD_RDY (1<<4)
151/** Host Interrupt Status bit : I/O write FIFO overflow */ 151/* Host Interrupt Status bit : I/O write FIFO overflow */
152#define IF_SPI_HIST_IO_WR_FIFO_OVERFLOW (1<<5) 152#define IF_SPI_HIST_IO_WR_FIFO_OVERFLOW (1<<5)
153/** Host Interrupt Status bit : I/O read FIFO underflow */ 153/* Host Interrupt Status bit : I/O read FIFO underflow */
154#define IF_SPI_HIST_IO_RD_FIFO_UNDRFLOW (1<<6) 154#define IF_SPI_HIST_IO_RD_FIFO_UNDRFLOW (1<<6)
155/** Host Interrupt Status bit : Data write FIFO overflow */ 155/* Host Interrupt Status bit : Data write FIFO overflow */
156#define IF_SPI_HIST_DATA_WR_FIFO_OVERFLOW (1<<7) 156#define IF_SPI_HIST_DATA_WR_FIFO_OVERFLOW (1<<7)
157/** Host Interrupt Status bit : Data read FIFO underflow */ 157/* Host Interrupt Status bit : Data read FIFO underflow */
158#define IF_SPI_HIST_DATA_RD_FIFO_UNDERFLOW (1<<8) 158#define IF_SPI_HIST_DATA_RD_FIFO_UNDERFLOW (1<<8)
159/** Host Interrupt Status bit : Command write FIFO overflow */ 159/* Host Interrupt Status bit : Command write FIFO overflow */
160#define IF_SPI_HIST_CMD_WR_FIFO_OVERFLOW (1<<9) 160#define IF_SPI_HIST_CMD_WR_FIFO_OVERFLOW (1<<9)
161/** Host Interrupt Status bit : Command read FIFO underflow */ 161/* Host Interrupt Status bit : Command read FIFO underflow */
162#define IF_SPI_HIST_CMD_RD_FIFO_UNDERFLOW (1<<10) 162#define IF_SPI_HIST_CMD_RD_FIFO_UNDERFLOW (1<<10)
163 163
164/***************** IF_SPI_HOST_INT_STATUS_MASK_REG *****************/ 164/***************** IF_SPI_HOST_INT_STATUS_MASK_REG *****************/
165/** Host Interrupt Status Mask bit : Tx download ready */ 165/* Host Interrupt Status Mask bit : Tx download ready */
166#define IF_SPI_HISM_TX_DOWNLOAD_RDY (1<<0) 166#define IF_SPI_HISM_TX_DOWNLOAD_RDY (1<<0)
167/** Host Interrupt Status Mask bit : Rx upload ready */ 167/* Host Interrupt Status Mask bit : Rx upload ready */
168#define IF_SPI_HISM_RX_UPLOAD_RDY (1<<1) 168#define IF_SPI_HISM_RX_UPLOAD_RDY (1<<1)
169/** Host Interrupt Status Mask bit : Command download ready */ 169/* Host Interrupt Status Mask bit : Command download ready */
170#define IF_SPI_HISM_CMD_DOWNLOAD_RDY (1<<2) 170#define IF_SPI_HISM_CMD_DOWNLOAD_RDY (1<<2)
171/** Host Interrupt Status Mask bit : Card event */ 171/* Host Interrupt Status Mask bit : Card event */
172#define IF_SPI_HISM_CARDEVENT (1<<3) 172#define IF_SPI_HISM_CARDEVENT (1<<3)
173/** Host Interrupt Status Mask bit : Command upload ready */ 173/* Host Interrupt Status Mask bit : Command upload ready */
174#define IF_SPI_HISM_CMD_UPLOAD_RDY (1<<4) 174#define IF_SPI_HISM_CMD_UPLOAD_RDY (1<<4)
175/** Host Interrupt Status Mask bit : I/O write FIFO overflow */ 175/* Host Interrupt Status Mask bit : I/O write FIFO overflow */
176#define IF_SPI_HISM_IO_WR_FIFO_OVERFLOW (1<<5) 176#define IF_SPI_HISM_IO_WR_FIFO_OVERFLOW (1<<5)
177/** Host Interrupt Status Mask bit : I/O read FIFO underflow */ 177/* Host Interrupt Status Mask bit : I/O read FIFO underflow */
178#define IF_SPI_HISM_IO_RD_FIFO_UNDERFLOW (1<<6) 178#define IF_SPI_HISM_IO_RD_FIFO_UNDERFLOW (1<<6)
179/** Host Interrupt Status Mask bit : Data write FIFO overflow */ 179/* Host Interrupt Status Mask bit : Data write FIFO overflow */
180#define IF_SPI_HISM_DATA_WR_FIFO_OVERFLOW (1<<7) 180#define IF_SPI_HISM_DATA_WR_FIFO_OVERFLOW (1<<7)
181/** Host Interrupt Status Mask bit : Data write FIFO underflow */ 181/* Host Interrupt Status Mask bit : Data write FIFO underflow */
182#define IF_SPI_HISM_DATA_RD_FIFO_UNDERFLOW (1<<8) 182#define IF_SPI_HISM_DATA_RD_FIFO_UNDERFLOW (1<<8)
183/** Host Interrupt Status Mask bit : Command write FIFO overflow */ 183/* Host Interrupt Status Mask bit : Command write FIFO overflow */
184#define IF_SPI_HISM_CMD_WR_FIFO_OVERFLOW (1<<9) 184#define IF_SPI_HISM_CMD_WR_FIFO_OVERFLOW (1<<9)
185/** Host Interrupt Status Mask bit : Command write FIFO underflow */ 185/* Host Interrupt Status Mask bit : Command write FIFO underflow */
186#define IF_SPI_HISM_CMD_RD_FIFO_UNDERFLOW (1<<10) 186#define IF_SPI_HISM_CMD_RD_FIFO_UNDERFLOW (1<<10)
187 187
188/***************** IF_SPI_SPU_BUS_MODE_REG *****************/ 188/***************** IF_SPI_SPU_BUS_MODE_REG *****************/
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 6524c70363d9..b5acc393a65a 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -1,6 +1,9 @@
1/** 1/*
2 * This file contains functions used in USB interface module. 2 * This file contains functions used in USB interface module.
3 */ 3 */
4
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6
4#include <linux/delay.h> 7#include <linux/delay.h>
5#include <linux/moduleparam.h> 8#include <linux/moduleparam.h>
6#include <linux/firmware.h> 9#include <linux/firmware.h>
@@ -66,7 +69,7 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
66 69
67/* sysfs hooks */ 70/* sysfs hooks */
68 71
69/** 72/*
70 * Set function to write firmware to device's persistent memory 73 * Set function to write firmware to device's persistent memory
71 */ 74 */
72static ssize_t if_usb_firmware_set(struct device *dev, 75static ssize_t if_usb_firmware_set(struct device *dev,
@@ -85,7 +88,7 @@ static ssize_t if_usb_firmware_set(struct device *dev,
85 return ret; 88 return ret;
86} 89}
87 90
88/** 91/*
89 * lbs_flash_fw attribute to be exported per ethX interface through sysfs 92 * lbs_flash_fw attribute to be exported per ethX interface through sysfs
90 * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to 93 * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to
91 * the device's persistent memory: 94 * the device's persistent memory:
@@ -94,7 +97,14 @@ static ssize_t if_usb_firmware_set(struct device *dev,
94static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set); 97static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
95 98
96/** 99/**
97 * Set function to write firmware to device's persistent memory 100 * if_usb_boot2_set - write firmware to device's persistent memory
101 *
102 * @dev: target device
103 * @attr: device attributes
104 * @buf: firmware buffer to write
105 * @count: number of bytes to write
106 *
107 * returns: number of bytes written or negative error code
98 */ 108 */
99static ssize_t if_usb_boot2_set(struct device *dev, 109static ssize_t if_usb_boot2_set(struct device *dev,
100 struct device_attribute *attr, const char *buf, size_t count) 110 struct device_attribute *attr, const char *buf, size_t count)
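
The hunk above (and the ones that follow) convert the old @brief/@param/@return headers to kernel-doc. For reference, the general shape these comments are moving toward, shown on an illustrative prototype rather than a function from the patch:

/**
 * example_tx_block - send one buffer to the device (illustrative)
 * @cardp:   interface state allocated at probe time
 * @payload: data to transmit
 * @nb:      number of bytes in @payload
 *
 * returns: 0 on success or a negative error code
 */
static int example_tx_block(struct if_usb_card *cardp, uint8_t *payload,
			    uint16_t nb);
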
@@ -112,7 +122,7 @@ static ssize_t if_usb_boot2_set(struct device *dev,
112 return ret; 122 return ret;
113} 123}
114 124
115/** 125/*
116 * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs 126 * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs
117 * (/sys/class/net/ethX/lbs_flash_boot2). Use this like so to write firmware 127 * (/sys/class/net/ethX/lbs_flash_boot2). Use this like so to write firmware
118 * to the device's persistent memory: 128 * to the device's persistent memory:
@@ -121,9 +131,10 @@ static ssize_t if_usb_boot2_set(struct device *dev,
121static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set); 131static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set);
122 132
123/** 133/**
124 * @brief call back function to handle the status of the URB 134 * if_usb_write_bulk_callback - callback function to handle the status
125 * @param urb pointer to urb structure 135 * of the URB
126 * @return N/A 136 * @urb: pointer to &urb structure
137 * returns: N/A
127 */ 138 */
128static void if_usb_write_bulk_callback(struct urb *urb) 139static void if_usb_write_bulk_callback(struct urb *urb)
129{ 140{
@@ -145,14 +156,14 @@ static void if_usb_write_bulk_callback(struct urb *urb)
145 lbs_host_to_card_done(priv); 156 lbs_host_to_card_done(priv);
146 } else { 157 } else {
147 /* print the failure status number for debug */ 158 /* print the failure status number for debug */
148 lbs_pr_info("URB in failure status: %d\n", urb->status); 159 pr_info("URB in failure status: %d\n", urb->status);
149 } 160 }
150} 161}
151 162
152/** 163/**
153 * @brief free tx/rx urb, skb and rx buffer 164 * if_usb_free - free tx/rx urb, skb and rx buffer
154 * @param cardp pointer if_usb_card 165 * @cardp: pointer to &if_usb_card
155 * @return N/A 166 * returns: N/A
156 */ 167 */
157static void if_usb_free(struct if_usb_card *cardp) 168static void if_usb_free(struct if_usb_card *cardp)
158{ 169{
@@ -195,7 +206,7 @@ static void if_usb_setup_firmware(struct lbs_private *priv)
195 wake_method.hdr.size = cpu_to_le16(sizeof(wake_method)); 206 wake_method.hdr.size = cpu_to_le16(sizeof(wake_method));
196 wake_method.action = cpu_to_le16(CMD_ACT_GET); 207 wake_method.action = cpu_to_le16(CMD_ACT_GET);
197 if (lbs_cmd_with_response(priv, CMD_802_11_FW_WAKE_METHOD, &wake_method)) { 208 if (lbs_cmd_with_response(priv, CMD_802_11_FW_WAKE_METHOD, &wake_method)) {
198 lbs_pr_info("Firmware does not seem to support PS mode\n"); 209 netdev_info(priv->dev, "Firmware does not seem to support PS mode\n");
199 priv->fwcapinfo &= ~FW_CAPINFO_PS; 210 priv->fwcapinfo &= ~FW_CAPINFO_PS;
200 } else { 211 } else {
201 if (le16_to_cpu(wake_method.method) == CMD_WAKE_METHOD_COMMAND_INT) { 212 if (le16_to_cpu(wake_method.method) == CMD_WAKE_METHOD_COMMAND_INT) {
@@ -204,7 +215,8 @@ static void if_usb_setup_firmware(struct lbs_private *priv)
204 /* The versions which boot up this way don't seem to 215 /* The versions which boot up this way don't seem to
205 work even if we set it to the command interrupt */ 216 work even if we set it to the command interrupt */
206 priv->fwcapinfo &= ~FW_CAPINFO_PS; 217 priv->fwcapinfo &= ~FW_CAPINFO_PS;
207 lbs_pr_info("Firmware doesn't wake via command interrupt; disabling PS mode\n"); 218 netdev_info(priv->dev,
219 "Firmware doesn't wake via command interrupt; disabling PS mode\n");
208 } 220 }
209 } 221 }
210} 222}
@@ -216,7 +228,7 @@ static void if_usb_fw_timeo(unsigned long priv)
216 if (cardp->fwdnldover) { 228 if (cardp->fwdnldover) {
217 lbs_deb_usb("Download complete, no event. Assuming success\n"); 229 lbs_deb_usb("Download complete, no event. Assuming success\n");
218 } else { 230 } else {
219 lbs_pr_err("Download timed out\n"); 231 pr_err("Download timed out\n");
220 cardp->surprise_removed = 1; 232 cardp->surprise_removed = 1;
221 } 233 }
222 wake_up(&cardp->fw_wq); 234 wake_up(&cardp->fw_wq);
@@ -231,10 +243,10 @@ static void if_usb_reset_olpc_card(struct lbs_private *priv)
231#endif 243#endif
232 244
233/** 245/**
234 * @brief sets the configuration values 246 * if_usb_probe - sets the configuration values
235 * @param ifnum interface number 247 * @intf: &usb_interface pointer
236 * @param id pointer to usb_device_id 248 * @id: pointer to usb_device_id
237 * @return 0 on success, error code on failure 249 * returns: 0 on success, error code on failure
238 */ 250 */
239static int if_usb_probe(struct usb_interface *intf, 251static int if_usb_probe(struct usb_interface *intf,
240 const struct usb_device_id *id) 252 const struct usb_device_id *id)
@@ -250,7 +262,7 @@ static int if_usb_probe(struct usb_interface *intf,
250 262
251 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); 263 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
252 if (!cardp) { 264 if (!cardp) {
253 lbs_pr_err("Out of memory allocating private data.\n"); 265 pr_err("Out of memory allocating private data\n");
254 goto error; 266 goto error;
255 } 267 }
256 268
@@ -340,10 +352,12 @@ static int if_usb_probe(struct usb_interface *intf,
340 usb_set_intfdata(intf, cardp); 352 usb_set_intfdata(intf, cardp);
341 353
342 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw)) 354 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw))
343 lbs_pr_err("cannot register lbs_flash_fw attribute\n"); 355 netdev_err(priv->dev,
356 "cannot register lbs_flash_fw attribute\n");
344 357
345 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2)) 358 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
346 lbs_pr_err("cannot register lbs_flash_boot2 attribute\n"); 359 netdev_err(priv->dev,
360 "cannot register lbs_flash_boot2 attribute\n");
347 361
348 /* 362 /*
349 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware. 363 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
@@ -366,9 +380,9 @@ error:
366} 380}
367 381
368/** 382/**
369 * @brief free resource and cleanup 383 * if_usb_disconnect - free resource and cleanup
370 * @param intf USB interface structure 384 * @intf: USB interface structure
371 * @return N/A 385 * returns: N/A
372 */ 386 */
373static void if_usb_disconnect(struct usb_interface *intf) 387static void if_usb_disconnect(struct usb_interface *intf)
374{ 388{
@@ -398,9 +412,9 @@ static void if_usb_disconnect(struct usb_interface *intf)
398} 412}
399 413
400/** 414/**
401 * @brief This function download FW 415 * if_usb_send_fw_pkt - download FW
402 * @param priv pointer to struct lbs_private 416 * @cardp: pointer to &struct if_usb_card
403 * @return 0 417 * returns: 0
404 */ 418 */
405static int if_usb_send_fw_pkt(struct if_usb_card *cardp) 419static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
406{ 420{
@@ -486,11 +500,11 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
486} 500}
487 501
488/** 502/**
489 * @brief This function transfer the data to the device. 503 * usb_tx_block - transfer the data to the device
490 * @param priv pointer to struct lbs_private 504 * @cardp: pointer to &struct if_usb_card
491 * @param payload pointer to payload data 505 * @payload: pointer to payload data
492 * @param nb data length 506 * @nb: data length
493 * @return 0 or -1 507 * returns: 0 for success or negative error code
494 */ 508 */
495static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb) 509static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb)
496{ 510{
@@ -528,7 +542,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
528 int ret = -1; 542 int ret = -1;
529 543
530 if (!(skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE))) { 544 if (!(skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE))) {
531 lbs_pr_err("No free skb\n"); 545 pr_err("No free skb\n");
532 goto rx_ret; 546 goto rx_ret;
533 } 547 }
534 548
@@ -587,7 +601,7 @@ static void if_usb_receive_fwload(struct urb *urb)
587 601
588 if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) && 602 if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
589 tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) { 603 tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) {
590 lbs_pr_info("Firmware ready event received\n"); 604 pr_info("Firmware ready event received\n");
591 wake_up(&cardp->fw_wq); 605 wake_up(&cardp->fw_wq);
592 } else { 606 } else {
593 lbs_deb_usb("Waiting for confirmation; got %x %x\n", 607 lbs_deb_usb("Waiting for confirmation; got %x %x\n",
@@ -614,20 +628,20 @@ static void if_usb_receive_fwload(struct urb *urb)
614 bootcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) || 628 bootcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
615 bootcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) { 629 bootcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) {
616 if (!cardp->bootcmdresp) 630 if (!cardp->bootcmdresp)
617 lbs_pr_info("Firmware already seems alive; resetting\n"); 631 pr_info("Firmware already seems alive; resetting\n");
618 cardp->bootcmdresp = -1; 632 cardp->bootcmdresp = -1;
619 } else { 633 } else {
620 lbs_pr_info("boot cmd response wrong magic number (0x%x)\n", 634 pr_info("boot cmd response wrong magic number (0x%x)\n",
621 le32_to_cpu(bootcmdresp.magic)); 635 le32_to_cpu(bootcmdresp.magic));
622 } 636 }
623 } else if ((bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) && 637 } else if ((bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) &&
624 (bootcmdresp.cmd != BOOT_CMD_UPDATE_FW) && 638 (bootcmdresp.cmd != BOOT_CMD_UPDATE_FW) &&
625 (bootcmdresp.cmd != BOOT_CMD_UPDATE_BOOT2)) { 639 (bootcmdresp.cmd != BOOT_CMD_UPDATE_BOOT2)) {
626 lbs_pr_info("boot cmd response cmd_tag error (%d)\n", 640 pr_info("boot cmd response cmd_tag error (%d)\n",
627 bootcmdresp.cmd); 641 bootcmdresp.cmd);
628 } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) { 642 } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) {
629 lbs_pr_info("boot cmd response result error (%d)\n", 643 pr_info("boot cmd response result error (%d)\n",
630 bootcmdresp.result); 644 bootcmdresp.result);
631 } else { 645 } else {
632 cardp->bootcmdresp = 1; 646 cardp->bootcmdresp = 1;
633 lbs_deb_usbd(&cardp->udev->dev, 647 lbs_deb_usbd(&cardp->udev->dev,
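
The boot-response checks above compare fields that arrive in little-endian wire order, either against cpu_to_le32() constants or after converting with le32_to_cpu() for printing. A sketch of that validation idiom with an invented layout and magic value:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

#define MY_RESP_MAGIC 0xFEEDBEEF	/* illustrative value */

struct my_bootresp {
	__le32 magic;
	u8 cmd;
	u8 result;
} __packed;

static int my_check_bootresp(const struct my_bootresp *resp)
{
	/* compare in wire order; the constant is swapped at compile time */
	if (resp->magic != cpu_to_le32(MY_RESP_MAGIC)) {
		pr_info("bad magic 0x%x\n", le32_to_cpu(resp->magic));
		return -EINVAL;
	}
	return 0;
}
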
@@ -727,11 +741,11 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
727} 741}
728 742
729/** 743/**
730 * @brief This function reads of the packet into the upload buff, 744 * if_usb_receive - read the packet into the upload buffer,
 731 * wake up the main thread and initialise the Rx callback. 745 * wake up the main thread and initialise the Rx callback
732 * 746 *
733 * @param urb pointer to struct urb 747 * @urb: pointer to &struct urb
734 * @return N/A 748 * returns: N/A
735 */ 749 */
736static void if_usb_receive(struct urb *urb) 750static void if_usb_receive(struct urb *urb)
737{ 751{
@@ -802,12 +816,12 @@ rx_exit:
802} 816}
803 817
804/** 818/**
805 * @brief This function downloads data to FW 819 * if_usb_host_to_card - downloads data to FW
806 * @param priv pointer to struct lbs_private structure 820 * @priv: pointer to &struct lbs_private structure
807 * @param type type of data 821 * @type: type of data
808 * @param buf pointer to data buffer 822 * @payload: pointer to data buffer
809 * @param len number of bytes 823 * @nb: number of bytes
810 * @return 0 or -1 824 * returns: 0 for success or negative error code
811 */ 825 */
812static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, 826static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
813 uint8_t *payload, uint16_t nb) 827 uint8_t *payload, uint16_t nb)
@@ -831,10 +845,11 @@ static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
831} 845}
832 846
833/** 847/**
834 * @brief This function issues Boot command to the Boot2 code 848 * if_usb_issue_boot_command - issues Boot command to the Boot2 code
835 * @param ivalue 1:Boot from FW by USB-Download 849 * @cardp: pointer to &if_usb_card
836 * 2:Boot from FW in EEPROM 850 * @ivalue: 1:Boot from FW by USB-Download
837 * @return 0 851 * 2:Boot from FW in EEPROM
852 * returns: 0 for success or negative error code
838 */ 853 */
839static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue) 854static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
840{ 855{
@@ -853,11 +868,11 @@ static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
853 868
854 869
855/** 870/**
856 * @brief This function checks the validity of Boot2/FW image. 871 * check_fwfile_format - check the validity of Boot2/FW image
857 * 872 *
858 * @param data pointer to image 873 * @data: pointer to image
859 * len image length 874 * @totlen: image length
860 * @return 0 or -1 875 * returns: 0 (good) or 1 (failure)
861 */ 876 */
862static int check_fwfile_format(const uint8_t *data, uint32_t totlen) 877static int check_fwfile_format(const uint8_t *data, uint32_t totlen)
863{ 878{
@@ -892,7 +907,7 @@ static int check_fwfile_format(const uint8_t *data, uint32_t totlen)
892 } while (!exit); 907 } while (!exit);
893 908
894 if (ret) 909 if (ret)
895 lbs_pr_err("firmware file format check FAIL\n"); 910 pr_err("firmware file format check FAIL\n");
896 else 911 else
897 lbs_deb_fw("firmware file format check PASS\n"); 912 lbs_deb_fw("firmware file format check PASS\n");
898 913
@@ -901,13 +916,13 @@ static int check_fwfile_format(const uint8_t *data, uint32_t totlen)
901 916
902 917
903/** 918/**
904* @brief This function programs the firmware subject to cmd 919* if_usb_prog_firmware - programs the firmware subject to cmd
905* 920*
906* @param cardp the if_usb_card descriptor 921* @cardp: the if_usb_card descriptor
907* fwname firmware or boot2 image file name 922* @fwname: firmware or boot2 image file name
908* cmd either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW, 923* @cmd: either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW,
909* or BOOT_CMD_UPDATE_BOOT2. 924* or BOOT_CMD_UPDATE_BOOT2.
910* @return 0 or error code 925* returns: 0 or error code
911*/ 926*/
912static int if_usb_prog_firmware(struct if_usb_card *cardp, 927static int if_usb_prog_firmware(struct if_usb_card *cardp,
913 const char *fwname, int cmd) 928 const char *fwname, int cmd)
@@ -989,7 +1004,7 @@ static int __if_usb_prog_firmware(struct if_usb_card *cardp,
989 1004
990 ret = get_fw(cardp, fwname); 1005 ret = get_fw(cardp, fwname);
991 if (ret) { 1006 if (ret) {
992 lbs_pr_err("failed to find firmware (%d)\n", ret); 1007 pr_err("failed to find firmware (%d)\n", ret);
993 goto done; 1008 goto done;
994 } 1009 }
995 1010
@@ -1064,13 +1079,13 @@ restart:
1064 usb_kill_urb(cardp->rx_urb); 1079 usb_kill_urb(cardp->rx_urb);
1065 1080
1066 if (!cardp->fwdnldover) { 1081 if (!cardp->fwdnldover) {
1067 lbs_pr_info("failed to load fw, resetting device!\n"); 1082 pr_info("failed to load fw, resetting device!\n");
1068 if (--reset_count >= 0) { 1083 if (--reset_count >= 0) {
1069 if_usb_reset_device(cardp); 1084 if_usb_reset_device(cardp);
1070 goto restart; 1085 goto restart;
1071 } 1086 }
1072 1087
1073 lbs_pr_info("FW download failure, time = %d ms\n", i * 100); 1088 pr_info("FW download failure, time = %d ms\n", i * 100);
1074 ret = -EIO; 1089 ret = -EIO;
1075 goto release_fw; 1090 goto release_fw;
1076 } 1091 }
diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h
index d819e7e3c9aa..6e42eac331de 100644
--- a/drivers/net/wireless/libertas/if_usb.h
+++ b/drivers/net/wireless/libertas/if_usb.h
@@ -6,9 +6,9 @@
6 6
7struct lbs_private; 7struct lbs_private;
8 8
9/** 9/*
10 * This file contains definition for USB interface. 10 * This file contains definition for USB interface.
11 */ 11 */
12#define CMD_TYPE_REQUEST 0xF00DFACE 12#define CMD_TYPE_REQUEST 0xF00DFACE
13#define CMD_TYPE_DATA 0xBEADC0DE 13#define CMD_TYPE_DATA 0xBEADC0DE
14#define CMD_TYPE_INDICATION 0xBEEFFACE 14#define CMD_TYPE_INDICATION 0xBEEFFACE
@@ -40,7 +40,7 @@ struct bootcmdresp
40 uint8_t pad[2]; 40 uint8_t pad[2];
41}; 41};
42 42
43/** USB card description structure*/ 43/* USB card description structure*/
44struct if_usb_card { 44struct if_usb_card {
45 struct usb_device *udev; 45 struct usb_device *udev;
46 uint32_t model; /* MODEL_* */ 46 uint32_t model; /* MODEL_* */
@@ -77,7 +77,7 @@ struct if_usb_card {
77 __le16 boot2_version; 77 __le16 boot2_version;
78}; 78};
79 79
80/** fwheader */ 80/* fwheader */
81struct fwheader { 81struct fwheader {
82 __le32 dnldcmd; 82 __le32 dnldcmd;
83 __le32 baseaddr; 83 __le32 baseaddr;
@@ -86,14 +86,14 @@ struct fwheader {
86}; 86};
87 87
88#define FW_MAX_DATA_BLK_SIZE 600 88#define FW_MAX_DATA_BLK_SIZE 600
89/** FWData */ 89/* FWData */
90struct fwdata { 90struct fwdata {
91 struct fwheader hdr; 91 struct fwheader hdr;
92 __le32 seqnum; 92 __le32 seqnum;
93 uint8_t data[0]; 93 uint8_t data[0];
94}; 94};
95 95
96/** fwsyncheader */ 96/* fwsyncheader */
97struct fwsyncheader { 97struct fwsyncheader {
98 __le32 cmd; 98 __le32 cmd;
99 __le32 seqnum; 99 __le32 seqnum;
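
The header hunks downgrade /** to /* on comments that are not kernel-doc: the double-asterisk opener marks a block for scripts/kernel-doc, which then warns when the block does not follow the kernel-doc layout. A small contrast, sketch only:

#include <linux/types.h>

/* Plain comment: free-form text, ignored by kernel-doc. */
#define MY_MAX_BLOCK_SIZE 600		/* hypothetical constant */

/**
 * struct my_header - kernel-doc block: every member must be documented
 * @len: payload length in bytes
 */
struct my_header {
	__le32 len;
};
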
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index ca8149cd5bd9..8c40949cb076 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1,8 +1,10 @@
1/** 1/*
2 * This file contains the major functions in WLAN 2 * This file contains the major functions in WLAN
3 * driver. It includes init, exit, open, close and main 3 * driver. It includes init, exit, open, close and main
4 * thread etc.. 4 * thread etc..
5 */ 5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 8
7#include <linux/moduleparam.h> 9#include <linux/moduleparam.h>
8#include <linux/delay.h> 10#include <linux/delay.h>
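
main.c now defines pr_fmt() before its first include, so every pr_info()/pr_err()/pr_alert() in the file is prefixed with KBUILD_MODNAME. A self-contained sketch of the effect in a throwaway module:

/* Sketch: pr_fmt must be defined before the headers that expand pr_*(). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init my_init(void)
{
	pr_info("loaded\n");	/* printed as "<modname>: loaded" */
	return 0;
}
module_init(my_init);

static void __exit my_exit(void)
{
}
module_exit(my_exit);

MODULE_LICENSE("GPL");
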
@@ -34,19 +36,25 @@ unsigned int lbs_debug;
34EXPORT_SYMBOL_GPL(lbs_debug); 36EXPORT_SYMBOL_GPL(lbs_debug);
35module_param_named(libertas_debug, lbs_debug, int, 0644); 37module_param_named(libertas_debug, lbs_debug, int, 0644);
36 38
39unsigned int lbs_disablemesh;
40EXPORT_SYMBOL_GPL(lbs_disablemesh);
41module_param_named(libertas_disablemesh, lbs_disablemesh, int, 0644);
37 42
38/* This global structure is used to send the confirm_sleep command as 43
39 * fast as possible down to the firmware. */ 44/*
45 * This global structure is used to send the confirm_sleep command as
46 * fast as possible down to the firmware.
47 */
40struct cmd_confirm_sleep confirm_sleep; 48struct cmd_confirm_sleep confirm_sleep;
41 49
42 50
43/** 51/*
44 * the table to keep region code 52 * the table to keep region code
45 */ 53 */
46u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] = 54u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
47 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; 55 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 };
48 56
49/** 57/*
50 * FW rate table. FW refers to rates by their index in this table, not by the 58 * FW rate table. FW refers to rates by their index in this table, not by the
51 * rate value itself. Values of 0x00 are 59 * rate value itself. Values of 0x00 are
52 * reserved positions. 60 * reserved positions.
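
The new libertas_disablemesh parameter added earlier in this hunk is exported so that lbs_start_card() (later in this diff) can skip lbs_init_mesh() when the module is loaded with libertas_disablemesh=1. A sketch of the declaration pattern with illustrative names:

#include <linux/module.h>
#include <linux/moduleparam.h>

int my_disablemesh;
EXPORT_SYMBOL_GPL(my_disablemesh);
module_param_named(example_disablemesh, my_disablemesh, int, 0644);
MODULE_PARM_DESC(example_disablemesh, "Set to 1 to skip mesh interface setup");
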
@@ -57,10 +65,10 @@ static u8 fw_data_rates[MAX_RATES] =
57}; 65};
58 66
59/** 67/**
60 * @brief use index to get the data rate 68 * lbs_fw_index_to_data_rate - use index to get the data rate
61 * 69 *
62 * @param idx The index of data rate 70 * @idx: The index of data rate
63 * @return data rate or 0 71 * returns: data rate or 0
64 */ 72 */
65u32 lbs_fw_index_to_data_rate(u8 idx) 73u32 lbs_fw_index_to_data_rate(u8 idx)
66{ 74{
@@ -70,10 +78,10 @@ u32 lbs_fw_index_to_data_rate(u8 idx)
70} 78}
71 79
72/** 80/**
73 * @brief use rate to get the index 81 * lbs_data_rate_to_fw_index - use rate to get the index
74 * 82 *
75 * @param rate data rate 83 * @rate: data rate
76 * @return index or 0 84 * returns: index or 0
77 */ 85 */
78u8 lbs_data_rate_to_fw_index(u32 rate) 86u8 lbs_data_rate_to_fw_index(u32 rate)
79{ 87{
@@ -91,10 +99,10 @@ u8 lbs_data_rate_to_fw_index(u32 rate)
91 99
92 100
93/** 101/**
94 * @brief This function opens the ethX interface 102 * lbs_dev_open - open the ethX interface
95 * 103 *
96 * @param dev A pointer to net_device structure 104 * @dev: A pointer to &net_device structure
97 * @return 0 or -EBUSY if monitor mode active 105 * returns: 0 or -EBUSY if monitor mode active
98 */ 106 */
99static int lbs_dev_open(struct net_device *dev) 107static int lbs_dev_open(struct net_device *dev)
100{ 108{
@@ -120,10 +128,10 @@ static int lbs_dev_open(struct net_device *dev)
120} 128}
121 129
122/** 130/**
123 * @brief This function closes the ethX interface 131 * lbs_eth_stop - close the ethX interface
124 * 132 *
125 * @param dev A pointer to net_device structure 133 * @dev: A pointer to &net_device structure
126 * @return 0 134 * returns: 0
127 */ 135 */
128static int lbs_eth_stop(struct net_device *dev) 136static int lbs_eth_stop(struct net_device *dev)
129{ 137{
@@ -147,28 +155,6 @@ static int lbs_eth_stop(struct net_device *dev)
147 return 0; 155 return 0;
148} 156}
149 157
150static void lbs_tx_timeout(struct net_device *dev)
151{
152 struct lbs_private *priv = dev->ml_priv;
153
154 lbs_deb_enter(LBS_DEB_TX);
155
156 lbs_pr_err("tx watch dog timeout\n");
157
158 dev->trans_start = jiffies; /* prevent tx timeout */
159
160 if (priv->currenttxskb)
161 lbs_send_tx_feedback(priv, 0);
162
163 /* XX: Shouldn't we also call into the hw-specific driver
164 to kick it somehow? */
165 lbs_host_to_card_done(priv);
166
167 /* FIXME: reset the card */
168
169 lbs_deb_leave(LBS_DEB_TX);
170}
171
172void lbs_host_to_card_done(struct lbs_private *priv) 158void lbs_host_to_card_done(struct lbs_private *priv)
173{ 159{
174 unsigned long flags; 160 unsigned long flags;
@@ -336,12 +322,12 @@ void lbs_set_multicast_list(struct net_device *dev)
336} 322}
337 323
338/** 324/**
339 * @brief This function handles the major jobs in the LBS driver. 325 * lbs_thread - handles the major jobs in the LBS driver.
340 * It handles all events generated by firmware, RX data received 326 * It handles all events generated by firmware, RX data received
341 * from firmware and TX data sent from kernel. 327 * from firmware and TX data sent from kernel.
342 * 328 *
343 * @param data A pointer to lbs_thread structure 329 * @data: A pointer to &lbs_thread structure
344 * @return 0 330 * returns: 0
345 */ 331 */
346static int lbs_thread(void *data) 332static int lbs_thread(void *data)
347{ 333{
@@ -462,8 +448,8 @@ static int lbs_thread(void *data)
462 if (priv->cmd_timed_out && priv->cur_cmd) { 448 if (priv->cmd_timed_out && priv->cur_cmd) {
463 struct cmd_ctrl_node *cmdnode = priv->cur_cmd; 449 struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
464 450
465 lbs_pr_info("Timeout submitting command 0x%04x\n", 451 netdev_info(dev, "Timeout submitting command 0x%04x\n",
466 le16_to_cpu(cmdnode->cmdbuf->command)); 452 le16_to_cpu(cmdnode->cmdbuf->command));
467 lbs_complete_command(priv, cmdnode, -ETIMEDOUT); 453 lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
468 if (priv->reset_card) 454 if (priv->reset_card)
469 priv->reset_card(priv); 455 priv->reset_card(priv);
@@ -490,8 +476,8 @@ static int lbs_thread(void *data)
490 * after firmware fixes it 476 * after firmware fixes it
491 */ 477 */
492 priv->psstate = PS_STATE_AWAKE; 478 priv->psstate = PS_STATE_AWAKE;
493 lbs_pr_alert("ignore PS_SleepConfirm in " 479 netdev_alert(dev,
494 "non-connected state\n"); 480 "ignore PS_SleepConfirm in non-connected state\n");
495 } 481 }
496 } 482 }
497 483
@@ -540,11 +526,11 @@ static int lbs_thread(void *data)
540} 526}
541 527
542/** 528/**
543 * @brief This function gets the HW spec from the firmware and sets 529 * lbs_setup_firmware - gets the HW spec from the firmware and sets
544 * some basic parameters. 530 * some basic parameters
545 * 531 *
546 * @param priv A pointer to struct lbs_private structure 532 * @priv: A pointer to &struct lbs_private structure
547 * @return 0 or -1 533 * returns: 0 or -1
548 */ 534 */
549static int lbs_setup_firmware(struct lbs_private *priv) 535static int lbs_setup_firmware(struct lbs_private *priv)
550{ 536{
@@ -585,7 +571,8 @@ int lbs_suspend(struct lbs_private *priv)
585 if (priv->is_deep_sleep) { 571 if (priv->is_deep_sleep) {
586 ret = lbs_set_deep_sleep(priv, 0); 572 ret = lbs_set_deep_sleep(priv, 0);
587 if (ret) { 573 if (ret) {
588 lbs_pr_err("deep sleep cancellation failed: %d\n", ret); 574 netdev_err(priv->dev,
575 "deep sleep cancellation failed: %d\n", ret);
589 return ret; 576 return ret;
590 } 577 }
591 priv->deep_sleep_required = 1; 578 priv->deep_sleep_required = 1;
@@ -618,7 +605,8 @@ int lbs_resume(struct lbs_private *priv)
618 priv->deep_sleep_required = 0; 605 priv->deep_sleep_required = 0;
619 ret = lbs_set_deep_sleep(priv, 1); 606 ret = lbs_set_deep_sleep(priv, 1);
620 if (ret) 607 if (ret)
621 lbs_pr_err("deep sleep activation failed: %d\n", ret); 608 netdev_err(priv->dev,
609 "deep sleep activation failed: %d\n", ret);
622 } 610 }
623 611
624 if (priv->setup_fw_on_resume) 612 if (priv->setup_fw_on_resume)
@@ -630,8 +618,10 @@ int lbs_resume(struct lbs_private *priv)
630EXPORT_SYMBOL_GPL(lbs_resume); 618EXPORT_SYMBOL_GPL(lbs_resume);
631 619
632/** 620/**
633 * This function handles the timeout of command sending. 621 * lbs_cmd_timeout_handler - handles the timeout of command sending.
634 * It will re-send the same command again. 622 * It will re-send the same command again.
623 *
624 * @data: &struct lbs_private pointer
635 */ 625 */
636static void lbs_cmd_timeout_handler(unsigned long data) 626static void lbs_cmd_timeout_handler(unsigned long data)
637{ 627{
@@ -644,8 +634,8 @@ static void lbs_cmd_timeout_handler(unsigned long data)
644 if (!priv->cur_cmd) 634 if (!priv->cur_cmd)
645 goto out; 635 goto out;
646 636
647 lbs_pr_info("command 0x%04x timed out\n", 637 netdev_info(priv->dev, "command 0x%04x timed out\n",
648 le16_to_cpu(priv->cur_cmd->cmdbuf->command)); 638 le16_to_cpu(priv->cur_cmd->cmdbuf->command));
649 639
650 priv->cmd_timed_out = 1; 640 priv->cmd_timed_out = 1;
651 wake_up_interruptible(&priv->waitq); 641 wake_up_interruptible(&priv->waitq);
@@ -655,8 +645,10 @@ out:
655} 645}
656 646
657/** 647/**
658 * This function put the device back to deep sleep mode when timer expires 648 * auto_deepsleep_timer_fn - put the device back to deep sleep mode when
659 * and no activity (command, event, data etc.) is detected. 649 * timer expires and no activity (command, event, data etc.) is detected.
650 * @data: &struct lbs_private pointer
651 * returns: N/A
660 */ 652 */
661static void auto_deepsleep_timer_fn(unsigned long data) 653static void auto_deepsleep_timer_fn(unsigned long data)
662{ 654{
@@ -748,7 +740,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
748 740
749 /* Allocate the command buffers */ 741 /* Allocate the command buffers */
750 if (lbs_allocate_cmd_buffer(priv)) { 742 if (lbs_allocate_cmd_buffer(priv)) {
751 lbs_pr_err("Out of memory allocating command buffers\n"); 743 pr_err("Out of memory allocating command buffers\n");
752 ret = -ENOMEM; 744 ret = -ENOMEM;
753 goto out; 745 goto out;
754 } 746 }
@@ -758,7 +750,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
758 /* Create the event FIFO */ 750 /* Create the event FIFO */
759 ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); 751 ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
760 if (ret) { 752 if (ret) {
761 lbs_pr_err("Out of memory allocating event FIFO buffer\n"); 753 pr_err("Out of memory allocating event FIFO buffer\n");
762 goto out; 754 goto out;
763 } 755 }
764 756
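
Only the error message changes here, but the surrounding code sizes the event FIFO for 16 pending u32 events via the post-2.6.33 kfifo API. A sketch of that API as assumed here, with made-up wrappers:

#include <linux/kfifo.h>
#include <linux/slab.h>

struct my_priv {
	struct kfifo event_fifo;	/* queue of pending event codes */
};

static int my_init_events(struct my_priv *priv)
{
	return kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
}

static void my_queue_event(struct my_priv *priv, u32 event)
{
	kfifo_in(&priv->event_fifo, &event, sizeof(event));
}

static void my_free_events(struct my_priv *priv)
{
	kfifo_free(&priv->event_fifo);
}
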
@@ -785,18 +777,18 @@ static const struct net_device_ops lbs_netdev_ops = {
785 .ndo_stop = lbs_eth_stop, 777 .ndo_stop = lbs_eth_stop,
786 .ndo_start_xmit = lbs_hard_start_xmit, 778 .ndo_start_xmit = lbs_hard_start_xmit,
787 .ndo_set_mac_address = lbs_set_mac_address, 779 .ndo_set_mac_address = lbs_set_mac_address,
788 .ndo_tx_timeout = lbs_tx_timeout,
789 .ndo_set_multicast_list = lbs_set_multicast_list, 780 .ndo_set_multicast_list = lbs_set_multicast_list,
790 .ndo_change_mtu = eth_change_mtu, 781 .ndo_change_mtu = eth_change_mtu,
791 .ndo_validate_addr = eth_validate_addr, 782 .ndo_validate_addr = eth_validate_addr,
792}; 783};
793 784
794/** 785/**
795 * @brief This function adds the card. it will probe the 786 * lbs_add_card - adds the card. It will probe the
796 * card, allocate the lbs_priv and initialize the device. 787 * card, allocate the lbs_priv and initialize the device.
797 * 788 *
798 * @param card A pointer to card 789 * @card: A pointer to card
799 * @return A pointer to struct lbs_private structure 790 * @dmdev: A pointer to &struct device
791 * returns: A pointer to &struct lbs_private structure
800 */ 792 */
801struct lbs_private *lbs_add_card(void *card, struct device *dmdev) 793struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
802{ 794{
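
With lbs_tx_timeout() removed earlier in this file, the hunk above also drops .ndo_tx_timeout from lbs_netdev_ops, so the networking core's TX watchdog no longer has a driver hook to call. A sketch of a minimal ops table of this era, with stub handlers:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int my_open(struct net_device *dev)
{
	return 0;
}

static int my_stop(struct net_device *dev)
{
	return 0;
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb_any(skb);		/* stub: drop the frame */
	return NETDEV_TX_OK;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_open		= my_open,
	.ndo_stop		= my_stop,
	.ndo_start_xmit		= my_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	/* no .ndo_tx_timeout: the core watchdog has nothing to call */
};
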
@@ -809,7 +801,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
809 /* Allocate an Ethernet device and register it */ 801 /* Allocate an Ethernet device and register it */
810 wdev = lbs_cfg_alloc(dmdev); 802 wdev = lbs_cfg_alloc(dmdev);
811 if (IS_ERR(wdev)) { 803 if (IS_ERR(wdev)) {
812 lbs_pr_err("cfg80211 init failed\n"); 804 pr_err("cfg80211 init failed\n");
813 goto done; 805 goto done;
814 } 806 }
815 807
@@ -818,7 +810,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
818 priv->wdev = wdev; 810 priv->wdev = wdev;
819 811
820 if (lbs_init_adapter(priv)) { 812 if (lbs_init_adapter(priv)) {
821 lbs_pr_err("failed to initialize adapter structure.\n"); 813 pr_err("failed to initialize adapter structure\n");
822 goto err_wdev; 814 goto err_wdev;
823 } 815 }
824 816
@@ -950,17 +942,20 @@ int lbs_start_card(struct lbs_private *priv)
950 goto done; 942 goto done;
951 943
952 if (lbs_cfg_register(priv)) { 944 if (lbs_cfg_register(priv)) {
953 lbs_pr_err("cannot register device\n"); 945 pr_err("cannot register device\n");
954 goto done; 946 goto done;
955 } 947 }
956 948
957 lbs_update_channel(priv); 949 lbs_update_channel(priv);
958 950
959 lbs_init_mesh(priv); 951 if (!lbs_disablemesh)
952 lbs_init_mesh(priv);
953 else
954 pr_info("%s: mesh disabled\n", dev->name);
960 955
961 lbs_debugfs_init_one(priv, dev); 956 lbs_debugfs_init_one(priv, dev);
962 957
963 lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); 958 netdev_info(dev, "Marvell WLAN 802.11 adapter\n");
964 959
965 ret = 0; 960 ret = 0;
966 961
@@ -1057,19 +1052,19 @@ void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
1057EXPORT_SYMBOL_GPL(lbs_notify_command_response); 1052EXPORT_SYMBOL_GPL(lbs_notify_command_response);
1058 1053
1059/** 1054/**
1060 * @brief Retrieves two-stage firmware 1055 * lbs_get_firmware - Retrieves two-stage firmware
1061 * 1056 *
1062 * @param dev A pointer to device structure 1057 * @dev: A pointer to &device structure
1063 * @param user_helper User-defined helper firmware file 1058 * @user_helper: User-defined helper firmware file
1064 * @param user_mainfw User-defined main firmware file 1059 * @user_mainfw: User-defined main firmware file
1065 * @param card_model Bus-specific card model ID used to filter firmware table 1060 * @card_model: Bus-specific card model ID used to filter firmware table
1066 * elements 1061 * elements
1067 * @param fw_table Table of firmware file names and device model numbers 1062 * @fw_table: Table of firmware file names and device model numbers
1068 * terminated by an entry with a NULL helper name 1063 * terminated by an entry with a NULL helper name
1069 * @param helper On success, the helper firmware; caller must free 1064 * @helper: On success, the helper firmware; caller must free
1070 * @param mainfw On success, the main firmware; caller must free 1065 * @mainfw: On success, the main firmware; caller must free
1071 * 1066 *
1072 * @return 0 on success, non-zero on failure 1067 * returns: 0 on success, non-zero on failure
1073 */ 1068 */
1074int lbs_get_firmware(struct device *dev, const char *user_helper, 1069int lbs_get_firmware(struct device *dev, const char *user_helper,
1075 const char *user_mainfw, u32 card_model, 1070 const char *user_mainfw, u32 card_model,
@@ -1087,16 +1082,16 @@ int lbs_get_firmware(struct device *dev, const char *user_helper,
1087 if (user_helper) { 1082 if (user_helper) {
1088 ret = request_firmware(helper, user_helper, dev); 1083 ret = request_firmware(helper, user_helper, dev);
1089 if (ret) { 1084 if (ret) {
1090 lbs_pr_err("couldn't find helper firmware %s", 1085 dev_err(dev, "couldn't find helper firmware %s\n",
1091 user_helper); 1086 user_helper);
1092 goto fail; 1087 goto fail;
1093 } 1088 }
1094 } 1089 }
1095 if (user_mainfw) { 1090 if (user_mainfw) {
1096 ret = request_firmware(mainfw, user_mainfw, dev); 1091 ret = request_firmware(mainfw, user_mainfw, dev);
1097 if (ret) { 1092 if (ret) {
1098 lbs_pr_err("couldn't find main firmware %s", 1093 dev_err(dev, "couldn't find main firmware %s\n",
1099 user_mainfw); 1094 user_mainfw);
1100 goto fail; 1095 goto fail;
1101 } 1096 }
1102 } 1097 }
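
lbs_get_firmware() now reports failures through dev_err(); the loading itself is the standard request_firmware()/release_firmware() pair. A sketch with a hypothetical firmware file name:

#include <linux/device.h>
#include <linux/firmware.h>

static int my_load_helper(struct device *dev, const struct firmware **fw)
{
	int ret = request_firmware(fw, "example/helper.bin", dev);

	if (ret)
		dev_err(dev, "couldn't find helper firmware (%d)\n", ret);
	return ret;
}

static void my_put_helper(const struct firmware *fw)
{
	release_firmware(fw);	/* tolerates a NULL argument */
}
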
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 9d097b9c8005..24cf06680c6b 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/delay.h> 3#include <linux/delay.h>
2#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
3#include <linux/netdevice.h> 5#include <linux/netdevice.h>
@@ -16,12 +18,15 @@
16 * Mesh sysfs support 18 * Mesh sysfs support
17 */ 19 */
18 20
19/** 21/*
20 * Attributes exported through sysfs 22 * Attributes exported through sysfs
21 */ 23 */
22 24
23/** 25/**
24 * @brief Get function for sysfs attribute anycast_mask 26 * lbs_anycast_get - Get function for sysfs attribute anycast_mask
27 * @dev: the &struct device
28 * @attr: device attributes
29 * @buf: buffer where data will be returned
25 */ 30 */
26static ssize_t lbs_anycast_get(struct device *dev, 31static ssize_t lbs_anycast_get(struct device *dev,
27 struct device_attribute *attr, char * buf) 32 struct device_attribute *attr, char * buf)
@@ -40,7 +45,11 @@ static ssize_t lbs_anycast_get(struct device *dev,
40} 45}
41 46
42/** 47/**
43 * @brief Set function for sysfs attribute anycast_mask 48 * lbs_anycast_set - Set function for sysfs attribute anycast_mask
49 * @dev: the &struct device
50 * @attr: device attributes
51 * @buf: buffer that contains new attribute value
52 * @count: size of buffer
44 */ 53 */
45static ssize_t lbs_anycast_set(struct device *dev, 54static ssize_t lbs_anycast_set(struct device *dev,
46 struct device_attribute *attr, const char * buf, size_t count) 55 struct device_attribute *attr, const char * buf, size_t count)
@@ -62,7 +71,10 @@ static ssize_t lbs_anycast_set(struct device *dev,
62} 71}
63 72
64/** 73/**
65 * @brief Get function for sysfs attribute prb_rsp_limit 74 * lbs_prb_rsp_limit_get - Get function for sysfs attribute prb_rsp_limit
75 * @dev: the &struct device
76 * @attr: device attributes
77 * @buf: buffer where data will be returned
66 */ 78 */
67static ssize_t lbs_prb_rsp_limit_get(struct device *dev, 79static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
68 struct device_attribute *attr, char *buf) 80 struct device_attribute *attr, char *buf)
@@ -85,7 +97,11 @@ static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
85} 97}
86 98
87/** 99/**
88 * @brief Set function for sysfs attribute prb_rsp_limit 100 * lbs_prb_rsp_limit_set - Set function for sysfs attribute prb_rsp_limit
101 * @dev: the &struct device
102 * @attr: device attributes
103 * @buf: buffer that contains new attribute value
104 * @count: size of buffer
89 */ 105 */
90static ssize_t lbs_prb_rsp_limit_set(struct device *dev, 106static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
91 struct device_attribute *attr, const char *buf, size_t count) 107 struct device_attribute *attr, const char *buf, size_t count)
@@ -114,7 +130,10 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
114} 130}
115 131
116/** 132/**
117 * Get function for sysfs attribute mesh 133 * lbs_mesh_get - Get function for sysfs attribute mesh
134 * @dev: the &struct device
135 * @attr: device attributes
136 * @buf: buffer where data will be returned
118 */ 137 */
119static ssize_t lbs_mesh_get(struct device *dev, 138static ssize_t lbs_mesh_get(struct device *dev,
120 struct device_attribute *attr, char * buf) 139 struct device_attribute *attr, char * buf)
@@ -124,7 +143,11 @@ static ssize_t lbs_mesh_get(struct device *dev,
124} 143}
125 144
126/** 145/**
127 * Set function for sysfs attribute mesh 146 * lbs_mesh_set - Set function for sysfs attribute mesh
147 * @dev: the &struct device
148 * @attr: device attributes
149 * @buf: buffer that contains new attribute value
150 * @count: size of buffer
128 */ 151 */
129static ssize_t lbs_mesh_set(struct device *dev, 152static ssize_t lbs_mesh_set(struct device *dev,
130 struct device_attribute *attr, const char * buf, size_t count) 153 struct device_attribute *attr, const char * buf, size_t count)
@@ -151,19 +174,19 @@ static ssize_t lbs_mesh_set(struct device *dev,
151 return count; 174 return count;
152} 175}
153 176
154/** 177/*
155 * lbs_mesh attribute to be exported per ethX interface 178 * lbs_mesh attribute to be exported per ethX interface
156 * through sysfs (/sys/class/net/ethX/lbs_mesh) 179 * through sysfs (/sys/class/net/ethX/lbs_mesh)
157 */ 180 */
158static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set); 181static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set);
159 182
160/** 183/*
161 * anycast_mask attribute to be exported per mshX interface 184 * anycast_mask attribute to be exported per mshX interface
162 * through sysfs (/sys/class/net/mshX/anycast_mask) 185 * through sysfs (/sys/class/net/mshX/anycast_mask)
163 */ 186 */
164static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set); 187static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set);
165 188
166/** 189/*
167 * prb_rsp_limit attribute to be exported per mshX interface 190 * prb_rsp_limit attribute to be exported per mshX interface
168 * through sysfs (/sys/class/net/mshX/prb_rsp_limit) 191 * through sysfs (/sys/class/net/mshX/prb_rsp_limit)
169 */ 192 */
@@ -246,7 +269,7 @@ int lbs_init_mesh(struct lbs_private *priv)
246 lbs_add_mesh(priv); 269 lbs_add_mesh(priv);
247 270
248 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh)) 271 if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
249 lbs_pr_err("cannot register lbs_mesh attribute\n"); 272 netdev_err(dev, "cannot register lbs_mesh attribute\n");
250 273
251 ret = 1; 274 ret = 1;
252 } 275 }
@@ -274,10 +297,10 @@ int lbs_deinit_mesh(struct lbs_private *priv)
274 297
275 298
276/** 299/**
277 * @brief This function closes the mshX interface 300 * lbs_mesh_stop - close the mshX interface
278 * 301 *
279 * @param dev A pointer to net_device structure 302 * @dev: A pointer to &net_device structure
280 * @return 0 303 * returns: 0
281 */ 304 */
282static int lbs_mesh_stop(struct net_device *dev) 305static int lbs_mesh_stop(struct net_device *dev)
283{ 306{
@@ -301,10 +324,10 @@ static int lbs_mesh_stop(struct net_device *dev)
301} 324}
302 325
303/** 326/**
304 * @brief This function opens the mshX interface 327 * lbs_mesh_dev_open - open the mshX interface
305 * 328 *
306 * @param dev A pointer to net_device structure 329 * @dev: A pointer to &net_device structure
307 * @return 0 or -EBUSY if monitor mode active 330 * returns: 0 or -EBUSY if monitor mode active
308 */ 331 */
309static int lbs_mesh_dev_open(struct net_device *dev) 332static int lbs_mesh_dev_open(struct net_device *dev)
310{ 333{
@@ -342,10 +365,10 @@ static const struct net_device_ops mesh_netdev_ops = {
342}; 365};
343 366
344/** 367/**
345 * @brief This function adds mshX interface 368 * lbs_add_mesh - add mshX interface
346 * 369 *
347 * @param priv A pointer to the struct lbs_private structure 370 * @priv: A pointer to the &struct lbs_private structure
348 * @return 0 if successful, -X otherwise 371 * returns: 0 if successful, -X otherwise
349 */ 372 */
350int lbs_add_mesh(struct lbs_private *priv) 373int lbs_add_mesh(struct lbs_private *priv)
351{ 374{
@@ -374,7 +397,7 @@ int lbs_add_mesh(struct lbs_private *priv)
374 /* Register virtual mesh interface */ 397 /* Register virtual mesh interface */
375 ret = register_netdev(mesh_dev); 398 ret = register_netdev(mesh_dev);
376 if (ret) { 399 if (ret) {
377 lbs_pr_err("cannot register mshX virtual interface\n"); 400 pr_err("cannot register mshX virtual interface\n");
378 goto err_free; 401 goto err_free;
379 } 402 }
380 403
@@ -456,13 +479,13 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
456 */ 479 */
457 480
458/** 481/**
459 * @brief Add or delete Mesh Blinding Table entries 482 * lbs_mesh_bt_add_del - Add or delete Mesh Blinding Table entries
460 * 483 *
461 * @param priv A pointer to struct lbs_private structure 484 * @priv: A pointer to &struct lbs_private structure
462 * @param add TRUE to add the entry, FALSE to delete it 485 * @add: TRUE to add the entry, FALSE to delete it
463 * @param addr1 Destination address to blind or unblind 486 * @addr1: Destination address to blind or unblind
464 * 487 *
465 * @return 0 on success, error on failure 488 * returns: 0 on success, error on failure
466 */ 489 */
467int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1) 490int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1)
468{ 491{
@@ -493,11 +516,11 @@ int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1)
493} 516}
494 517
495/** 518/**
496 * @brief Reset/clear the mesh blinding table 519 * lbs_mesh_bt_reset - Reset/clear the mesh blinding table
497 * 520 *
498 * @param priv A pointer to struct lbs_private structure 521 * @priv: A pointer to &struct lbs_private structure
499 * 522 *
500 * @return 0 on success, error on failure 523 * returns: 0 on success, error on failure
501 */ 524 */
502int lbs_mesh_bt_reset(struct lbs_private *priv) 525int lbs_mesh_bt_reset(struct lbs_private *priv)
503{ 526{
@@ -517,17 +540,18 @@ int lbs_mesh_bt_reset(struct lbs_private *priv)
517} 540}
518 541
519/** 542/**
520 * @brief Gets the inverted status of the mesh blinding table 543 * lbs_mesh_bt_get_inverted - Gets the inverted status of the mesh
544 * blinding table
521 * 545 *
522 * Normally the firmware "blinds" or ignores traffic from mesh nodes in the 546 * Normally the firmware "blinds" or ignores traffic from mesh nodes in the
523 * table, but an inverted table allows *only* traffic from nodes listed in 547 * table, but an inverted table allows *only* traffic from nodes listed in
524 * the table. 548 * the table.
525 * 549 *
526 * @param priv A pointer to struct lbs_private structure 550 * @priv: A pointer to &struct lbs_private structure
527 * @param invert On success, TRUE if the blinding table is inverted, 551 * @inverted: On success, TRUE if the blinding table is inverted,
528 * FALSE if it is not inverted 552 * FALSE if it is not inverted
529 * 553 *
530 * @return 0 on success, error on failure 554 * returns: 0 on success, error on failure
531 */ 555 */
532int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted) 556int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted)
533{ 557{
@@ -551,18 +575,19 @@ int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted)
551} 575}
552 576
553/** 577/**
554 * @brief Sets the inverted status of the mesh blinding table 578 * lbs_mesh_bt_set_inverted - Sets the inverted status of the mesh
579 * blinding table
555 * 580 *
556 * Normally the firmware "blinds" or ignores traffic from mesh nodes in the 581 * Normally the firmware "blinds" or ignores traffic from mesh nodes in the
557 * table, but an inverted table allows *only* traffic from nodes listed in 582 * table, but an inverted table allows *only* traffic from nodes listed in
558 * the table. 583 * the table.
559 * 584 *
560 * @param priv A pointer to struct lbs_private structure 585 * @priv: A pointer to &struct lbs_private structure
561 * @param invert TRUE to invert the blinding table (only traffic from 586 * @inverted: TRUE to invert the blinding table (only traffic from
562 * listed nodes allowed), FALSE to return it 587 * listed nodes allowed), FALSE to return it
563 * to normal state (listed nodes ignored) 588 * to normal state (listed nodes ignored)
564 * 589 *
565 * @return 0 on success, error on failure 590 * returns: 0 on success, error on failure
566 */ 591 */
567int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted) 592int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted)
568{ 593{
@@ -583,13 +608,13 @@ int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted)
583} 608}
584 609
585/** 610/**
586 * @brief List an entry in the mesh blinding table 611 * lbs_mesh_bt_get_entry - List an entry in the mesh blinding table
587 * 612 *
588 * @param priv A pointer to struct lbs_private structure 613 * @priv: A pointer to &struct lbs_private structure
589 * @param id The ID of the entry to list 614 * @id: The ID of the entry to list
590 * @param addr1 MAC address associated with the table entry 615 * @addr1: MAC address associated with the table entry
591 * 616 *
592 * @return 0 on success, error on failure 617 * returns: 0 on success, error on failure
593 */ 618 */
594int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1) 619int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1)
595{ 620{
@@ -614,14 +639,14 @@ int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1)
614} 639}
615 640
616/** 641/**
617 * @brief Access the mesh forwarding table 642 * lbs_cmd_fwt_access - Access the mesh forwarding table
618 * 643 *
619 * @param priv A pointer to struct lbs_private structure 644 * @priv: A pointer to &struct lbs_private structure
620 * @param cmd_action The forwarding table action to perform 645 * @cmd_action: The forwarding table action to perform
621 * @param cmd The pre-filled FWT_ACCESS command 646 * @cmd: The pre-filled FWT_ACCESS command
622 * 647 *
623 * @return 0 on success and 'cmd' will be filled with the 648 * returns: 0 on success and 'cmd' will be filled with the
624 * firmware's response 649 * firmware's response
625 */ 650 */
626int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action, 651int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action,
627 struct cmd_ds_fwt_access *cmd) 652 struct cmd_ds_fwt_access *cmd)
@@ -774,7 +799,10 @@ static int mesh_get_default_parameters(struct device *dev,
774} 799}
775 800
776/** 801/**
777 * @brief Get function for sysfs attribute bootflag 802 * bootflag_get - Get function for sysfs attribute bootflag
803 * @dev: the &struct device
804 * @attr: device attributes
805 * @buf: buffer where data will be returned
778 */ 806 */
779static ssize_t bootflag_get(struct device *dev, 807static ssize_t bootflag_get(struct device *dev,
780 struct device_attribute *attr, char *buf) 808 struct device_attribute *attr, char *buf)
@@ -791,7 +819,11 @@ static ssize_t bootflag_get(struct device *dev,
791} 819}
792 820
793/** 821/**
794 * @brief Set function for sysfs attribute bootflag 822 * bootflag_set - Set function for sysfs attribute bootflag
823 * @dev: the &struct device
824 * @attr: device attributes
825 * @buf: buffer that contains new attribute value
826 * @count: size of buffer
795 */ 827 */
796static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr, 828static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
797 const char *buf, size_t count) 829 const char *buf, size_t count)
@@ -817,7 +849,10 @@ static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
817} 849}
818 850
819/** 851/**
820 * @brief Get function for sysfs attribute boottime 852 * boottime_get - Get function for sysfs attribute boottime
853 * @dev: the &struct device
854 * @attr: device attributes
855 * @buf: buffer where data will be returned
821 */ 856 */
822static ssize_t boottime_get(struct device *dev, 857static ssize_t boottime_get(struct device *dev,
823 struct device_attribute *attr, char *buf) 858 struct device_attribute *attr, char *buf)
@@ -834,7 +869,11 @@ static ssize_t boottime_get(struct device *dev,
834} 869}
835 870
836/** 871/**
837 * @brief Set function for sysfs attribute boottime 872 * boottime_set - Set function for sysfs attribute boottime
873 * @dev: the &struct device
874 * @attr: device attributes
875 * @buf: buffer that contains new attribute value
876 * @count: size of buffer
838 */ 877 */
839static ssize_t boottime_set(struct device *dev, 878static ssize_t boottime_set(struct device *dev,
840 struct device_attribute *attr, const char *buf, size_t count) 879 struct device_attribute *attr, const char *buf, size_t count)
@@ -869,7 +908,10 @@ static ssize_t boottime_set(struct device *dev,
869} 908}
870 909
871/** 910/**
872 * @brief Get function for sysfs attribute channel 911 * channel_get - Get function for sysfs attribute channel
912 * @dev: the &struct device
913 * @attr: device attributes
914 * @buf: buffer where data will be returned
873 */ 915 */
874static ssize_t channel_get(struct device *dev, 916static ssize_t channel_get(struct device *dev,
875 struct device_attribute *attr, char *buf) 917 struct device_attribute *attr, char *buf)
@@ -886,7 +928,11 @@ static ssize_t channel_get(struct device *dev,
886} 928}
887 929
888/** 930/**
889 * @brief Set function for sysfs attribute channel 931 * channel_set - Set function for sysfs attribute channel
932 * @dev: the &struct device
933 * @attr: device attributes
934 * @buf: buffer that contains new attribute value
935 * @count: size of buffer
890 */ 936 */
891static ssize_t channel_set(struct device *dev, struct device_attribute *attr, 937static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
892 const char *buf, size_t count) 938 const char *buf, size_t count)
@@ -912,7 +958,10 @@ static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
912} 958}
913 959
914/** 960/**
915 * @brief Get function for sysfs attribute mesh_id 961 * mesh_id_get - Get function for sysfs attribute mesh_id
962 * @dev: the &struct device
963 * @attr: device attributes
964 * @buf: buffer where data will be returned
916 */ 965 */
917static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr, 966static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
918 char *buf) 967 char *buf)
@@ -926,7 +975,7 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
926 return ret; 975 return ret;
927 976
928 if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) { 977 if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) {
929 lbs_pr_err("inconsistent mesh ID length"); 978 dev_err(dev, "inconsistent mesh ID length\n");
930 defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN; 979 defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
931 } 980 }
932 981
@@ -938,7 +987,11 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
938} 987}
939 988
940/** 989/**
941 * @brief Set function for sysfs attribute mesh_id 990 * mesh_id_set - Set function for sysfs attribute mesh_id
991 * @dev: the &struct device
992 * @attr: device attributes
993 * @buf: buffer that contains new attribute value
994 * @count: size of buffer
942 */ 995 */
943static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr, 996static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
944 const char *buf, size_t count) 997 const char *buf, size_t count)
@@ -980,7 +1033,10 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
980} 1033}
981 1034
982/** 1035/**
983 * @brief Get function for sysfs attribute protocol_id 1036 * protocol_id_get - Get function for sysfs attribute protocol_id
1037 * @dev: the &struct device
1038 * @attr: device attributes
1039 * @buf: buffer where data will be returned
984 */ 1040 */
985static ssize_t protocol_id_get(struct device *dev, 1041static ssize_t protocol_id_get(struct device *dev,
986 struct device_attribute *attr, char *buf) 1042 struct device_attribute *attr, char *buf)
@@ -997,7 +1053,11 @@ static ssize_t protocol_id_get(struct device *dev,
997} 1053}
998 1054
999/** 1055/**
1000 * @brief Set function for sysfs attribute protocol_id 1056 * protocol_id_set - Set function for sysfs attribute protocol_id
1057 * @dev: the &struct device
1058 * @attr: device attributes
1059 * @buf: buffer that contains new attribute value
1060 * @count: size of buffer
1001 */ 1061 */
1002static ssize_t protocol_id_set(struct device *dev, 1062static ssize_t protocol_id_set(struct device *dev,
1003 struct device_attribute *attr, const char *buf, size_t count) 1063 struct device_attribute *attr, const char *buf, size_t count)
@@ -1034,7 +1094,10 @@ static ssize_t protocol_id_set(struct device *dev,
1034} 1094}
1035 1095
1036/** 1096/**
1037 * @brief Get function for sysfs attribute metric_id 1097 * metric_id_get - Get function for sysfs attribute metric_id
1098 * @dev: the &struct device
1099 * @attr: device attributes
1100 * @buf: buffer where data will be returned
1038 */ 1101 */
1039static ssize_t metric_id_get(struct device *dev, 1102static ssize_t metric_id_get(struct device *dev,
1040 struct device_attribute *attr, char *buf) 1103 struct device_attribute *attr, char *buf)
@@ -1051,7 +1114,11 @@ static ssize_t metric_id_get(struct device *dev,
1051} 1114}
1052 1115
1053/** 1116/**
1054 * @brief Set function for sysfs attribute metric_id 1117 * metric_id_set - Set function for sysfs attribute metric_id
1118 * @dev: the &struct device
1119 * @attr: device attributes
1120 * @buf: buffer that contains new attribute value
1121 * @count: size of buffer
1055 */ 1122 */
1056static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr, 1123static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
1057 const char *buf, size_t count) 1124 const char *buf, size_t count)
@@ -1088,7 +1155,10 @@ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
1088} 1155}
1089 1156
1090/** 1157/**
1091 * @brief Get function for sysfs attribute capability 1158 * capability_get - Get function for sysfs attribute capability
1159 * @dev: the &struct device
1160 * @attr: device attributes
1161 * @buf: buffer where data will be returned
1092 */ 1162 */
1093static ssize_t capability_get(struct device *dev, 1163static ssize_t capability_get(struct device *dev,
1094 struct device_attribute *attr, char *buf) 1164 struct device_attribute *attr, char *buf)
@@ -1105,7 +1175,11 @@ static ssize_t capability_get(struct device *dev,
1105} 1175}
1106 1176
1107/** 1177/**
1108 * @brief Set function for sysfs attribute capability 1178 * capability_set - Set function for sysfs attribute capability
1179 * @dev: the &struct device
1180 * @attr: device attributes
1181 * @buf: buffer that contains new attribute value
1182 * @count: size of buffer
1109 */ 1183 */
1110static ssize_t capability_set(struct device *dev, struct device_attribute *attr, 1184static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
1111 const char *buf, size_t count) 1185 const char *buf, size_t count)
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index afb2e8dead3f..ee95c73ed5f4 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -1,6 +1,6 @@
1/** 1/*
2 * Contains all definitions needed for the Libertas' MESH implementation. 2 * Contains all definitions needed for the Libertas' MESH implementation.
3 */ 3 */
4#ifndef _LBS_MESH_H_ 4#ifndef _LBS_MESH_H_
5#define _LBS_MESH_H_ 5#define _LBS_MESH_H_
6 6
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index a2b1df21d286..fdb0448301a0 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -1,6 +1,9 @@
1/** 1/*
2 * This file contains the handling of RX in wlan driver. 2 * This file contains the handling of RX in wlan driver.
3 */ 3 */
4
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6
4#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
5#include <linux/slab.h> 8#include <linux/slab.h>
6#include <linux/types.h> 9#include <linux/types.h>
@@ -40,12 +43,12 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
40 struct sk_buff *skb); 43 struct sk_buff *skb);
41 44
42/** 45/**
43 * @brief This function processes received packet and forwards it 46 * lbs_process_rxed_packet - processes received packet and forwards it
44 * to kernel/upper layer 47 * to kernel/upper layer
45 * 48 *
46 * @param priv A pointer to struct lbs_private 49 * @priv: A pointer to &struct lbs_private
47 * @param skb A pointer to skb which includes the received packet 50 * @skb: A pointer to skb which includes the received packet
48 * @return 0 or -1 51 * returns: 0 or -1
49 */ 52 */
50int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) 53int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
51{ 54{
@@ -156,11 +159,11 @@ done:
156EXPORT_SYMBOL_GPL(lbs_process_rxed_packet); 159EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
157 160
158/** 161/**
159 * @brief This function converts Tx/Rx rates from the Marvell WLAN format 162 * convert_mv_rate_to_radiotap - converts Tx/Rx rates from Marvell WLAN format
160 * (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s) 163 * (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s)
161 * 164 *
162 * @param rate Input rate 165 * @rate: Input rate
163 * @return Output Rate (0 if invalid) 166 * returns: Output Rate (0 if invalid)
164 */ 167 */
165static u8 convert_mv_rate_to_radiotap(u8 rate) 168static u8 convert_mv_rate_to_radiotap(u8 rate)
166{ 169{
@@ -191,17 +194,17 @@ static u8 convert_mv_rate_to_radiotap(u8 rate)
191 case 12: /* 54 Mbps */ 194 case 12: /* 54 Mbps */
192 return 108; 195 return 108;
193 } 196 }
194 lbs_pr_alert("Invalid Marvell WLAN rate %i\n", rate); 197 pr_alert("Invalid Marvell WLAN rate %i\n", rate);
195 return 0; 198 return 0;
196} 199}
197 200
198/** 201/**
199 * @brief This function processes a received 802.11 packet and forwards it 202 * process_rxed_802_11_packet - processes a received 802.11 packet and forwards
200 * to kernel/upper layer 203 * it to kernel/upper layer
201 * 204 *
202 * @param priv A pointer to struct lbs_private 205 * @priv: A pointer to &struct lbs_private
203 * @param skb A pointer to skb which includes the received packet 206 * @skb: A pointer to skb which includes the received packet
204 * @return 0 or -1 207 * returns: 0 or -1
205 */ 208 */
206static int process_rxed_802_11_packet(struct lbs_private *priv, 209static int process_rxed_802_11_packet(struct lbs_private *priv,
207 struct sk_buff *skb) 210 struct sk_buff *skb)
@@ -248,7 +251,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
248 /* add space for the new radio header */ 251 /* add space for the new radio header */
249 if ((skb_headroom(skb) < sizeof(struct rx_radiotap_hdr)) && 252 if ((skb_headroom(skb) < sizeof(struct rx_radiotap_hdr)) &&
250 pskb_expand_head(skb, sizeof(struct rx_radiotap_hdr), 0, GFP_ATOMIC)) { 253 pskb_expand_head(skb, sizeof(struct rx_radiotap_hdr), 0, GFP_ATOMIC)) {
251 lbs_pr_alert("%s: couldn't pskb_expand_head\n", __func__); 254 netdev_alert(dev, "%s: couldn't pskb_expand_head\n", __func__);
252 ret = -ENOMEM; 255 ret = -ENOMEM;
253 kfree_skb(skb); 256 kfree_skb(skb);
254 goto done; 257 goto done;
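
The monitor-mode RX path above only changes its log call, but the surrounding idiom (ensure headroom, then skb_push() a radiotap header in front of the frame) is worth spelling out. A sketch with a placeholder header layout:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_radiotap_hdr {
	u8 data[16];			/* placeholder layout, not the real header */
};

static int my_push_radiotap(struct net_device *dev, struct sk_buff *skb)
{
	struct my_radiotap_hdr *hdr;

	if (skb_headroom(skb) < sizeof(*hdr) &&
	    pskb_expand_head(skb, sizeof(*hdr), 0, GFP_ATOMIC)) {
		netdev_alert(dev, "couldn't pskb_expand_head\n");
		return -ENOMEM;
	}

	hdr = (struct my_radiotap_hdr *)skb_push(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));	/* a real driver fills in radiotap fields */
	return 0;
}
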
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 8000ca6165d0..bbb95f88dc01 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -1,6 +1,6 @@
1/** 1/*
2 * This file contains the handling of TX in wlan driver. 2 * This file contains the handling of TX in wlan driver.
3 */ 3 */
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
@@ -13,11 +13,11 @@
13#include "dev.h" 13#include "dev.h"
14 14
15/** 15/**
16 * @brief This function converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE 16 * convert_radiotap_rate_to_mv - converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE
17 * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1) 17 * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1)
18 * 18 *
19 * @param rate Input rate 19 * @rate: Input rate
20 * @return Output Rate (0 if invalid) 20 * returns: Output Rate (0 if invalid)
21 */ 21 */
22static u32 convert_radiotap_rate_to_mv(u8 rate) 22static u32 convert_radiotap_rate_to_mv(u8 rate)
23{ 23{
@@ -51,12 +51,12 @@ static u32 convert_radiotap_rate_to_mv(u8 rate)
51} 51}
52 52
53/** 53/**
54 * @brief This function checks the conditions and sends packet to IF 54 * lbs_hard_start_xmit - checks the conditions and sends packet to IF
55 * layer if everything is ok. 55 * layer if everything is ok
56 * 56 *
57 * @param priv A pointer to struct lbs_private structure 57 * @skb: A pointer to skb which includes TX packet
58 * @param skb A pointer to skb which includes TX packet 58 * @dev: A pointer to the &struct net_device
59 * @return 0 or -1 59 * returns: 0 or -1
60 */ 60 */
61netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 61netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
62{ 62{
@@ -168,13 +168,13 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 /**
- * @brief This function sends to the host the last transmitted packet,
+ * lbs_send_tx_feedback - sends to the host the last transmitted packet,
  * filling the radiotap headers with transmission information.
  *
- * @param priv	A pointer to struct lbs_private structure
- * @param status	A 32 bit value containing transmission status.
+ * @priv: A pointer to &struct lbs_private structure
+ * @try_count: A 32-bit value containing transmission retry status.
  *
- * @returns void
+ * returns: void
  */
 void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
 {
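[Editor's note] The convert_radiotap_rate_to_mv() kerneldoc above states that IEEE80211_RADIOTAP_RATE is expressed in 500 kb/s units. As a quick illustration of just that unit convention (a standalone sketch, not part of the patch; the Marvell-format mapping from Table 8 is deliberately not reproduced here):

	#include <stdio.h>
	#include <stdint.h>

	/* Radiotap carries the data rate in units of 500 kb/s. */
	static unsigned int radiotap_rate_to_kbps(uint8_t rate)
	{
		return rate * 500u;
	}

	int main(void)
	{
		/* 0x16 == 22 -> 11000 kb/s, i.e. the 11 Mb/s 802.11b rate. */
		printf("%u kb/s\n", radiotap_rate_to_kbps(0x16));
		return 0;
	}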
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 462fbb4cb743..cf1d9b047ee6 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -1,6 +1,6 @@
-/**
+/*
  * This header file contains definition for global types
  */
 #ifndef _LBS_TYPES_H_
 #define _LBS_TYPES_H_
 
@@ -54,7 +54,7 @@ union ieee_phy_param_set {
 	struct ieee_ie_ds_param_set ds;
 } __packed;
 
-/** TLV type ID definition */
+/* TLV type ID definition */
 #define PROPRIETARY_TLV_BASE_ID		0x0100
 
 /* Terminating TLV type */
@@ -96,7 +96,7 @@ union ieee_phy_param_set {
 #define TLV_TYPE_MESH_ID        (PROPRIETARY_TLV_BASE_ID + 37)
 #define TLV_TYPE_OLD_MESH_ID    (PROPRIETARY_TLV_BASE_ID + 291)
 
-/** TLV related data structures*/
+/* TLV related data structures */
 struct mrvl_ie_header {
 	__le16 type;
 	__le16 len;
@@ -177,7 +177,7 @@ struct mrvl_ie_auth_type {
 	__le16 auth;
 } __packed;
 
-/** Local Power capability */
+/* Local Power capability */
 struct mrvl_ie_power_capability {
 	struct mrvl_ie_header header;
 	s8 minpower;
@@ -235,9 +235,11 @@ struct mrvl_ie_ledbhv {
 	struct led_bhv ledbhv[1];
 } __packed;
 
-/* Meant to be packed as the value member of a struct ieee80211_info_element.
+/*
+ * Meant to be packed as the value member of a struct ieee80211_info_element.
  * Note that the len member of the ieee80211_info_element varies depending on
- * the mesh_id_len */
+ * the mesh_id_len
+ */
 struct mrvl_meshie_val {
 	uint8_t oui[3];
 	uint8_t type;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index f4f4257a9d67..9d4a40ee16c4 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1515,19 +1515,10 @@ static int __init init_mac80211_hwsim(void)
 	if (hwsim_mon == NULL)
 		goto failed;
 
-	rtnl_lock();
-
-	err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
+	err = register_netdev(hwsim_mon);
 	if (err < 0)
 		goto failed_mon;
 
-
-	err = register_netdevice(hwsim_mon);
-	if (err < 0)
-		goto failed_mon;
-
-	rtnl_unlock();
-
 	return 0;
 
 failed_mon:
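[Editor's note] The hunk above collapses the open-coded rtnl_lock()/dev_alloc_name()/register_netdevice()/rtnl_unlock() sequence into a single register_netdev() call, which takes the RTNL and resolves a printf-style "%d" name template internally. A minimal sketch of that registration pattern under those assumptions (the setup callback and the "hwsim%d" template here are illustrative, not taken from the driver):

	#include <linux/netdevice.h>

	static struct net_device *mon_dev;

	static int register_monitor(void (*setup)(struct net_device *))
	{
		int err;

		mon_dev = alloc_netdev(0, "hwsim%d", setup);
		if (!mon_dev)
			return -ENOMEM;

		/* register_netdev() handles the RTNL and name expansion. */
		err = register_netdev(mon_dev);
		if (err) {
			free_netdev(mon_dev);
			return err;
		}
		return 0;
	}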
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
new file mode 100644
index 000000000000..916183d39009
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -0,0 +1,744 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * Fills HT capability information field, AMPDU Parameters field, HT extended
30 * capability field, and supported MCS set fields.
31 *
32 * The HT capability information field, AMPDU parameters field and supported
33 * MCS set field are retrieved from the cfg80211 stack.
34 *
35 * The RD responder bit is cleared in the extended capability header.
36 */
37void
38mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
39 struct mwifiex_ie_types_htcap *ht_cap)
40{
41 uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info);
42 struct ieee80211_supported_band *sband =
43 priv->wdev->wiphy->bands[radio_type];
44
45 ht_cap->ht_cap.ampdu_params_info =
46 (sband->ht_cap.ampdu_factor &
47 IEEE80211_HT_AMPDU_PARM_FACTOR)|
48 ((sband->ht_cap.ampdu_density <<
49 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
50 IEEE80211_HT_AMPDU_PARM_DENSITY);
51
52 memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs,
53 sizeof(sband->ht_cap.mcs));
54
55 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
56 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
57 /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
58 SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
59
60 /* Clear RD responder bit */
61 ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER;
62
63 ht_cap->ht_cap.cap_info = cpu_to_le16(sband->ht_cap.cap);
64 ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
65}
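[Editor's note] The AMPDU parameters byte assembled above packs the maximum A-MPDU length exponent into bits 0-1 and the minimum MPDU start spacing into bits 2-4, which is what the IEEE80211_HT_AMPDU_PARM_* masks express. A small standalone worked example of that packing (the mask values restate the 802.11n field layout; the sample factor/density inputs are made up):

	#include <stdio.h>
	#include <stdint.h>

	#define HT_AMPDU_PARM_FACTOR		0x03	/* max A-MPDU length exponent */
	#define HT_AMPDU_PARM_DENSITY		0x1c	/* min MPDU start spacing */
	#define HT_AMPDU_PARM_DENSITY_SHIFT	2

	static uint8_t pack_ampdu_params(uint8_t factor, uint8_t density)
	{
		return (factor & HT_AMPDU_PARM_FACTOR) |
		       ((density << HT_AMPDU_PARM_DENSITY_SHIFT) &
			HT_AMPDU_PARM_DENSITY);
	}

	int main(void)
	{
		/* factor 3 (64K A-MPDU), density 6 (8 us spacing) -> 0x1b */
		printf("ampdu_params_info = 0x%02x\n", pack_ampdu_params(3, 6));
		return 0;
	}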
66
67/*
68 * This function returns the pointer to an entry in BA Stream
69 * table which matches the requested BA status.
70 */
71static struct mwifiex_tx_ba_stream_tbl *
72mwifiex_11n_get_tx_ba_stream_status(struct mwifiex_private *priv,
73 enum mwifiex_ba_status ba_status)
74{
75 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
76 unsigned long flags;
77
78 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
79 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
80 if (tx_ba_tsr_tbl->ba_status == ba_status) {
81 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
82 flags);
83 return tx_ba_tsr_tbl;
84 }
85 }
86 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
87 return NULL;
88}
89
90/*
91 * This function handles the command response of delete a block
92 * ack request.
93 *
94 * The function checks the response success status and takes action
95 * accordingly (send an add BA request in case of success, or recreate
96 * the deleted stream in case of failure, if the add BA was also
97 * initiated by us).
98 */
99int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
100 struct host_cmd_ds_command *resp)
101{
102 int tid;
103 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
104 struct host_cmd_ds_11n_delba *del_ba =
105 (struct host_cmd_ds_11n_delba *) &resp->params.del_ba;
106 uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
107
108 tid = del_ba_param_set >> DELBA_TID_POS;
109 if (del_ba->del_result == BA_RESULT_SUCCESS) {
110 mwifiex_11n_delete_ba_stream_tbl(priv, tid,
111 del_ba->peer_mac_addr, TYPE_DELBA_SENT,
112 INITIATOR_BIT(del_ba_param_set));
113
114 tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_status(priv,
115 BA_STREAM_SETUP_INPROGRESS);
116 if (tx_ba_tbl)
117 mwifiex_send_addba(priv, tx_ba_tbl->tid,
118 tx_ba_tbl->ra);
119 } else { /*
120 * In case of failure, recreate the deleted stream in case
121 * we initiated the ADDBA
122 */
123 if (INITIATOR_BIT(del_ba_param_set)) {
124 mwifiex_11n_create_tx_ba_stream_tbl(priv,
125 del_ba->peer_mac_addr, tid,
126 BA_STREAM_SETUP_INPROGRESS);
127
128 tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_status(priv,
129 BA_STREAM_SETUP_INPROGRESS);
130 if (tx_ba_tbl)
131 mwifiex_11n_delete_ba_stream_tbl(priv,
132 tx_ba_tbl->tid, tx_ba_tbl->ra,
133 TYPE_DELBA_SENT, true);
134 }
135 }
136
137 return 0;
138}
139
140/*
141 * This function handles the command response of add a block
142 * ack request.
143 *
144 * Handling includes changing the header fields to CPU formats, checking
145 * the response success status and taking actions accordingly (delete the
146 * BA stream table in case of failure).
147 */
148int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
149 struct host_cmd_ds_command *resp)
150{
151 int tid;
152 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
153 (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp;
154 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
155
156 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
157 & SSN_MASK);
158
159 tid = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
160 & IEEE80211_ADDBA_PARAM_TID_MASK)
161 >> BLOCKACKPARAM_TID_POS;
162 if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
163 tx_ba_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid,
164 add_ba_rsp->peer_mac_addr);
165 if (tx_ba_tbl) {
166 dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
167 tx_ba_tbl->ba_status = BA_STREAM_SETUP_COMPLETE;
168 } else {
169 dev_err(priv->adapter->dev, "BA stream not created\n");
170 }
171 } else {
172 mwifiex_11n_delete_ba_stream_tbl(priv, tid,
173 add_ba_rsp->peer_mac_addr,
174 TYPE_DELBA_SENT, true);
175 if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
176 priv->aggr_prio_tbl[tid].ampdu_ap =
177 BA_STREAM_NOT_ALLOWED;
178 }
179
180 return 0;
181}
182
183/*
184 * This function handles the command response of 11n configuration request.
185 *
186 * Handling includes changing the header fields into CPU format.
187 */
188int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp, void *data_buf)
189{
190 struct mwifiex_ds_11n_tx_cfg *tx_cfg;
191 struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
192
193 if (data_buf) {
194 tx_cfg = (struct mwifiex_ds_11n_tx_cfg *) data_buf;
195 tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
196 tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
197 }
198 return 0;
199}
200
201/*
202 * This function prepares the command to reconfigure the Tx buffer.
203 *
204 * Preparation includes -
205 * - Setting command ID, action and proper size
206 * - Setting Tx buffer size (for SET only)
207 * - Ensuring correct endian-ness
208 */
209int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
210 struct host_cmd_ds_command *cmd, int cmd_action,
211 void *data_buf)
212{
213 struct host_cmd_ds_txbuf_cfg *tx_buf = &cmd->params.tx_buf;
214 u16 action = (u16) cmd_action;
215 u16 buf_size = *((u16 *) data_buf);
216
217 cmd->command = cpu_to_le16(HostCmd_CMD_RECONFIGURE_TX_BUFF);
218 cmd->size =
219 cpu_to_le16(sizeof(struct host_cmd_ds_txbuf_cfg) + S_DS_GEN);
220 tx_buf->action = cpu_to_le16(action);
221 switch (action) {
222 case HostCmd_ACT_GEN_SET:
223 dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", buf_size);
224 tx_buf->buff_size = cpu_to_le16(buf_size);
225 break;
226 case HostCmd_ACT_GEN_GET:
227 default:
228 tx_buf->buff_size = 0;
229 break;
230 }
231 return 0;
232}
233
234/*
235 * This function prepares the command for AMSDU aggregation control.
236 *
237 * Preparation includes -
238 * - Setting command ID, action and proper size
239 * - Setting AMSDU control parameters (for SET only)
240 * - Ensuring correct endian-ness
241 */
242int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
243 int cmd_action, void *data_buf)
244{
245 struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
246 &cmd->params.amsdu_aggr_ctrl;
247 u16 action = (u16) cmd_action;
248 struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl =
249 (struct mwifiex_ds_11n_amsdu_aggr_ctrl *) data_buf;
250
251 cmd->command = cpu_to_le16(HostCmd_CMD_AMSDU_AGGR_CTRL);
252 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_amsdu_aggr_ctrl)
253 + S_DS_GEN);
254 amsdu_ctrl->action = cpu_to_le16(action);
255 switch (action) {
256 case HostCmd_ACT_GEN_SET:
257 amsdu_ctrl->enable = cpu_to_le16(aa_ctrl->enable);
258 amsdu_ctrl->curr_buf_size = 0;
259 break;
260 case HostCmd_ACT_GEN_GET:
261 default:
262 amsdu_ctrl->curr_buf_size = 0;
263 break;
264 }
265 return 0;
266}
267
268/*
269 * This function handles the command response of AMSDU aggregation
270 * control request.
271 *
272 * Handling includes changing the header fields into CPU format.
273 */
274int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
275 void *data_buf)
276{
277 struct mwifiex_ds_11n_amsdu_aggr_ctrl *amsdu_aggr_ctrl;
278 struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
279 &resp->params.amsdu_aggr_ctrl;
280
281 if (data_buf) {
282 amsdu_aggr_ctrl =
283 (struct mwifiex_ds_11n_amsdu_aggr_ctrl *) data_buf;
284 amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
285 amsdu_aggr_ctrl->curr_buf_size =
286 le16_to_cpu(amsdu_ctrl->curr_buf_size);
287 }
288 return 0;
289}
290
291/*
292 * This function prepares 11n configuration command.
293 *
294 * Preparation includes -
295 * - Setting command ID, action and proper size
296 * - Setting HT Tx capability and HT Tx information fields
297 * - Ensuring correct endian-ness
298 */
299int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd,
300 u16 cmd_action, void *data_buf)
301{
302 struct host_cmd_ds_11n_cfg *htcfg = &cmd->params.htcfg;
303 struct mwifiex_ds_11n_tx_cfg *txcfg =
304 (struct mwifiex_ds_11n_tx_cfg *) data_buf;
305
306 cmd->command = cpu_to_le16(HostCmd_CMD_11N_CFG);
307 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_11n_cfg) + S_DS_GEN);
308 htcfg->action = cpu_to_le16(cmd_action);
309 htcfg->ht_tx_cap = cpu_to_le16(txcfg->tx_htcap);
310 htcfg->ht_tx_info = cpu_to_le16(txcfg->tx_htinfo);
311 return 0;
312}
313
314/*
315 * This function appends an 11n TLV to a buffer.
316 *
317 * Buffer allocation is responsibility of the calling
318 * function. No size validation is made here.
319 *
320 * The function fills up the following sections, if applicable -
321 * - HT capability IE
322 * - HT information IE (with channel list)
323 * - 20/40 BSS Coexistence IE
324 * - HT Extended Capabilities IE
325 */
326int
327mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
328 struct mwifiex_bssdescriptor *bss_desc,
329 u8 **buffer)
330{
331 struct mwifiex_ie_types_htcap *ht_cap;
332 struct mwifiex_ie_types_htinfo *ht_info;
333 struct mwifiex_ie_types_chan_list_param_set *chan_list;
334 struct mwifiex_ie_types_2040bssco *bss_co_2040;
335 struct mwifiex_ie_types_extcap *ext_cap;
336 int ret_len = 0;
337 struct ieee80211_supported_band *sband;
338 u8 radio_type;
339
340 if (!buffer || !*buffer)
341 return ret_len;
342
343 radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
344 sband = priv->wdev->wiphy->bands[radio_type];
345
346 if (bss_desc->bcn_ht_cap) {
347 ht_cap = (struct mwifiex_ie_types_htcap *) *buffer;
348 memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
349 ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
350 ht_cap->header.len =
351 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
352 memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
353 (u8 *) bss_desc->bcn_ht_cap +
354 sizeof(struct ieee_types_header),
355 le16_to_cpu(ht_cap->header.len));
356
357 mwifiex_fill_cap_info(priv, radio_type, ht_cap);
358
359 *buffer += sizeof(struct mwifiex_ie_types_htcap);
360 ret_len += sizeof(struct mwifiex_ie_types_htcap);
361 }
362
363 if (bss_desc->bcn_ht_info) {
364 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
365 ht_info = (struct mwifiex_ie_types_htinfo *) *buffer;
366 memset(ht_info, 0,
367 sizeof(struct mwifiex_ie_types_htinfo));
368 ht_info->header.type =
369 cpu_to_le16(WLAN_EID_HT_INFORMATION);
370 ht_info->header.len =
371 cpu_to_le16(sizeof(struct ieee80211_ht_info));
372
373 memcpy((u8 *) ht_info +
374 sizeof(struct mwifiex_ie_types_header),
375 (u8 *) bss_desc->bcn_ht_info +
376 sizeof(struct ieee_types_header),
377 le16_to_cpu(ht_info->header.len));
378
379 if (!(sband->ht_cap.cap &
380 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
381 ht_info->ht_info.ht_param &=
382 ~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY |
383 IEEE80211_HT_PARAM_CHA_SEC_OFFSET);
384
385 *buffer += sizeof(struct mwifiex_ie_types_htinfo);
386 ret_len += sizeof(struct mwifiex_ie_types_htinfo);
387 }
388
389 chan_list =
390 (struct mwifiex_ie_types_chan_list_param_set *) *buffer;
391 memset(chan_list, 0,
392 sizeof(struct mwifiex_ie_types_chan_list_param_set));
393 chan_list->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
394 chan_list->header.len = cpu_to_le16(
395 sizeof(struct mwifiex_ie_types_chan_list_param_set) -
396 sizeof(struct mwifiex_ie_types_header));
397 chan_list->chan_scan_param[0].chan_number =
398 bss_desc->bcn_ht_info->control_chan;
399 chan_list->chan_scan_param[0].radio_type =
400 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
401
402 if ((sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
403 && (bss_desc->bcn_ht_info->ht_param &
404 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
405 SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
406 radio_type,
407 (bss_desc->bcn_ht_info->ht_param &
408 IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
409
410 *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
411 ret_len += sizeof(struct mwifiex_ie_types_chan_list_param_set);
412 }
413
414 if (bss_desc->bcn_bss_co_2040) {
415 bss_co_2040 = (struct mwifiex_ie_types_2040bssco *) *buffer;
416 memset(bss_co_2040, 0,
417 sizeof(struct mwifiex_ie_types_2040bssco));
418 bss_co_2040->header.type = cpu_to_le16(WLAN_EID_BSS_COEX_2040);
419 bss_co_2040->header.len =
420 cpu_to_le16(sizeof(bss_co_2040->bss_co_2040));
421
422 memcpy((u8 *) bss_co_2040 +
423 sizeof(struct mwifiex_ie_types_header),
424 (u8 *) bss_desc->bcn_bss_co_2040 +
425 sizeof(struct ieee_types_header),
426 le16_to_cpu(bss_co_2040->header.len));
427
428 *buffer += sizeof(struct mwifiex_ie_types_2040bssco);
429 ret_len += sizeof(struct mwifiex_ie_types_2040bssco);
430 }
431
432 if (bss_desc->bcn_ext_cap) {
433 ext_cap = (struct mwifiex_ie_types_extcap *) *buffer;
434 memset(ext_cap, 0, sizeof(struct mwifiex_ie_types_extcap));
435 ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
436 ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
437
438 memcpy((u8 *) ext_cap +
439 sizeof(struct mwifiex_ie_types_header),
440 (u8 *) bss_desc->bcn_ext_cap +
441 sizeof(struct ieee_types_header),
442 le16_to_cpu(ext_cap->header.len));
443
444 *buffer += sizeof(struct mwifiex_ie_types_extcap);
445 ret_len += sizeof(struct mwifiex_ie_types_extcap);
446 }
447
448 return ret_len;
449}
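[Editor's note] Every branch above follows the same Marvell TLV pattern: write a little-endian type/length header, copy the IE payload taken from the beacon right after it, then advance the caller's write pointer by the full TLV size. A generic, hedged sketch of that append step (the header mirrors the two __le16 fields of struct mwifiex_ie_types_header; the helper name is illustrative):

	#include <stdint.h>
	#include <string.h>

	struct tlv_header {
		uint16_t type;	/* little-endian on the wire */
		uint16_t len;
	} __attribute__((packed));

	/* Appends one TLV at *buf, advances the write pointer, returns bytes added. */
	static size_t append_tlv(uint8_t **buf, uint16_t type,
				 const void *payload, uint16_t len)
	{
		struct tlv_header hdr = {
			.type = type,	/* a driver would use cpu_to_le16() here */
			.len = len,
		};

		memcpy(*buf, &hdr, sizeof(hdr));
		memcpy(*buf + sizeof(hdr), payload, len);
		*buf += sizeof(hdr) + len;
		return sizeof(hdr) + len;
	}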
450
451/*
452 * This function reconfigures the Tx buffer size in firmware.
453 *
454 * This function prepares a firmware command and issues it, if
455 * the current Tx buffer size is different from the one requested.
456 * Maximum configurable Tx buffer size is limited by the HT capability
457 * field value.
458 */
459void
460mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
461 struct mwifiex_bssdescriptor *bss_desc)
462{
463 u16 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_2K;
464 u16 tx_buf, curr_tx_buf_size = 0;
465
466 if (bss_desc->bcn_ht_cap) {
467 if (le16_to_cpu(bss_desc->bcn_ht_cap->cap_info) &
468 IEEE80211_HT_CAP_MAX_AMSDU)
469 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_8K;
470 else
471 max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_4K;
472 }
473
474 tx_buf = min(priv->adapter->max_tx_buf_size, max_amsdu);
475
476 dev_dbg(priv->adapter->dev, "info: max_amsdu=%d, max_tx_buf=%d\n",
477 max_amsdu, priv->adapter->max_tx_buf_size);
478
479 if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_2K)
480 curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
481 else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_4K)
482 curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
483 else if (priv->adapter->curr_tx_buf_size <= MWIFIEX_TX_DATA_BUF_SIZE_8K)
484 curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_8K;
485 if (curr_tx_buf_size != tx_buf)
486 mwifiex_send_cmd_async(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
487 HostCmd_ACT_GEN_SET, 0, &tx_buf);
488}
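[Editor's note] The decision above reduces to: choose an 8K or 4K A-MSDU buffer depending on the IEEE80211_HT_CAP_MAX_AMSDU bit advertised by the peer, clamp it to the adapter maximum, and only issue HostCmd_CMD_RECONFIGURE_TX_BUFF when the resulting bucket differs from the current one. A tiny standalone sketch of that comparison (the 2K/4K/8K constants stand in for the MWIFIEX_TX_DATA_BUF_SIZE_* buckets and are assumed values):

	#include <stdbool.h>
	#include <stdint.h>

	#define BUF_SIZE_2K	2048
	#define BUF_SIZE_4K	4096
	#define BUF_SIZE_8K	8192
	#define HT_CAP_MAX_AMSDU 0x0800	/* HT cap bit: 7935-byte A-MSDU allowed */

	static bool needs_tx_buf_reconfig(uint16_t peer_ht_cap_info,
					  uint16_t adapter_max, uint16_t curr_size)
	{
		uint16_t max_amsdu, want, curr_bucket;

		max_amsdu = (peer_ht_cap_info & HT_CAP_MAX_AMSDU) ?
					BUF_SIZE_8K : BUF_SIZE_4K;
		want = adapter_max < max_amsdu ? adapter_max : max_amsdu;

		if (curr_size <= BUF_SIZE_2K)
			curr_bucket = BUF_SIZE_2K;
		else if (curr_size <= BUF_SIZE_4K)
			curr_bucket = BUF_SIZE_4K;
		else
			curr_bucket = BUF_SIZE_8K;

		return curr_bucket != want;
	}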
489
490/*
491 * This function checks if the given pointer is a valid entry of the
492 * Tx BA Stream table.
493 */
494static int mwifiex_is_tx_ba_stream_ptr_valid(struct mwifiex_private *priv,
495 struct mwifiex_tx_ba_stream_tbl *tx_tbl_ptr)
496{
497 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
498
499 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
500 if (tx_ba_tsr_tbl == tx_tbl_ptr)
501 return true;
502 }
503
504 return false;
505}
506
507/*
508 * This function deletes the given entry in Tx BA Stream table.
509 *
510 * The function also performs a validity check on the supplied
511 * pointer before trying to delete.
512 */
513void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
514 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl)
515{
516	if (!tx_ba_tsr_tbl ||
517	    !mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
518 return;
519
520 dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
521
522 list_del(&tx_ba_tsr_tbl->list);
523
524 kfree(tx_ba_tsr_tbl);
525}
526
527/*
528 * This function deletes all the entries in Tx BA Stream table.
529 */
530void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv)
531{
532 int i;
533 struct mwifiex_tx_ba_stream_tbl *del_tbl_ptr, *tmp_node;
534 unsigned long flags;
535
536 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
537 list_for_each_entry_safe(del_tbl_ptr, tmp_node,
538 &priv->tx_ba_stream_tbl_ptr, list)
539 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, del_tbl_ptr);
540 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
541
542 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
543
544 for (i = 0; i < MAX_NUM_TID; ++i)
545 priv->aggr_prio_tbl[i].ampdu_ap =
546 priv->aggr_prio_tbl[i].ampdu_user;
547}
548
549/*
550 * This function returns the pointer to an entry in BA Stream
551 * table which matches the given RA/TID pair.
552 */
553struct mwifiex_tx_ba_stream_tbl *
554mwifiex_11n_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
555 int tid, u8 *ra)
556{
557 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
558 unsigned long flags;
559
560 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
561 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
562 if ((!memcmp(tx_ba_tsr_tbl->ra, ra, ETH_ALEN))
563 && (tx_ba_tsr_tbl->tid == tid)) {
564 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
565 flags);
566 return tx_ba_tsr_tbl;
567 }
568 }
569 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
570 return NULL;
571}
572
573/*
574 * This function creates an entry in Tx BA stream table for the
575 * given RA/TID pair.
576 */
577void mwifiex_11n_create_tx_ba_stream_tbl(struct mwifiex_private *priv,
578 u8 *ra, int tid,
579 enum mwifiex_ba_status ba_status)
580{
581 struct mwifiex_tx_ba_stream_tbl *new_node;
582 unsigned long flags;
583
584 if (!mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, ra)) {
585 new_node = kzalloc(sizeof(struct mwifiex_tx_ba_stream_tbl),
586 GFP_ATOMIC);
587 if (!new_node) {
588 dev_err(priv->adapter->dev,
589 "%s: failed to alloc new_node\n", __func__);
590 return;
591 }
592
593 INIT_LIST_HEAD(&new_node->list);
594
595 new_node->tid = tid;
596 new_node->ba_status = ba_status;
597 memcpy(new_node->ra, ra, ETH_ALEN);
598
599 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
600 list_add_tail(&new_node->list, &priv->tx_ba_stream_tbl_ptr);
601 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
602 }
603}
604
605/*
606 * This function sends an add BA request to the given TID/RA pair.
607 */
608int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
609{
610 struct host_cmd_ds_11n_addba_req add_ba_req;
611 static u8 dialog_tok;
612 int ret;
613
614 dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
615
616 add_ba_req.block_ack_param_set = cpu_to_le16(
617 (u16) ((tid << BLOCKACKPARAM_TID_POS) |
618 (priv->add_ba_param.
619 tx_win_size << BLOCKACKPARAM_WINSIZE_POS) |
620 IMMEDIATE_BLOCK_ACK));
621 add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
622
623 ++dialog_tok;
624
625 if (dialog_tok == 0)
626 dialog_tok = 1;
627
628 add_ba_req.dialog_token = dialog_tok;
629 memcpy(&add_ba_req.peer_mac_addr, peer_mac, ETH_ALEN);
630
631 /* We don't wait for the response of this command */
632 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_REQ,
633 0, 0, &add_ba_req);
634
635 return ret;
636}
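[Editor's note] The block_ack_param_set built above follows the 802.11 ADDBA parameter-set layout: bit 0 A-MSDU permitted, bit 1 block-ack policy, bits 2-5 TID, bits 6-15 buffer size; the driver's BLOCKACKPARAM_TID_POS and BLOCKACKPARAM_WINSIZE_POS shifts correspond to those positions, and IMMEDIATE_BLOCK_ACK is assumed here to be the bit-1 policy flag. A small worked example:

	#include <stdio.h>
	#include <stdint.h>

	#define ADDBA_POLICY_IMMEDIATE	0x0002	/* bit 1 */
	#define ADDBA_TID_POS		2	/* bits 2-5 */
	#define ADDBA_BUFSIZE_POS	6	/* bits 6-15 */

	int main(void)
	{
		unsigned int tid = 6, win_size = 64;
		uint16_t param = (tid << ADDBA_TID_POS) |
				 (win_size << ADDBA_BUFSIZE_POS) |
				 ADDBA_POLICY_IMMEDIATE;

		/* TID 6, 64-frame window, immediate policy -> 0x101a */
		printf("block_ack_param_set = 0x%04x\n", param);
		return 0;
	}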
637
638/*
639 * This function sends a delete BA request to the given TID/RA pair.
640 */
641int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
642 int initiator)
643{
644 struct host_cmd_ds_11n_delba delba;
645 int ret;
646 uint16_t del_ba_param_set;
647
648 memset(&delba, 0, sizeof(delba));
649 delba.del_ba_param_set = cpu_to_le16(tid << DELBA_TID_POS);
650
651 del_ba_param_set = le16_to_cpu(delba.del_ba_param_set);
652 if (initiator)
653 del_ba_param_set |= IEEE80211_DELBA_PARAM_INITIATOR_MASK;
654 else
655 del_ba_param_set &= ~IEEE80211_DELBA_PARAM_INITIATOR_MASK;
656
657 memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN);
658
659 /* We don't wait for the response of this command */
660 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA,
661 HostCmd_ACT_GEN_SET, 0, &delba);
662
663 return ret;
664}
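[Editor's note] The DELBA parameter set places the initiator flag in bit 11 (IEEE80211_DELBA_PARAM_INITIATOR_MASK) and the TID in bits 12-15, which is why the handlers above recover the TID with a plain right shift by DELBA_TID_POS. A worked example of composing and decoding the field (the mask and shift restate that 802.11 layout):

	#include <stdio.h>
	#include <stdint.h>

	#define DELBA_INITIATOR_MASK	0x0800	/* bit 11 */
	#define DELBA_TID_POS		12	/* bits 12-15 */

	int main(void)
	{
		unsigned int tid = 6;
		uint16_t param = (tid << DELBA_TID_POS) | DELBA_INITIATOR_MASK;

		/* TID 6, we are the initiator -> 0x6800 */
		printf("del_ba_param_set = 0x%04x\n", param);
		printf("tid = %d, initiator = %d\n",
		       param >> DELBA_TID_POS,
		       !!(param & DELBA_INITIATOR_MASK));
		return 0;
	}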
665
666/*
667 * This function handles the command response of a delete BA request.
668 */
669void mwifiex_11n_delete_ba_stream(struct mwifiex_private *priv, u8 *del_ba)
670{
671 struct host_cmd_ds_11n_delba *cmd_del_ba =
672 (struct host_cmd_ds_11n_delba *) del_ba;
673 uint16_t del_ba_param_set = le16_to_cpu(cmd_del_ba->del_ba_param_set);
674 int tid;
675
676 tid = del_ba_param_set >> DELBA_TID_POS;
677
678 mwifiex_11n_delete_ba_stream_tbl(priv, tid, cmd_del_ba->peer_mac_addr,
679 TYPE_DELBA_RECEIVE,
680 INITIATOR_BIT(del_ba_param_set));
681}
682
683/*
684 * This function retrieves the Rx reordering table.
685 */
686int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
687 struct mwifiex_ds_rx_reorder_tbl *buf)
688{
689 int i;
690 struct mwifiex_ds_rx_reorder_tbl *rx_reo_tbl = buf;
691 struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr;
692 int count = 0;
693 unsigned long flags;
694
695 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
696 list_for_each_entry(rx_reorder_tbl_ptr, &priv->rx_reorder_tbl_ptr,
697 list) {
698 rx_reo_tbl->tid = (u16) rx_reorder_tbl_ptr->tid;
699 memcpy(rx_reo_tbl->ta, rx_reorder_tbl_ptr->ta, ETH_ALEN);
700 rx_reo_tbl->start_win = rx_reorder_tbl_ptr->start_win;
701 rx_reo_tbl->win_size = rx_reorder_tbl_ptr->win_size;
702 for (i = 0; i < rx_reorder_tbl_ptr->win_size; ++i) {
703 if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
704 rx_reo_tbl->buffer[i] = true;
705 else
706 rx_reo_tbl->buffer[i] = false;
707 }
708 rx_reo_tbl++;
709 count++;
710
711 if (count >= MWIFIEX_MAX_RX_BASTREAM_SUPPORTED)
712 break;
713 }
714 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
715
716 return count;
717}
718
719/*
720 * This function retrieves the Tx BA stream table.
721 */
722int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
723 struct mwifiex_ds_tx_ba_stream_tbl *buf)
724{
725 struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
726 struct mwifiex_ds_tx_ba_stream_tbl *rx_reo_tbl = buf;
727 int count = 0;
728 unsigned long flags;
729
730 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
731 list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
732 rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
733 dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
734 __func__, rx_reo_tbl->tid);
735 memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
736 rx_reo_tbl++;
737 count++;
738 if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
739 break;
740 }
741 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
742
743 return count;
744}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
new file mode 100644
index 000000000000..a4390a1a2a9f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -0,0 +1,161 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_11N_H_
21#define _MWIFIEX_11N_H_
22
23#include "11n_aggr.h"
24#include "11n_rxreorder.h"
25#include "wmm.h"
26
27int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
28 struct host_cmd_ds_command *resp);
29int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
30 struct host_cmd_ds_command *resp);
31int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
32 void *data_buf);
33int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd,
34 u16 cmd_action, void *data_buf);
35
36int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
37 struct mwifiex_bssdescriptor *bss_desc,
38 u8 **buffer);
39void mwifiex_cfg_tx_buf(struct mwifiex_private *priv,
40 struct mwifiex_bssdescriptor *bss_desc);
41void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
42 struct mwifiex_ie_types_htcap *);
43int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
44 u16 action, int *htcap_cfg);
45void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
46 struct mwifiex_tx_ba_stream_tbl
47 *tx_tbl);
48void mwifiex_11n_delete_all_tx_ba_stream_tbl(struct mwifiex_private *priv);
49struct mwifiex_tx_ba_stream_tbl *mwifiex_11n_get_tx_ba_stream_tbl(struct
50 mwifiex_private
51 *priv, int tid,
52 u8 *ra);
53void mwifiex_11n_create_tx_ba_stream_tbl(struct mwifiex_private *priv, u8 *ra,
54 int tid,
55 enum mwifiex_ba_status ba_status);
56int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac);
57int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
58 int initiator);
59void mwifiex_11n_delete_ba_stream(struct mwifiex_private *priv, u8 *del_ba);
60int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
61 struct mwifiex_ds_rx_reorder_tbl *buf);
62int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
63 struct mwifiex_ds_tx_ba_stream_tbl *buf);
64int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
65 void *data_buf);
66int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
67 struct host_cmd_ds_command *cmd,
68 int cmd_action, void *data_buf);
69int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
70 int cmd_action, void *data_buf);
71
72/*
73 * This function checks whether AMPDU is allowed or not for a particular TID.
74 */
75static inline u8
76mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid)
77{
78 return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED)
79 ? true : false);
80}
81
82/*
83 * This function checks whether AMSDU is allowed or not for a particular TID.
84 */
85static inline u8
86mwifiex_is_amsdu_allowed(struct mwifiex_private *priv, int tid)
87{
88 return (((priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED)
89 && ((priv->is_data_rate_auto)
90 || !((priv->bitmap_rates[2]) & 0x03)))
91 ? true : false);
92}
93
94/*
95 * This function checks whether space is available for a new BA stream or not.
96 */
97static inline u8 mwifiex_space_avail_for_new_ba_stream(
98 struct mwifiex_adapter *adapter)
99{
100 struct mwifiex_private *priv;
101 u8 i;
102 u32 ba_stream_num = 0;
103
104 for (i = 0; i < adapter->priv_num; i++) {
105 priv = adapter->priv[i];
106 if (priv)
107 ba_stream_num += mwifiex_wmm_list_len(
108 (struct list_head *)
109 &priv->tx_ba_stream_tbl_ptr);
110 }
111
112 return ((ba_stream_num <
113 MWIFIEX_MAX_TX_BASTREAM_SUPPORTED) ? true : false);
114}
115
116/*
117 * This function finds the correct Tx BA stream to delete.
118 *
119 * Upon successfully locating, both the TID and the RA are returned.
120 */
121static inline u8
122mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
123 int *ptid, u8 *ra)
124{
125 int tid;
126 u8 ret = false;
127 struct mwifiex_tx_ba_stream_tbl *tx_tbl;
128 unsigned long flags;
129
130 tid = priv->aggr_prio_tbl[ptr_tid].ampdu_user;
131
132 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
133 list_for_each_entry(tx_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
134 if (tid > priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user) {
135 tid = priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user;
136 *ptid = tx_tbl->tid;
137 memcpy(ra, tx_tbl->ra, ETH_ALEN);
138 ret = true;
139 }
140 }
141 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
142
143 return ret;
144}
145
146/*
147 * This function checks whether BA stream is set up or not.
148 */
149static inline int
150mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
151 struct mwifiex_ra_list_tbl *ptr, int tid)
152{
153 struct mwifiex_tx_ba_stream_tbl *tx_tbl;
154
155 tx_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, ptr->ra);
156 if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
157 return true;
158
159 return false;
160}
161#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
new file mode 100644
index 000000000000..d3d5e0853c45
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -0,0 +1,298 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n Aggregation
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27#include "11n_aggr.h"
28
29/*
30 * Creates an AMSDU subframe for aggregation into one AMSDU packet.
31 *
32 * The resultant AMSDU subframe format is -
33 *
34 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
35 * | DA | SA | Length | SNAP header | MSDU |
36 * | data[0..5] | data[6..11] | | | data[14..] |
37 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
38 * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes-->
39 *
40 * This function also computes the amount of padding required to make the
41 * buffer length multiple of 4 bytes.
42 *
43 * Data => |DA|SA|SNAP-TYPE|........ .|
44 * MSDU => |DA|SA|Length|SNAP|...... ..|
45 */
46static int
47mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
48 struct sk_buff *skb_src, int *pad)
49
50{
51 int dt_offset;
52 struct rfc_1042_hdr snap = {
53 0xaa, /* LLC DSAP */
54 0xaa, /* LLC SSAP */
55 0x03, /* LLC CTRL */
56 {0x00, 0x00, 0x00}, /* SNAP OUI */
57 0x0000 /* SNAP type */
58 /*
59 * This field will be overwritten
60 * later with ethertype
61 */
62 };
63 struct tx_packet_hdr *tx_header;
64
65 skb_put(skb_aggr, sizeof(*tx_header));
66
67 tx_header = (struct tx_packet_hdr *) skb_aggr->data;
68
69 /* Copy DA and SA */
70 dt_offset = 2 * ETH_ALEN;
71 memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
72
73 /* Copy SNAP header */
74 snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
75 dt_offset += sizeof(u16);
76
77 memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
78
79 skb_pull(skb_src, dt_offset);
80
81 /* Update Length field */
82 tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
83
84 /* Add payload */
85 skb_put(skb_aggr, skb_src->len);
86 memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
87 skb_src->len);
88 *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
89 LLC_SNAP_LEN)) & 3)) : 0;
90 skb_put(skb_aggr, *pad);
91
92 return skb_aggr->len + *pad;
93}
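[Editor's note] The *pad computation at the end of the subframe builder above simply rounds the subframe up to the next 4-byte boundary, since every A-MSDU subframe except the last must be padded to 4-byte alignment. A standalone sketch of the same arithmetic:

	#include <stdio.h>

	/* Padding needed to make len a multiple of 4; equivalent to the
	 * "(len & 3) ? 4 - (len & 3) : 0" expression used in the patch. */
	static int amsdu_pad(int len)
	{
		return (4 - (len & 3)) & 3;
	}

	int main(void)
	{
		int len;

		for (len = 60; len <= 64; len++)
			printf("subframe len %d -> pad %d\n", len, amsdu_pad(len));
		return 0;
	}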
94
95/*
96 * Adds TxPD to AMSDU header.
97 *
98 * Each AMSDU packet will contain one TxPD at the beginning,
99 * followed by multiple AMSDU subframes.
100 */
101static void
102mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
103 struct sk_buff *skb)
104{
105 struct txpd *local_tx_pd;
106
107 skb_push(skb, sizeof(*local_tx_pd));
108
109 local_tx_pd = (struct txpd *) skb->data;
110 memset(local_tx_pd, 0, sizeof(struct txpd));
111
112 /* Original priority has been overwritten */
113 local_tx_pd->priority = (u8) skb->priority;
114 local_tx_pd->pkt_delay_2ms =
115 mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
116 local_tx_pd->bss_num = priv->bss_num;
117 local_tx_pd->bss_type = priv->bss_type;
118 /* Always zero as the data is followed by struct txpd */
119 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
120 local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
121 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
122 sizeof(*local_tx_pd));
123
124 if (local_tx_pd->tx_control == 0)
125 /* TxCtrl set by user or default */
126 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
127
128 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
129 (priv->adapter->pps_uapsd_mode)) {
130		if (mwifiex_check_last_packet_indication(priv)) {
131 priv->adapter->tx_lock_flag = true;
132 local_tx_pd->flags =
133 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
134 }
135 }
136}
137
138/*
139 * Create aggregated packet.
140 *
141 * This function creates an aggregated MSDU packet, by combining buffers
142 * from the RA list. Each individual buffer is encapsulated as an AMSDU
143 * subframe and all such subframes are concatenated together to form the
144 * AMSDU packet.
145 *
146 * A TxPD is also added to the front of the resultant AMSDU packets for
147 * transmission. The resultant packets format is -
148 *
149 * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+
150 * | TxPD |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame|
151 * | | 1 | 2 | .. | n |
152 * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+
153 */
154int
155mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
156 struct mwifiex_ra_list_tbl *pra_list, int headroom,
157 int ptrindex, unsigned long ra_list_flags)
158 __releases(&priv->wmm.ra_list_spinlock)
159{
160 struct mwifiex_adapter *adapter = priv->adapter;
161 struct sk_buff *skb_aggr, *skb_src;
162 struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
163 int pad = 0, ret;
164 struct mwifiex_tx_param tx_param;
165 struct txpd *ptx_pd = NULL;
166
167 if (skb_queue_empty(&pra_list->skb_head)) {
168 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
169 ra_list_flags);
170 return 0;
171 }
172 skb_src = skb_peek(&pra_list->skb_head);
173 tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
174 skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
175 if (!skb_aggr) {
176 dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
177 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
178 ra_list_flags);
179 return -1;
180 }
181 skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
182 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
183
184 tx_info_aggr->bss_index = tx_info_src->bss_index;
185 skb_aggr->priority = skb_src->priority;
186
187 while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len
188 + LLC_SNAP_LEN)
189 <= adapter->tx_buf_size)) {
190
191 if (!skb_queue_empty(&pra_list->skb_head))
192 skb_src = skb_dequeue(&pra_list->skb_head);
193 else
194 skb_src = NULL;
195
196 if (skb_src)
197 pra_list->total_pkts_size -= skb_src->len;
198
199 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
200 ra_list_flags);
201 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
202
203 mwifiex_write_data_complete(adapter, skb_src, 0);
204
205 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
206
207 if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
208 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
209 ra_list_flags);
210 return -1;
211 }
212
213 if (!skb_queue_empty(&pra_list->skb_head))
214 skb_src = skb_peek(&pra_list->skb_head);
215 else
216 skb_src = NULL;
217 }
218
219 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
220
221 /* Last AMSDU packet does not need padding */
222 skb_trim(skb_aggr, skb_aggr->len - pad);
223
224 /* Form AMSDU */
225 mwifiex_11n_form_amsdu_txpd(priv, skb_aggr);
226 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
227 ptx_pd = (struct txpd *)skb_aggr->data;
228
229 skb_push(skb_aggr, headroom);
230
231 tx_param.next_pkt_len = ((pra_list->total_pkts_size) ?
232 (((pra_list->total_pkts_size) >
233 adapter->tx_buf_size) ? adapter->
234 tx_buf_size : pra_list->total_pkts_size +
235 LLC_SNAP_LEN + sizeof(struct txpd)) : 0);
236 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
237 skb_aggr->data,
238 skb_aggr->len, &tx_param);
239 switch (ret) {
240 case -EBUSY:
241 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
242 if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
243 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
244 ra_list_flags);
245 mwifiex_write_data_complete(adapter, skb_aggr, -1);
246 return -1;
247 }
248 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
249 (adapter->pps_uapsd_mode) &&
250 (adapter->tx_lock_flag)) {
251 priv->adapter->tx_lock_flag = false;
252 if (ptx_pd)
253 ptx_pd->flags = 0;
254 }
255
256 skb_queue_tail(&pra_list->skb_head, skb_aggr);
257
258 pra_list->total_pkts_size += skb_aggr->len;
259
260 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
261 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
262 ra_list_flags);
263 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
264 break;
265 case -1:
266 adapter->data_sent = false;
267 dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
268 __func__, ret);
269 adapter->dbg.num_tx_host_to_card_failure++;
270 mwifiex_write_data_complete(adapter, skb_aggr, ret);
271 return 0;
272 case -EINPROGRESS:
273 adapter->data_sent = false;
274 break;
275 case 0:
276 mwifiex_write_data_complete(adapter, skb_aggr, ret);
277 break;
278 default:
279 break;
280 }
281 if (ret != -EBUSY) {
282 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
283 if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
284 priv->wmm.packets_out[ptrindex]++;
285 priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
286 }
287 /* Now bss_prio_cur pointer points to next node */
288 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
289 list_first_entry(
290 &adapter->bss_prio_tbl[priv->bss_priority]
291 .bss_prio_cur->list,
292 struct mwifiex_bss_prio_node, list);
293 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
294 ra_list_flags);
295 }
296
297 return 0;
298}
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h
new file mode 100644
index 000000000000..9c6dca7ab02c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_aggr.h
@@ -0,0 +1,32 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n Aggregation
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_11N_AGGR_H_
21#define _MWIFIEX_11N_AGGR_H_
22
23#define PKT_TYPE_AMSDU 0xE6
24
25int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv,
26 struct sk_buff *skb);
27int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
28 struct mwifiex_ra_list_tbl *ptr, int headroom,
29 int ptr_index, unsigned long flags)
30 __releases(&priv->wmm.ra_list_spinlock);
31
32#endif /* !_MWIFIEX_11N_AGGR_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
new file mode 100644
index 000000000000..e5dfdc39a921
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -0,0 +1,616 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27#include "11n_rxreorder.h"
28
29/*
30 * This function dispatches all packets in the Rx reorder table.
31 *
32 * There could be holes in the buffer, which are skipped by the function.
33 * Since the buffer is linear, the function uses rotation to simulate
34 * circular buffer.
35 */
36static int
37mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
38 struct mwifiex_rx_reorder_tbl
39 *rx_reor_tbl_ptr, int start_win)
40{
41 int no_pkt_to_send, i;
42 void *rx_tmp_ptr;
43 unsigned long flags;
44
45 no_pkt_to_send = (start_win > rx_reor_tbl_ptr->start_win) ?
46 min((start_win - rx_reor_tbl_ptr->start_win),
47 rx_reor_tbl_ptr->win_size) : rx_reor_tbl_ptr->win_size;
48
49 for (i = 0; i < no_pkt_to_send; ++i) {
50 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
51 rx_tmp_ptr = NULL;
52 if (rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
53 rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
54 rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
55 }
56 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
57 if (rx_tmp_ptr)
58 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
59 }
60
61 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
62 /*
63 * We don't have a circular buffer, hence use rotation to simulate
64 * circular buffer
65 */
66 for (i = 0; i < rx_reor_tbl_ptr->win_size - no_pkt_to_send; ++i) {
67 rx_reor_tbl_ptr->rx_reorder_ptr[i] =
68 rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i];
69 rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i] = NULL;
70 }
71
72 rx_reor_tbl_ptr->start_win = start_win;
73 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
74
75 return 0;
76}
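[Editor's note] Because the reorder buffer is a plain linear array indexed from start_win, advancing the window by N slots means dispatching (or discarding) the first N entries and then sliding the remainder down to index 0, exactly the "rotation to simulate circular buffer" the comments above describe. A minimal userspace sketch of that slide (slot contents are just stand-ins for queued packets):

	#include <stdio.h>

	#define WIN_SIZE 8

	/* Dispatch the first n slots, then shift the remaining entries to the
	 * front, mirroring the rotation in the function above. */
	static void advance_window(void *slots[WIN_SIZE], int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (slots[i])
				printf("dispatch packet in slot %d\n", i);
			slots[i] = NULL;
		}
		for (i = 0; i < WIN_SIZE - n; i++) {
			slots[i] = slots[n + i];
			slots[n + i] = NULL;
		}
	}

	int main(void)
	{
		int a = 1, b = 2;
		void *slots[WIN_SIZE] = { &a, NULL, &b };	/* slot 1 is a hole */

		advance_window(slots, 3);
		return 0;
	}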
77
78/*
79 * This function dispatches all packets in the Rx reorder table until
80 * a hole is found.
81 *
82 * The start window is adjusted automatically when a hole is located.
83 * Since the buffer is linear, the function uses rotation to simulate
84 * circular buffer.
85 */
86static int
87mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
88 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
89{
90 int i, j, xchg;
91 void *rx_tmp_ptr;
92 unsigned long flags;
93
94 for (i = 0; i < rx_reor_tbl_ptr->win_size; ++i) {
95 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
96 if (!rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
97 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
98 break;
99 }
100 rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
101 rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
102 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
103 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
104 }
105
106 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
107 /*
108 * We don't have a circular buffer, hence use rotation to simulate
109 * circular buffer
110 */
111 if (i > 0) {
112 xchg = rx_reor_tbl_ptr->win_size - i;
113 for (j = 0; j < xchg; ++j) {
114 rx_reor_tbl_ptr->rx_reorder_ptr[j] =
115 rx_reor_tbl_ptr->rx_reorder_ptr[i + j];
116 rx_reor_tbl_ptr->rx_reorder_ptr[i + j] = NULL;
117 }
118 }
119 rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
120 &(MAX_TID_VALUE - 1);
121 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
122 return 0;
123}
124
125/*
126 * This function deletes the Rx reorder table and frees the memory.
127 *
128 * The function stops the associated timer and dispatches all the
129 * pending packets in the Rx reorder table before deletion.
130 */
131static void
132mwifiex_11n_delete_rx_reorder_tbl_entry(struct mwifiex_private *priv,
133 struct mwifiex_rx_reorder_tbl
134 *rx_reor_tbl_ptr)
135{
136 unsigned long flags;
137
138 if (!rx_reor_tbl_ptr)
139 return;
140
141 mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
142 (rx_reor_tbl_ptr->start_win +
143 rx_reor_tbl_ptr->win_size)
144 &(MAX_TID_VALUE - 1));
145
146 del_timer(&rx_reor_tbl_ptr->timer_context.timer);
147
148 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
149 list_del(&rx_reor_tbl_ptr->list);
150 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
151
152 kfree(rx_reor_tbl_ptr->rx_reorder_ptr);
153 kfree(rx_reor_tbl_ptr);
154}
155
156/*
157 * This function returns the pointer to an entry in Rx reordering
158 * table which matches the given TA/TID pair.
159 */
160static struct mwifiex_rx_reorder_tbl *
161mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
162{
163 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
164 unsigned long flags;
165
166 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
167 list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
168 if ((!memcmp(rx_reor_tbl_ptr->ta, ta, ETH_ALEN))
169 && (rx_reor_tbl_ptr->tid == tid)) {
170 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
171 flags);
172 return rx_reor_tbl_ptr;
173 }
174 }
175 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
176
177 return NULL;
178}
179
180/*
181 * This function finds the last sequence number used in the packets
182 * buffered in Rx reordering table.
183 */
184static int
185mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr)
186{
187 int i;
188
189 for (i = (rx_reorder_tbl_ptr->win_size - 1); i >= 0; --i)
190 if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
191 return i;
192
193 return -1;
194}
195
196/*
197 * This function flushes all the packets in Rx reordering table.
198 *
199 * The function checks if any packets are currently buffered in the
200 * table or not. In case there are packets available, it dispatches
201 * them and then dumps the Rx reordering table.
202 */
203static void
204mwifiex_flush_data(unsigned long context)
205{
206 struct reorder_tmr_cnxt *reorder_cnxt =
207 (struct reorder_tmr_cnxt *) context;
208 int start_win;
209
210 start_win = mwifiex_11n_find_last_seq_num(reorder_cnxt->ptr);
211 if (start_win >= 0) {
212 dev_dbg(reorder_cnxt->priv->adapter->dev,
213 "info: flush data %d\n", start_win);
214 mwifiex_11n_dispatch_pkt_until_start_win(reorder_cnxt->priv,
215 reorder_cnxt->ptr,
216 ((reorder_cnxt->ptr->start_win +
217 start_win + 1) & (MAX_TID_VALUE - 1)));
218 }
219}
220
221/*
222 * This function creates an entry in Rx reordering table for the
223 * given TA/TID.
224 *
225 * The function also initializes the entry with sequence number, window
226 * size as well as initializes the timer.
227 *
228 * If the received TA/TID pair is already present, all the packets are
229 * dispatched and the window start is moved up to the SSN.
230 */
231static void
232mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
233 int tid, int win_size, int seq_num)
234{
235 int i;
236 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr, *new_node;
237 u16 last_seq = 0;
238 unsigned long flags;
239
240 /*
241	 * If we get a TID/TA pair which is already present, dispatch all
242	 * the packets and move the window start up to the SSN.
243 */
244 rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
245 if (rx_reor_tbl_ptr) {
246 mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
247 seq_num);
248 return;
249 }
250 /* if !rx_reor_tbl_ptr then create one */
251 new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
252 if (!new_node) {
253 dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n",
254 __func__);
255 return;
256 }
257
258 INIT_LIST_HEAD(&new_node->list);
259 new_node->tid = tid;
260 memcpy(new_node->ta, ta, ETH_ALEN);
261 new_node->start_win = seq_num;
262 if (mwifiex_queuing_ra_based(priv))
263 /* TODO for adhoc */
264 dev_dbg(priv->adapter->dev,
265 "info: ADHOC:last_seq=%d start_win=%d\n",
266 last_seq, new_node->start_win);
267 else
268 last_seq = priv->rx_seq[tid];
269
270 if (last_seq >= new_node->start_win)
271 new_node->start_win = last_seq + 1;
272
273 new_node->win_size = win_size;
274
275 new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
276 GFP_KERNEL);
277 if (!new_node->rx_reorder_ptr) {
278 kfree((u8 *) new_node);
279 dev_err(priv->adapter->dev,
280 "%s: failed to alloc reorder_ptr\n", __func__);
281 return;
282 }
283
284 new_node->timer_context.ptr = new_node;
285 new_node->timer_context.priv = priv;
286
287 init_timer(&new_node->timer_context.timer);
288 new_node->timer_context.timer.function = mwifiex_flush_data;
289 new_node->timer_context.timer.data =
290 (unsigned long) &new_node->timer_context;
291
292 for (i = 0; i < win_size; ++i)
293 new_node->rx_reorder_ptr[i] = NULL;
294
295 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
296 list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
297 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
298}
299
300/*
301 * This function prepares command for adding a BA request.
302 *
303 * Preparation includes -
304 * - Setting command ID and proper size
305 * - Setting add BA request buffer
306 * - Ensuring correct endian-ness
307 */
308int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
309{
310 struct host_cmd_ds_11n_addba_req *add_ba_req =
311 (struct host_cmd_ds_11n_addba_req *)
312 &cmd->params.add_ba_req;
313
314 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
315 cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
316 memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));
317
318 return 0;
319}
320
321/*
322 * This function prepares command for adding a BA response.
323 *
324 * Preparation includes -
325 * - Setting command ID and proper size
326 * - Setting add BA response buffer
327 * - Ensuring correct endian-ness
328 */
329int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
330 struct host_cmd_ds_command *cmd,
331 void *data_buf)
332{
333 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
334 (struct host_cmd_ds_11n_addba_rsp *)
335 &cmd->params.add_ba_rsp;
336 struct host_cmd_ds_11n_addba_req *cmd_addba_req =
337 (struct host_cmd_ds_11n_addba_req *) data_buf;
338 u8 tid;
339 int win_size;
340 uint16_t block_ack_param_set;
341
342 cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
343 cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
344
345 memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
346 ETH_ALEN);
347 add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
348 add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
349 add_ba_rsp->ssn = cmd_addba_req->ssn;
350
351 block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
352 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
353 >> BLOCKACKPARAM_TID_POS;
354 add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
355 block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
356	/* We do not support AMSDU inside AMPDU, hence reset the bit */
357 block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
358 block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
359 BLOCKACKPARAM_WINSIZE_POS);
360 add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
361 win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
362 & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
363 >> BLOCKACKPARAM_WINSIZE_POS;
364 cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);
365
366 mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
367 tid, win_size, le16_to_cpu(cmd_addba_req->ssn));
368 return 0;
369}
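
   The response generator above works mostly by masking and shifting the 16-bit
   block ack parameter set. Below is a minimal standalone sketch of that
   unpack/repack pattern; it is illustrative only, the mask/position constants
   are local copies with the same values used above, and the field value and
   window size are hypothetical.

	/* Illustrative only: local copies of the mask/position values used above. */
	#include <stdio.h>
	#include <stdint.h>

	#define ADDBA_PARAM_TID_MASK		0x003C	/* bits 2..5 */
	#define ADDBA_PARAM_BUF_SIZE_MASK	0xFFC0	/* bits 6..15 */
	#define ADDBA_TID_POS			2
	#define ADDBA_WINSIZE_POS		6

	int main(void)
	{
		uint16_t param = 0x1009;	/* hypothetical ADDBA request field */
		uint16_t tid = (param & ADDBA_PARAM_TID_MASK) >> ADDBA_TID_POS;
		uint16_t req_win = (param & ADDBA_PARAM_BUF_SIZE_MASK) >> ADDBA_WINSIZE_POS;
		uint16_t own_win = 64;		/* stands in for priv->add_ba_param.rx_win_size */

		/* Overwrite the requested buffer size with our own Rx window. */
		param &= (uint16_t)~ADDBA_PARAM_BUF_SIZE_MASK;
		param |= (uint16_t)(own_win << ADDBA_WINSIZE_POS);

		printf("tid=%u req_win=%u new_param=0x%04x\n", tid, req_win, param);
		return 0;
	}

   Compiled on its own, it prints the TID and requested window taken from the
   field, then the field rewritten with the local Rx window, mirroring what the
   ADDBA response generator above does before handing the buffer to firmware.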
370
371/*
372 * This function prepares command for deleting a BA request.
373 *
374 * Preparation includes -
375 * - Setting command ID and proper size
376 * - Setting del BA request buffer
377 * - Ensuring correct endian-ness
378 */
379int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
380{
381 struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *)
382 &cmd->params.del_ba;
383
384 cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
385 cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
386 memcpy(del_ba, data_buf, sizeof(*del_ba));
387
388 return 0;
389}
390
391/*
392 * This function identifies if Rx reordering is needed for a received packet.
393 *
394 * In case reordering is required, the function will do the reordering
395 * before sending it to the kernel.
396 *
397 * The Rx reorder table is checked first with the received TID/TA pair. If
398 * not found, the received packet is dispatched immediately. But if found,
399 * the packet is reordered and all the packets in the updated Rx reordering
400 * table are dispatched until a hole is found.
401 *
402 * For sequence number less than the starting window, the packet is dropped.
403 */
404int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
405 u16 seq_num, u16 tid,
406 u8 *ta, u8 pkt_type, void *payload)
407{
408 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
409 int start_win, end_win, win_size, ret;
410 u16 pkt_index;
411
412 rx_reor_tbl_ptr =
413 mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
414 tid, ta);
415 if (!rx_reor_tbl_ptr) {
416 if (pkt_type != PKT_TYPE_BAR)
417 mwifiex_process_rx_packet(priv->adapter, payload);
418 return 0;
419 }
420 start_win = rx_reor_tbl_ptr->start_win;
421 win_size = rx_reor_tbl_ptr->win_size;
422 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
423 del_timer(&rx_reor_tbl_ptr->timer_context.timer);
424 mod_timer(&rx_reor_tbl_ptr->timer_context.timer, jiffies
425 + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000);
426
427 /*
428	 * If seq_num is less than the starting window, ignore and drop the
429	 * packet
430 */
431 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */
432 if (seq_num >= ((start_win + (TWOPOW11)) & (MAX_TID_VALUE - 1))
433 && (seq_num < start_win))
434 return -1;
435 } else if ((seq_num < start_win)
436 || (seq_num > (start_win + (TWOPOW11)))) {
437 return -1;
438 }
439
440 /*
441 * If this packet is a BAR we adjust seq_num as
442 * WinStart = seq_num
443 */
444 if (pkt_type == PKT_TYPE_BAR)
445 seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
446
447 if (((end_win < start_win)
448 && (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win)))
449 && (seq_num > end_win)) || ((end_win > start_win)
450 && ((seq_num > end_win) || (seq_num < start_win)))) {
451 end_win = seq_num;
452 if (((seq_num - win_size) + 1) >= 0)
453 start_win = (end_win - win_size) + 1;
454 else
455 start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
456 ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
457 rx_reor_tbl_ptr, start_win);
458
459 if (ret)
460 return ret;
461 }
462
463 if (pkt_type != PKT_TYPE_BAR) {
464 if (seq_num >= start_win)
465 pkt_index = seq_num - start_win;
466 else
467 pkt_index = (seq_num+MAX_TID_VALUE) - start_win;
468
469 if (rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index])
470 return -1;
471
472 rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index] = payload;
473 }
474
475 /*
476 * Dispatch all packets sequentially from start_win until a
477 * hole is found and adjust the start_win appropriately
478 */
479 ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
480
481 return ret;
482}
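
   Stripped of the BAR handling, locking and wrap-around cases, the reordering
   described above boils down to a sliding window: park each packet at its
   offset from start_win, then release the in-order prefix and advance the
   window. The sketch below is a plain C illustration under those
   simplifications, not driver code; the window size, sequence space and
   dispatch hook are hypothetical.

	/* Plain C sketch of the sliding-window reordering; not driver code. */
	#include <stdio.h>
	#include <stddef.h>

	#define WIN_SIZE	8		/* hypothetical reorder window */
	#define SEQ_SPACE	4096		/* 12-bit sequence numbers wrap here */

	static const char *slot[WIN_SIZE];	/* NULL marks a hole */
	static unsigned int start_win;

	static void dispatch(const char *pkt)
	{
		printf("deliver %s\n", pkt);	/* stands in for handing the packet up */
	}

	static void rx(unsigned int seq, const char *pkt)
	{
		unsigned int offset = (seq + SEQ_SPACE - start_win) % SEQ_SPACE;
		size_t i;

		if (offset >= WIN_SIZE)
			return;			/* outside the window (the real code also slides it) */
		slot[offset] = pkt;

		/* Release the in-order prefix and advance the window start. */
		while (slot[0]) {
			dispatch(slot[0]);
			for (i = 1; i < WIN_SIZE; i++)
				slot[i - 1] = slot[i];
			slot[WIN_SIZE - 1] = NULL;
			start_win = (start_win + 1) % SEQ_SPACE;
		}
	}

	int main(void)
	{
		rx(2, "pkt2");			/* buffered: seq 0 and 1 are still holes */
		rx(0, "pkt0");			/* releases pkt0, stops at the hole at seq 1 */
		rx(1, "pkt1");			/* releases pkt1, then the buffered pkt2 */
		return 0;
	}

   Run as-is it delivers pkt0, pkt1, pkt2 in sequence order even though pkt2
   arrived first, which is the behaviour mwifiex_11n_scan_and_dispatch()
   provides for the real table.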
483
484/*
485 * This function deletes an entry for a given TID/TA pair.
486 *
487 * The TID/TA are taken from del BA event body.
488 */
489void
490mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int tid,
491 u8 *peer_mac, u8 type, int initiator)
492{
493 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
494 struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
495 u8 cleanup_rx_reorder_tbl;
496 unsigned long flags;
497
498 if (type == TYPE_DELBA_RECEIVE)
499 cleanup_rx_reorder_tbl = (initiator) ? true : false;
500 else
501 cleanup_rx_reorder_tbl = (initiator) ? false : true;
502
503 dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d, "
504 "initiator=%d\n", peer_mac, tid, initiator);
505
506 if (cleanup_rx_reorder_tbl) {
507 rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
508 peer_mac);
509 if (!rx_reor_tbl_ptr) {
510 dev_dbg(priv->adapter->dev,
511 "event: TID, TA not found in table\n");
512 return;
513 }
514 mwifiex_11n_delete_rx_reorder_tbl_entry(priv, rx_reor_tbl_ptr);
515 } else {
516 ptx_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, peer_mac);
517 if (!ptx_tbl) {
518 dev_dbg(priv->adapter->dev,
519 "event: TID, RA not found in table\n");
520 return;
521 }
522
523 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
524 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
525 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
526 }
527}
528
529/*
530 * This function handles the command response of an add BA response.
531 *
532 * Handling includes changing the header fields into CPU format and
533 * creating the stream, provided the add BA is accepted.
534 */
535int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
536 struct host_cmd_ds_command *resp)
537{
538 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
539 (struct host_cmd_ds_11n_addba_rsp *)
540 &resp->params.add_ba_rsp;
541 int tid, win_size;
542 struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
543 uint16_t block_ack_param_set;
544
545 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
546
547 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
548 >> BLOCKACKPARAM_TID_POS;
549 /*
550 * Check if we had rejected the ADDBA, if yes then do not create
551 * the stream
552 */
553 if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
554 win_size = (block_ack_param_set &
555 IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
556 >> BLOCKACKPARAM_WINSIZE_POS;
557
558 dev_dbg(priv->adapter->dev, "cmd: ADDBA RSP: %pM"
559 " tid=%d ssn=%d win_size=%d\n",
560 add_ba_rsp->peer_mac_addr,
561 tid, add_ba_rsp->ssn, win_size);
562 } else {
563		dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d\n",
564 add_ba_rsp->peer_mac_addr, tid);
565
566 rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv,
567 tid, add_ba_rsp->peer_mac_addr);
568 if (rx_reor_tbl_ptr)
569 mwifiex_11n_delete_rx_reorder_tbl_entry(priv,
570 rx_reor_tbl_ptr);
571 }
572
573 return 0;
574}
575
576/*
577 * This function handles BA stream timeout event by preparing and sending
578 * a command to the firmware.
579 */
580void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
581 struct host_cmd_ds_11n_batimeout *event)
582{
583 struct host_cmd_ds_11n_delba delba;
584
585 memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
586 memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);
587
588 delba.del_ba_param_set |=
589 cpu_to_le16((u16) event->tid << DELBA_TID_POS);
590 delba.del_ba_param_set |= cpu_to_le16(
591 (u16) event->origninator << DELBA_INITIATOR_POS);
592 delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
593 mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba);
594}
595
596/*
597 * This function cleans up the Rx reorder table by deleting all the entries
598 * and re-initializing.
599 */
600void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
601{
602 struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
603 unsigned long flags;
604
605 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
606 list_for_each_entry_safe(del_tbl_ptr, tmp_node,
607 &priv->rx_reorder_tbl_ptr, list) {
608 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
609 mwifiex_11n_delete_rx_reorder_tbl_entry(priv, del_tbl_ptr);
610 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
611 }
612 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
613
614 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
615 memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
616}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
new file mode 100644
index 000000000000..f3ca8c8c18f9
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -0,0 +1,65 @@
1/*
2 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_11N_RXREORDER_H_
21#define _MWIFIEX_11N_RXREORDER_H_
22
23#define MIN_FLUSH_TIMER_MS 50
24
25#define PKT_TYPE_BAR 0xE7
26#define MAX_TID_VALUE (2 << 11)
27#define TWOPOW11 (2 << 10)
28
29#define BLOCKACKPARAM_TID_POS 2
30#define BLOCKACKPARAM_AMSDU_SUPP_MASK 0x1
31#define BLOCKACKPARAM_WINSIZE_POS 6
32#define DELBA_TID_POS 12
33#define DELBA_INITIATOR_POS 11
34#define TYPE_DELBA_SENT 1
35#define TYPE_DELBA_RECEIVE 2
36#define IMMEDIATE_BLOCK_ACK 0x2
37
38#define ADDBA_RSP_STATUS_ACCEPT 0
39
40int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
41 u16 seqNum,
42 u16 tid, u8 *ta,
43 u8 pkttype, void *payload);
44void mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int Tid,
45 u8 *PeerMACAddr, u8 type,
46 int initiator);
47void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
48 struct host_cmd_ds_11n_batimeout *event);
49int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
50 struct host_cmd_ds_command
51 *resp);
52int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd,
53 void *data_buf);
54int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
55 struct host_cmd_ds_command
56 *cmd, void *data_buf);
57int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd,
58 void *data_buf);
59void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv);
60struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct
61 mwifiex_private
62 *priv, int tid,
63 u8 *ta);
64
65#endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
new file mode 100644
index 000000000000..86962920cef3
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -0,0 +1,21 @@
1config MWIFIEX
2 tristate "Marvell WiFi-Ex Driver"
3 depends on CFG80211
4 select LIB80211
5 ---help---
6 This adds support for wireless adapters based on Marvell
7 802.11n chipsets.
8
9 If you choose to build it as a module, it will be called
10 mwifiex.
11
12config MWIFIEX_SDIO
13 tristate "Marvell WiFi-Ex Driver for SD8787"
14 depends on MWIFIEX && MMC
15 select FW_LOADER
16 ---help---
17 This adds support for wireless adapters based on Marvell
18 8787 chipset with SDIO interface.
19
20 If you choose to build it as a module, it will be called
21 mwifiex_sdio.
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
new file mode 100644
index 000000000000..42cb733ea33a
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -0,0 +1,41 @@
1#
2# Copyright (C) 2011, Marvell International Ltd.
3#
4# This software file (the "File") is distributed by Marvell International
5# Ltd. under the terms of the GNU General Public License Version 2, June 1991
6# (the "License"). You may use, redistribute and/or modify this File in
7# accordance with the terms and conditions of the License, a copy of which
8# is available by writing to the Free Software Foundation, Inc.,
9# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
10# worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
11#
12# THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
13# IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
14# ARE EXPRESSLY DISCLAIMED. The License provides additional details about
15# this warranty disclaimer.
16
17
18mwifiex-y += main.o
19mwifiex-y += init.o
20mwifiex-y += cfp.o
21mwifiex-y += cmdevt.o
22mwifiex-y += util.o
23mwifiex-y += txrx.o
24mwifiex-y += wmm.o
25mwifiex-y += 11n.o
26mwifiex-y += 11n_aggr.o
27mwifiex-y += 11n_rxreorder.o
28mwifiex-y += scan.o
29mwifiex-y += join.o
30mwifiex-y += sta_ioctl.o
31mwifiex-y += sta_cmd.o
32mwifiex-y += sta_cmdresp.o
33mwifiex-y += sta_event.o
34mwifiex-y += sta_tx.o
35mwifiex-y += sta_rx.o
36mwifiex-y += cfg80211.o
37mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
38obj-$(CONFIG_MWIFIEX) += mwifiex.o
39
40mwifiex_sdio-y += sdio.o
41obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
new file mode 100644
index 000000000000..b55badef4660
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/README
@@ -0,0 +1,204 @@
1# Copyright (C) 2011, Marvell International Ltd.
2#
3# This software file (the "File") is distributed by Marvell International
4# Ltd. under the terms of the GNU General Public License Version 2, June 1991
5# (the "License"). You may use, redistribute and/or modify this File in
6# accordance with the terms and conditions of the License, a copy of which
7# is available by writing to the Free Software Foundation, Inc.,
8# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
9# worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
10#
11# THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
12# IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
13# ARE EXPRESSLY DISCLAIMED. The License provides additional details about
14# this warranty disclaimer.
15
16
17===============================================================================
18 U S E R M A N U A L
19
201) FOR DRIVER INSTALL
21
22 a) Copy sd8787.bin to /lib/firmware/mrvl/ directory,
23 create the directory if it doesn't exist.
24 b) Install WLAN driver,
25 insmod mwifiex.ko
26 c) Uninstall WLAN driver,
27 ifconfig mlanX down
28 rmmod mwifiex
29
30
312) FOR DRIVER CONFIGURATION AND INFO
32 The configurations can be done either using the 'iw' user space
33 utility or debugfs.
34
35 a) 'iw' utility commands
36
37 Following are some useful iw commands:-
38
39iw dev mlan0 scan
40
41 This command will trigger a scan.
42 The command will then display the scan table entries
43
44iw dev mlan0 connect -w <SSID> [<freq in MHz>] [<bssid>] [key 0:abcde d:1123456789a]
45 The above command can be used to connect to an AP with a particular SSID.
46	 The AP's operating frequency and BSSID can also be specified. If the AP uses
47	 WEP encryption, the WEP keys can be specified in the command.
48	 Note: Run the scan command (iw dev mlan0 scan) before every attempt to connect to an AP.
49
50iw dev mlan0 disconnect
51 This command will be used to disconnect from an AP.
52
53
54iw dev mlan0 ibss join <SSID> <freq in MHz> [fixed-freq] [fixed-bssid] [key 0:abcde]
55 The command will be used to join or create an ibss. Optionally, operating frequency,
56	 bssid and the security related parameters can be specified while joining/creating
57	 an ibss.
58
59iw dev mlan0 ibss leave
60 The command will be used to leave an ibss network.
61
62iw dev mlan0 link
63 The command will be used to get the connection status. The command will return parameters
64 such as SSID, operating frequency, rx/tx packets, signal strength, tx bitrate.
65
66	 Apart from the iw utility, all standard configurations using the 'iwconfig' utility are also supported.
67
68 b) Debugfs interface
69
70 The debugfs interface can be used for configurations and for getting
71 some useful information from the driver.
72 The section below explains the configurations that can be
73 done.
74
75 Mount debugfs to /debugfs mount point:
76
77 mkdir /debugfs
78 mount -t debugfs debugfs /debugfs
79
80 The information is provided in /debugfs/mwifiex/mlanX/:
81
82iw reg set <country code>
83 The command will be used to change the regulatory domain.
84
85iw reg get
86 The command will be used to get current regulatory domain.
87
88info
89 This command is used to get driver info.
90
91 Usage:
92 cat info
93
94 driver_name = "mwifiex"
95 driver_version = <driver_name, driver_version, (firmware_version)>
96 interface_name = "mlanX"
97 bss_mode = "Ad-hoc" | "Managed" | "Auto" | "Unknown"
98 media_state = "Disconnected" | "Connected"
99 mac_address = <6-byte adapter MAC address>
100	multicast_count = <multicast address count>
101 essid = <current SSID>
102 bssid = <current BSSID>
103 channel = <current channel>
104 region_code = <current region code>
105	multicast_address[n] = <multicast address>
106 num_tx_bytes = <number of bytes sent to device>
107 num_rx_bytes = <number of bytes received from device and sent to kernel>
108 num_tx_pkts = <number of packets sent to device>
109 num_rx_pkts = <number of packets received from device and sent to kernel>
110 num_tx_pkts_dropped = <number of Tx packets dropped by driver>
111 num_rx_pkts_dropped = <number of Rx packets dropped by driver>
112 num_tx_pkts_err = <number of Tx packets failed to send to device>
113 num_rx_pkts_err = <number of Rx packets failed to receive from device>
114 carrier "on" | "off"
115 tx queue "stopped" | "started"
116
117 The following debug info are provided in /debugfs/mwifiex/mlanX/debug:
118
119 int_counter = <interrupt count, cleared when interrupt handled>
120 wmm_ac_vo = <number of packets sent to device from WMM AcVo queue>
121 wmm_ac_vi = <number of packets sent to device from WMM AcVi queue>
122 wmm_ac_be = <number of packets sent to device from WMM AcBE queue>
123 wmm_ac_bk = <number of packets sent to device from WMM AcBK queue>
124 max_tx_buf_size = <maximum Tx buffer size>
125 tx_buf_size = <current Tx buffer size>
126 curr_tx_buf_size = <current Tx buffer size>
127 ps_mode = <0/1, CAM mode/PS mode>
128 ps_state = <0/1/2/3, full power state/awake state/pre-sleep state/sleep state>
129 is_deep_sleep = <0/1, not deep sleep state/deep sleep state>
130 wakeup_dev_req = <0/1, wakeup device not required/required>
131 wakeup_tries = <wakeup device count, cleared when device awake>
132 hs_configured = <0/1, host sleep not configured/configured>
133 hs_activated = <0/1, extended host sleep not activated/activated>
134 num_tx_timeout = <number of Tx timeout>
135 num_cmd_timeout = <number of timeout commands>
136 timeout_cmd_id = <command id of the last timeout command>
137 timeout_cmd_act = <command action of the last timeout command>
138 last_cmd_id = <command id of the last several commands sent to device>
139 last_cmd_act = <command action of the last several commands sent to device>
140 last_cmd_index = <0 based last command index>
141 last_cmd_resp_id = <command id of the last several command responses received from device>
142 last_cmd_resp_index = <0 based last command response index>
143 last_event = <event id of the last several events received from device>
144 last_event_index = <0 based last event index>
145 num_cmd_h2c_fail = <number of commands failed to send to device>
146 num_cmd_sleep_cfm_fail = <number of sleep confirm failed to send to device>
147 num_tx_h2c_fail = <number of data packets failed to send to device>
148 num_evt_deauth = <number of deauthenticated events received from device>
149 num_evt_disassoc = <number of disassociated events received from device>
150 num_evt_link_lost = <number of link lost events received from device>
151 num_cmd_deauth = <number of deauthenticate commands sent to device>
152 num_cmd_assoc_ok = <number of associate commands with success return>
153 num_cmd_assoc_fail = <number of associate commands with failure return>
154 cmd_sent = <0/1, send command resources available/sending command to device>
155 data_sent = <0/1, send data resources available/sending data to device>
156 mp_rd_bitmap = <SDIO multi-port read bitmap>
157 mp_wr_bitmap = <SDIO multi-port write bitmap>
158 cmd_resp_received = <0/1, no cmd response to process/response received and yet to process>
159 event_received = <0/1, no event to process/event received and yet to process>
160 cmd_pending = <number of cmd pending>
161 tx_pending = <number of Tx packet pending>
162 rx_pending = <number of Rx packet pending>
163
164
1653) FOR DRIVER CONFIGURATION
166
167regrdwr
168 This command is used to read/write the adapter register.
169
170 Usage:
171 echo " <type> <offset> [value]" > regrdwr
172 cat regrdwr
173
174 where the parameters are,
175 <type>: 1:MAC/SOC, 2:BBP, 3:RF, 4:PMIC, 5:CAU
176 <offset>: offset of register
177 [value]: value to be written
178
179 Examples:
180 echo "1 0xa060" > regrdwr : Read the MAC register
181 echo "1 0xa060 0x12" > regrdwr : Write the MAC register
182 echo "1 0xa794 0x80000000" > regrdwr
183 : Write 0x80000000 to MAC register
184rdeeprom
185 This command is used to read the EEPROM contents of the card.
186
187 Usage:
188 echo "<offset> <length>" > rdeeprom
189 cat rdeeprom
190
191 where the parameters are,
192 <offset>: multiples of 4
193 <length>: 4-20, multiples of 4
194
195 Example:
196 echo "0 20" > rdeeprom : Read 20 bytes of EEPROM data from offset 0
197
198getlog
199 This command is used to get the statistics available in the station.
200 Usage:
201
202 cat getlog
203
204===============================================================================
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
new file mode 100644
index 000000000000..660831ce293c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -0,0 +1,1417 @@
1/*
2 * Marvell Wireless LAN device driver: CFG80211
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "cfg80211.h"
21#include "main.h"
22
23/*
24 * This function maps the nl80211 channel type into driver channel type.
25 *
26 * The mapping is as follows -
27 * NL80211_CHAN_NO_HT -> NO_SEC_CHANNEL
28 * NL80211_CHAN_HT20 -> NO_SEC_CHANNEL
29 * NL80211_CHAN_HT40PLUS -> SEC_CHANNEL_ABOVE
30 * NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW
31 * Others -> NO_SEC_CHANNEL
32 */
33static int
34mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type
35 channel_type)
36{
37 switch (channel_type) {
38 case NL80211_CHAN_NO_HT:
39 case NL80211_CHAN_HT20:
40 return NO_SEC_CHANNEL;
41 case NL80211_CHAN_HT40PLUS:
42 return SEC_CHANNEL_ABOVE;
43 case NL80211_CHAN_HT40MINUS:
44 return SEC_CHANNEL_BELOW;
45 default:
46 return NO_SEC_CHANNEL;
47 }
48}
49
50/*
51 * This function maps the driver channel type into nl80211 channel type.
52 *
53 * The mapping is as follows -
54 * NO_SEC_CHANNEL -> NL80211_CHAN_HT20
55 * SEC_CHANNEL_ABOVE -> NL80211_CHAN_HT40PLUS
56 * SEC_CHANNEL_BELOW -> NL80211_CHAN_HT40MINUS
57 * Others -> NL80211_CHAN_HT20
58 */
59static enum nl80211_channel_type
60mwifiex_channels_to_cfg80211_channel_type(int channel_type)
61{
62 switch (channel_type) {
63 case NO_SEC_CHANNEL:
64 return NL80211_CHAN_HT20;
65 case SEC_CHANNEL_ABOVE:
66 return NL80211_CHAN_HT40PLUS;
67 case SEC_CHANNEL_BELOW:
68 return NL80211_CHAN_HT40MINUS;
69 default:
70 return NL80211_CHAN_HT20;
71 }
72}
73
74/*
75 * This function checks whether WEP is set.
76 */
77static int
78mwifiex_is_alg_wep(u32 cipher)
79{
80 switch (cipher) {
81 case WLAN_CIPHER_SUITE_WEP40:
82 case WLAN_CIPHER_SUITE_WEP104:
83 return 1;
84 default:
85 break;
86 }
87
88 return 0;
89}
90
91/*
92 * This function retrieves the private structure from kernel wiphy structure.
93 */
94static void *mwifiex_cfg80211_get_priv(struct wiphy *wiphy)
95{
96 return (void *) (*(unsigned long *) wiphy_priv(wiphy));
97}
98
99/*
100 * CFG802.11 operation handler to delete a network key.
101 */
102static int
103mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
104 u8 key_index, bool pairwise, const u8 *mac_addr)
105{
106 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
107
108 if (mwifiex_set_encode(priv, NULL, 0, key_index, 1)) {
109 wiphy_err(wiphy, "deleting the crypto keys\n");
110 return -EFAULT;
111 }
112
113 wiphy_dbg(wiphy, "info: crypto keys deleted\n");
114 return 0;
115}
116
117/*
118 * CFG802.11 operation handler to set Tx power.
119 */
120static int
121mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
122 enum nl80211_tx_power_setting type,
123 int dbm)
124{
125 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
126 struct mwifiex_power_cfg power_cfg;
127
128 if (type == NL80211_TX_POWER_FIXED) {
129 power_cfg.is_power_auto = 0;
130 power_cfg.power_level = dbm;
131 } else {
132 power_cfg.is_power_auto = 1;
133 }
134
135 return mwifiex_set_tx_power(priv, &power_cfg);
136}
137
138/*
139 * CFG802.11 operation handler to set Power Save option.
140 *
141 * The timeout value, if provided, is currently ignored.
142 */
143static int
144mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
145 struct net_device *dev,
146 bool enabled, int timeout)
147{
148 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
149 u32 ps_mode;
150
151 if (timeout)
152 wiphy_dbg(wiphy,
153 "info: ignoring the timeout value"
154 " for IEEE power save\n");
155
156 ps_mode = enabled;
157
158 return mwifiex_drv_set_power(priv, &ps_mode);
159}
160
161/*
162 * CFG802.11 operation handler to set the default network key.
163 */
164static int
165mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
166 u8 key_index, bool unicast,
167 bool multicast)
168{
169 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
170
171 /* Return if WEP key not configured */
172 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED)
173 return 0;
174
175 if (mwifiex_set_encode(priv, NULL, 0, key_index, 0)) {
176 wiphy_err(wiphy, "set default Tx key index\n");
177 return -EFAULT;
178 }
179
180 return 0;
181}
182
183/*
184 * CFG802.11 operation handler to add a network key.
185 */
186static int
187mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
188 u8 key_index, bool pairwise, const u8 *mac_addr,
189 struct key_params *params)
190{
191 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
192
193 if (mwifiex_set_encode(priv, params->key, params->key_len,
194 key_index, 0)) {
195		wiphy_err(wiphy, "adding the crypto keys\n");
196 return -EFAULT;
197 }
198
199 return 0;
200}
201
202/*
203 * This function sends domain information to the firmware.
204 *
205 * The following information is passed to the firmware -
206 * - Country codes
207 * - Sub bands (first channel, number of channels, maximum Tx power)
208 */
209static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
210{
211 u8 no_of_triplet = 0;
212 struct ieee80211_country_ie_triplet *t;
213 u8 no_of_parsed_chan = 0;
214 u8 first_chan = 0, next_chan = 0, max_pwr = 0;
215 u8 i, flag = 0;
216 enum ieee80211_band band;
217 struct ieee80211_supported_band *sband;
218 struct ieee80211_channel *ch;
219 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
220 struct mwifiex_adapter *adapter = priv->adapter;
221 struct mwifiex_802_11d_domain_reg *domain_info = &adapter->domain_reg;
222
223 /* Set country code */
224 domain_info->country_code[0] = priv->country_code[0];
225 domain_info->country_code[1] = priv->country_code[1];
226 domain_info->country_code[2] = ' ';
227
228 band = mwifiex_band_to_radio_type(adapter->config_bands);
229 if (!wiphy->bands[band]) {
230 wiphy_err(wiphy, "11D: setting domain info in FW\n");
231 return -1;
232 }
233
234 sband = wiphy->bands[band];
235
236 for (i = 0; i < sband->n_channels ; i++) {
237 ch = &sband->channels[i];
238 if (ch->flags & IEEE80211_CHAN_DISABLED)
239 continue;
240
241 if (!flag) {
242 flag = 1;
243 first_chan = (u32) ch->hw_value;
244 next_chan = first_chan;
245 max_pwr = ch->max_power;
246 no_of_parsed_chan = 1;
247 continue;
248 }
249
250 if (ch->hw_value == next_chan + 1 &&
251 ch->max_power == max_pwr) {
252 next_chan++;
253 no_of_parsed_chan++;
254 } else {
255 t = &domain_info->triplet[no_of_triplet];
256 t->chans.first_channel = first_chan;
257 t->chans.num_channels = no_of_parsed_chan;
258 t->chans.max_power = max_pwr;
259 no_of_triplet++;
260 first_chan = (u32) ch->hw_value;
261 next_chan = first_chan;
262 max_pwr = ch->max_power;
263 no_of_parsed_chan = 1;
264 }
265 }
266
267 if (flag) {
268 t = &domain_info->triplet[no_of_triplet];
269 t->chans.first_channel = first_chan;
270 t->chans.num_channels = no_of_parsed_chan;
271 t->chans.max_power = max_pwr;
272 no_of_triplet++;
273 }
274
275 domain_info->no_of_triplet = no_of_triplet;
276
277 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
278 HostCmd_ACT_GEN_SET, 0, NULL)) {
279 wiphy_err(wiphy, "11D: setting domain info in FW\n");
280 return -1;
281 }
282
283 return 0;
284}
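
   The loop above compresses the per-band channel list into country-IE style
   triplets. As a standalone illustration of that grouping rule (runs of
   consecutive channel numbers with the same maximum Tx power collapse into one
   (first_channel, num_channels, max_power) entry), here is a small sketch with
   a made-up channel/power table; it is not the driver code and the helper name
   is hypothetical.

	/* Standalone sketch of the triplet grouping; the channel table is made up. */
	#include <stdio.h>
	#include <stddef.h>

	struct triplet {
		int first_chan;
		int num_chans;
		int max_pwr;
	};

	/* Collapse runs of consecutive channels with equal max power into triplets. */
	static size_t build_triplets(const int (*chans)[2], size_t n, struct triplet *out)
	{
		size_t count = 0;
		size_t i;

		for (i = 0; i < n; i++) {
			int chan = chans[i][0];
			int pwr = chans[i][1];

			if (count &&
			    chan == out[count - 1].first_chan + out[count - 1].num_chans &&
			    pwr == out[count - 1].max_pwr) {
				out[count - 1].num_chans++;	/* extend the current run */
			} else {
				out[count].first_chan = chan;	/* start a new run */
				out[count].num_chans = 1;
				out[count].max_pwr = pwr;
				count++;
			}
		}
		return count;
	}

	int main(void)
	{
		/* {channel, max power in dBm} pairs, already sorted by channel number */
		const int chans[][2] = { {1, 20}, {2, 20}, {3, 20}, {4, 17}, {5, 17}, {11, 20} };
		struct triplet t[6];
		size_t n = build_triplets(chans, 6, t);
		size_t i;

		for (i = 0; i < n; i++)		/* prints (1,3,20), (4,2,17), (11,1,20) */
			printf("first=%d num=%d pwr=%d\n",
			       t[i].first_chan, t[i].num_chans, t[i].max_pwr);
		return 0;
	}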
285
286/*
287 * CFG802.11 regulatory domain callback function.
288 *
289 * This function is called when the regulatory domain is changed due to the
290 * following reasons -
291 * - Set by driver
292 * - Set by system core
293 * - Set by user
294 * - Set by Country IE
295 */
296static int mwifiex_reg_notifier(struct wiphy *wiphy,
297 struct regulatory_request *request)
298{
299 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
300
301 wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for domain"
302 " %c%c\n", request->alpha2[0], request->alpha2[1]);
303
304 memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
305
306 switch (request->initiator) {
307 case NL80211_REGDOM_SET_BY_DRIVER:
308 case NL80211_REGDOM_SET_BY_CORE:
309 case NL80211_REGDOM_SET_BY_USER:
310 break;
311 /* Todo: apply driver specific changes in channel flags based
312 on the request initiator if necessary. */
313 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
314 break;
315 }
316 mwifiex_send_domain_info_cmd_fw(wiphy);
317
318 return 0;
319}
320
321/*
322 * This function sets the RF channel.
323 *
324 * This function creates multiple IOCTL requests, populates them accordingly
325 * and issues them to set the band/channel and frequency.
326 */
327static int
328mwifiex_set_rf_channel(struct mwifiex_private *priv,
329 struct ieee80211_channel *chan,
330 enum nl80211_channel_type channel_type)
331{
332 struct mwifiex_chan_freq_power cfp;
333 struct mwifiex_ds_band_cfg band_cfg;
334 u32 config_bands = 0;
335 struct wiphy *wiphy = priv->wdev->wiphy;
336
337 if (chan) {
338 memset(&band_cfg, 0, sizeof(band_cfg));
339 /* Set appropriate bands */
340 if (chan->band == IEEE80211_BAND_2GHZ)
341 config_bands = BAND_B | BAND_G | BAND_GN;
342 else
343 config_bands = BAND_AN | BAND_A;
344 if (priv->bss_mode == NL80211_IFTYPE_STATION
345 || priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED) {
346 band_cfg.config_bands = config_bands;
347 } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
348 band_cfg.config_bands = config_bands;
349 band_cfg.adhoc_start_band = config_bands;
350 }
351
352 band_cfg.sec_chan_offset =
353 mwifiex_cfg80211_channel_type_to_mwifiex_channels
354 (channel_type);
355
356 if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
357 return -EFAULT;
358
359 mwifiex_send_domain_info_cmd_fw(wiphy);
360 }
361
362 wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and "
363 "mode %d\n", config_bands, band_cfg.sec_chan_offset,
364 priv->bss_mode);
365 if (!chan)
366 return 0;
367
368 memset(&cfp, 0, sizeof(cfp));
369 cfp.freq = chan->center_freq;
370 cfp.channel = ieee80211_frequency_to_channel(chan->center_freq);
371
372 if (mwifiex_bss_set_channel(priv, &cfp))
373 return -EFAULT;
374
375 return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
376}
377
378/*
379 * CFG802.11 operation handler to set channel.
380 *
381 * This function can only be used when station is not connected.
382 */
383static int
384mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
385 struct ieee80211_channel *chan,
386 enum nl80211_channel_type channel_type)
387{
388 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
389
390 if (priv->media_connected) {
391 wiphy_err(wiphy, "This setting is valid only when station "
392 "is not connected\n");
393 return -EINVAL;
394 }
395
396 return mwifiex_set_rf_channel(priv, chan, channel_type);
397}
398
399/*
400 * This function sets the fragmentation threshold.
401 *
402 * The fragmentation threshold value must lie between MWIFIEX_FRAG_MIN_VALUE
403 * and MWIFIEX_FRAG_MAX_VALUE.
404 */
405static int
406mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
407{
408 int ret;
409
410 if (frag_thr < MWIFIEX_FRAG_MIN_VALUE
411 || frag_thr > MWIFIEX_FRAG_MAX_VALUE)
412 return -EINVAL;
413
414 /* Send request to firmware */
415 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
416 HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
417 &frag_thr);
418
419 return ret;
420}
421
422/*
423 * This function sets the RTS threshold.
424 *
425 * The rts value must lie between MWIFIEX_RTS_MIN_VALUE
426 * and MWIFIEX_RTS_MAX_VALUE.
427 */
428static int
429mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
430{
431 if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE)
432 rts_thr = MWIFIEX_RTS_MAX_VALUE;
433
434 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
435 HostCmd_ACT_GEN_SET, RTS_THRESH_I,
436 &rts_thr);
437}
438
439/*
440 * CFG802.11 operation handler to set wiphy parameters.
441 *
442 * This function can be used to set the RTS threshold and the
443 * Fragmentation threshold of the driver.
444 */
445static int
446mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
447{
448 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
449 int ret = 0;
450
451 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
452 ret = mwifiex_set_rts(priv, wiphy->rts_threshold);
453 if (ret)
454 return ret;
455 }
456
457 if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
458 ret = mwifiex_set_frag(priv, wiphy->frag_threshold);
459
460 return ret;
461}
462
463/*
464 * CFG802.11 operation handler to change interface type.
465 */
466static int
467mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
468 struct net_device *dev,
469 enum nl80211_iftype type, u32 *flags,
470 struct vif_params *params)
471{
472 int ret;
473 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
474
475 if (priv->bss_mode == type) {
476 wiphy_warn(wiphy, "already set to required type\n");
477 return 0;
478 }
479
480 priv->bss_mode = type;
481
482 switch (type) {
483 case NL80211_IFTYPE_ADHOC:
484 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_ADHOC;
485 wiphy_dbg(wiphy, "info: setting interface type to adhoc\n");
486 break;
487 case NL80211_IFTYPE_STATION:
488 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
489 wiphy_dbg(wiphy, "info: setting interface type to managed\n");
490 break;
491 case NL80211_IFTYPE_UNSPECIFIED:
492 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
493 wiphy_dbg(wiphy, "info: setting interface type to auto\n");
494 return 0;
495 default:
496 wiphy_err(wiphy, "unknown interface type: %d\n", type);
497 return -EINVAL;
498 }
499
500 mwifiex_deauthenticate(priv, NULL);
501
502 priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
503
504 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
505 HostCmd_ACT_GEN_SET, 0, NULL);
506
507 return ret;
508}
509
510/*
511 * This function dumps the station information on a buffer.
512 *
513 * The following information is shown -
514 * - Total bytes transmitted
515 * - Total bytes received
516 * - Total packets transmitted
517 * - Total packets received
518 * - Signal quality level
519 * - Transmission rate
520 */
521static int
522mwifiex_dump_station_info(struct mwifiex_private *priv,
523 struct station_info *sinfo)
524{
525 struct mwifiex_ds_get_signal signal;
526 struct mwifiex_rate_cfg rate;
527 int ret = 0;
528
529	sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
530			STATION_INFO_RX_PACKETS |
531			STATION_INFO_TX_PACKETS |
532			STATION_INFO_SIGNAL | STATION_INFO_TX_BITRATE;
533
534 /* Get signal information from the firmware */
535 memset(&signal, 0, sizeof(struct mwifiex_ds_get_signal));
536 if (mwifiex_get_signal_info(priv, &signal)) {
537 dev_err(priv->adapter->dev, "getting signal information\n");
538 ret = -EFAULT;
539 }
540
541 if (mwifiex_drv_get_data_rate(priv, &rate)) {
542 dev_err(priv->adapter->dev, "getting data rate\n");
543 ret = -EFAULT;
544 }
545
546 sinfo->rx_bytes = priv->stats.rx_bytes;
547 sinfo->tx_bytes = priv->stats.tx_bytes;
548 sinfo->rx_packets = priv->stats.rx_packets;
549 sinfo->tx_packets = priv->stats.tx_packets;
550 sinfo->signal = priv->w_stats.qual.level;
551 sinfo->txrate.legacy = rate.rate;
552
553 return ret;
554}
555
556/*
557 * CFG802.11 operation handler to get station information.
558 *
559 * This function only works in connected mode, and dumps the
560 * requested station information, if available.
561 */
562static int
563mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
564 u8 *mac, struct station_info *sinfo)
565{
566 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
567
569
570 if (!priv->media_connected)
571 return -ENOENT;
572 if (memcmp(mac, priv->cfg_bssid, ETH_ALEN))
573 return -ENOENT;
574
575 return mwifiex_dump_station_info(priv, sinfo);
576}
577
578/* Supported rates to be advertised to the cfg80211 */
579
580static struct ieee80211_rate mwifiex_rates[] = {
581 {.bitrate = 10, .hw_value = 2, },
582 {.bitrate = 20, .hw_value = 4, },
583 {.bitrate = 55, .hw_value = 11, },
584 {.bitrate = 110, .hw_value = 22, },
585 {.bitrate = 220, .hw_value = 44, },
586 {.bitrate = 60, .hw_value = 12, },
587 {.bitrate = 90, .hw_value = 18, },
588 {.bitrate = 120, .hw_value = 24, },
589 {.bitrate = 180, .hw_value = 36, },
590 {.bitrate = 240, .hw_value = 48, },
591 {.bitrate = 360, .hw_value = 72, },
592 {.bitrate = 480, .hw_value = 96, },
593 {.bitrate = 540, .hw_value = 108, },
594 {.bitrate = 720, .hw_value = 144, },
595};
596
597/* Channel definitions to be advertised to cfg80211 */
598
599static struct ieee80211_channel mwifiex_channels_2ghz[] = {
600 {.center_freq = 2412, .hw_value = 1, },
601 {.center_freq = 2417, .hw_value = 2, },
602 {.center_freq = 2422, .hw_value = 3, },
603 {.center_freq = 2427, .hw_value = 4, },
604 {.center_freq = 2432, .hw_value = 5, },
605 {.center_freq = 2437, .hw_value = 6, },
606 {.center_freq = 2442, .hw_value = 7, },
607 {.center_freq = 2447, .hw_value = 8, },
608 {.center_freq = 2452, .hw_value = 9, },
609 {.center_freq = 2457, .hw_value = 10, },
610 {.center_freq = 2462, .hw_value = 11, },
611 {.center_freq = 2467, .hw_value = 12, },
612 {.center_freq = 2472, .hw_value = 13, },
613 {.center_freq = 2484, .hw_value = 14, },
614};
615
616static struct ieee80211_supported_band mwifiex_band_2ghz = {
617 .channels = mwifiex_channels_2ghz,
618 .n_channels = ARRAY_SIZE(mwifiex_channels_2ghz),
619 .bitrates = mwifiex_rates,
620 .n_bitrates = 14,
621};
622
623static struct ieee80211_channel mwifiex_channels_5ghz[] = {
624 {.center_freq = 5040, .hw_value = 8, },
625 {.center_freq = 5060, .hw_value = 12, },
626 {.center_freq = 5080, .hw_value = 16, },
627 {.center_freq = 5170, .hw_value = 34, },
628 {.center_freq = 5190, .hw_value = 38, },
629 {.center_freq = 5210, .hw_value = 42, },
630 {.center_freq = 5230, .hw_value = 46, },
631 {.center_freq = 5180, .hw_value = 36, },
632 {.center_freq = 5200, .hw_value = 40, },
633 {.center_freq = 5220, .hw_value = 44, },
634 {.center_freq = 5240, .hw_value = 48, },
635 {.center_freq = 5260, .hw_value = 52, },
636 {.center_freq = 5280, .hw_value = 56, },
637 {.center_freq = 5300, .hw_value = 60, },
638 {.center_freq = 5320, .hw_value = 64, },
639 {.center_freq = 5500, .hw_value = 100, },
640 {.center_freq = 5520, .hw_value = 104, },
641 {.center_freq = 5540, .hw_value = 108, },
642 {.center_freq = 5560, .hw_value = 112, },
643 {.center_freq = 5580, .hw_value = 116, },
644 {.center_freq = 5600, .hw_value = 120, },
645 {.center_freq = 5620, .hw_value = 124, },
646 {.center_freq = 5640, .hw_value = 128, },
647 {.center_freq = 5660, .hw_value = 132, },
648 {.center_freq = 5680, .hw_value = 136, },
649 {.center_freq = 5700, .hw_value = 140, },
650 {.center_freq = 5745, .hw_value = 149, },
651 {.center_freq = 5765, .hw_value = 153, },
652 {.center_freq = 5785, .hw_value = 157, },
653 {.center_freq = 5805, .hw_value = 161, },
654 {.center_freq = 5825, .hw_value = 165, },
655};
656
657static struct ieee80211_supported_band mwifiex_band_5ghz = {
658 .channels = mwifiex_channels_5ghz,
659 .n_channels = ARRAY_SIZE(mwifiex_channels_5ghz),
660	.bitrates = mwifiex_rates + 4,
661	.n_bitrates = ARRAY_SIZE(mwifiex_rates) - 4,
662};
663
664
665/* Supported crypto cipher suits to be advertised to cfg80211 */
666
667static const u32 mwifiex_cipher_suites[] = {
668 WLAN_CIPHER_SUITE_WEP40,
669 WLAN_CIPHER_SUITE_WEP104,
670 WLAN_CIPHER_SUITE_TKIP,
671 WLAN_CIPHER_SUITE_CCMP,
672};
673
674/*
675 * CFG802.11 operation handler for disconnection request.
676 *
677 * This function does not work when there is already a disconnection
678 * procedure going on.
679 */
680static int
681mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
682 u16 reason_code)
683{
684 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
685
686 if (priv->disconnect)
687 return -EBUSY;
688
689 priv->disconnect = 1;
690 if (mwifiex_deauthenticate(priv, NULL))
691 return -EFAULT;
692
693 wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
694 " reason code %d\n", priv->cfg_bssid, reason_code);
695
696 queue_work(priv->workqueue, &priv->cfg_workqueue);
697
698 return 0;
699}
700
701/*
702 * This function informs the CFG802.11 subsystem of a new IBSS.
703 *
704 * The following information is sent to the CFG802.11 subsystem
705 * to register the new IBSS. If we do not register the new IBSS,
706 * a kernel panic will result.
707 * - SSID
708 * - SSID length
709 * - BSSID
710 * - Channel
711 */
712static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
713{
714 struct ieee80211_channel *chan;
715 struct mwifiex_bss_info bss_info;
716 int ie_len;
717 u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
718
719 if (mwifiex_get_bss_info(priv, &bss_info))
720 return -1;
721
722 ie_buf[0] = WLAN_EID_SSID;
723 ie_buf[1] = bss_info.ssid.ssid_len;
724
725 memcpy(&ie_buf[sizeof(struct ieee_types_header)],
726 &bss_info.ssid.ssid,
727 bss_info.ssid.ssid_len);
728 ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
729
730 chan = __ieee80211_get_channel(priv->wdev->wiphy,
731 ieee80211_channel_to_frequency(bss_info.bss_chan,
732 priv->curr_bss_params.band));
733
734 cfg80211_inform_bss(priv->wdev->wiphy, chan,
735 bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
736 0, ie_buf, ie_len, 0, GFP_KERNEL);
737 memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
738
739 return 0;
740}
741
742/*
743 * This function informs the CFG802.11 subsystem of a new BSS connection.
744 *
745 * The following information is sent to the CFG802.11 subsystem
746 * to register the new BSS connection. If we do not register the new BSS,
747 * a kernel panic will result.
748 * - MAC address
749 * - Capabilities
750 * - Beacon period
751 * - RSSI value
752 * - Channel
753 * - Supported rates IE
754 * - Extended capabilities IE
755 * - DS parameter set IE
756 * - HT Capability IE
757 * - Vendor Specific IE (221)
758 * - WPA IE
759 * - RSN IE
760 */
761static int mwifiex_inform_bss_from_scan_result(struct mwifiex_private *priv,
762 struct mwifiex_802_11_ssid *ssid)
763{
764 struct mwifiex_bssdescriptor *scan_table;
765 int i, j;
766 struct ieee80211_channel *chan;
767 u8 *ie, *ie_buf;
768 u32 ie_len;
769 u8 *beacon;
770 int beacon_size;
771 u8 element_id, element_len;
772
773#define MAX_IE_BUF 2048
774 ie_buf = kzalloc(MAX_IE_BUF, GFP_KERNEL);
775 if (!ie_buf) {
776 dev_err(priv->adapter->dev, "%s: failed to alloc ie_buf\n",
777 __func__);
778 return -ENOMEM;
779 }
780
781 scan_table = priv->adapter->scan_table;
782 for (i = 0; i < priv->adapter->num_in_scan_table; i++) {
783 if (ssid) {
784 /* Inform specific BSS only */
785 if (memcmp(ssid->ssid, scan_table[i].ssid.ssid,
786 ssid->ssid_len))
787 continue;
788 }
789 memset(ie_buf, 0, MAX_IE_BUF);
790 ie_buf[0] = WLAN_EID_SSID;
791 ie_buf[1] = scan_table[i].ssid.ssid_len;
792 memcpy(&ie_buf[sizeof(struct ieee_types_header)],
793 scan_table[i].ssid.ssid, ie_buf[1]);
794
795 ie = ie_buf + ie_buf[1] + sizeof(struct ieee_types_header);
796 ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
797
798 ie[0] = WLAN_EID_SUPP_RATES;
799
800 for (j = 0; j < sizeof(scan_table[i].supported_rates); j++) {
801 if (!scan_table[i].supported_rates[j])
802 break;
803 else
804 ie[j + sizeof(struct ieee_types_header)] =
805 scan_table[i].supported_rates[j];
806 }
807
808 ie[1] = j;
809 ie_len += ie[1] + sizeof(struct ieee_types_header);
810
811 beacon = scan_table[i].beacon_buf;
812 beacon_size = scan_table[i].beacon_buf_size;
813
814 /* Skip time stamp, beacon interval and capability */
815
816 if (beacon) {
817 beacon += sizeof(scan_table[i].beacon_period)
818				+ sizeof(scan_table[i].time_stamp)
819				+ sizeof(scan_table[i].cap_info_bitmap);
820
821 beacon_size -= sizeof(scan_table[i].beacon_period)
822 + sizeof(scan_table[i].time_stamp)
823 + sizeof(scan_table[i].cap_info_bitmap);
824 }
825
826 while (beacon_size >= sizeof(struct ieee_types_header)) {
827 ie = ie_buf + ie_len;
828 element_id = *beacon;
829 element_len = *(beacon + 1);
830 if (beacon_size < (int) element_len +
831 sizeof(struct ieee_types_header)) {
832 dev_err(priv->adapter->dev, "%s: in processing"
833 " IE, bytes left < IE length\n",
834 __func__);
835 break;
836 }
837 switch (element_id) {
838 case WLAN_EID_EXT_CAPABILITY:
839 case WLAN_EID_DS_PARAMS:
840 case WLAN_EID_HT_CAPABILITY:
841 case WLAN_EID_VENDOR_SPECIFIC:
842 case WLAN_EID_RSN:
843 case WLAN_EID_BSS_AC_ACCESS_DELAY:
844 ie[0] = element_id;
845 ie[1] = element_len;
846 memcpy(&ie[sizeof(struct ieee_types_header)],
847 (u8 *) beacon
848 + sizeof(struct ieee_types_header),
849 element_len);
850 ie_len += ie[1] +
851 sizeof(struct ieee_types_header);
852 break;
853 default:
854 break;
855 }
856 beacon += element_len +
857 sizeof(struct ieee_types_header);
858 beacon_size -= element_len +
859 sizeof(struct ieee_types_header);
860 }
861 chan = ieee80211_get_channel(priv->wdev->wiphy,
862 scan_table[i].freq);
863 cfg80211_inform_bss(priv->wdev->wiphy, chan,
864 scan_table[i].mac_address,
865 0, scan_table[i].cap_info_bitmap,
866 scan_table[i].beacon_period,
867 ie_buf, ie_len,
868 scan_table[i].rssi, GFP_KERNEL);
869 }
870
871 kfree(ie_buf);
872 return 0;
873}
874
875/*
876 * This function connects with a BSS.
877 *
878 * This function handles both Infra and Ad-Hoc modes. It also performs
879 * validity checking on the provided parameters, disconnects from the
880 * current BSS (if any), sets up the association/scan parameters,
881 * including security settings, and performs specific SSID scan before
882 * trying to connect.
883 *
884 * For Infra mode, the function returns failure if the specified SSID
885 * is not found in scan table. However, for Ad-Hoc mode, it can create
886 * the IBSS if it does not exist. On successful completion in either case,
887 * the function notifies the CFG802.11 subsystem of the new BSS connection,
888 * otherwise the kernel will panic.
889 */
890static int
891mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
892 u8 *bssid, int mode, struct ieee80211_channel *channel,
893 struct cfg80211_connect_params *sme, bool privacy)
894{
895 struct mwifiex_802_11_ssid req_ssid;
896 struct mwifiex_ssid_bssid ssid_bssid;
897 int ret, auth_type = 0;
898
899 memset(&req_ssid, 0, sizeof(struct mwifiex_802_11_ssid));
900 memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
901
902 req_ssid.ssid_len = ssid_len;
903 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
904 dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
905 return -EINVAL;
906 }
907
908 memcpy(req_ssid.ssid, ssid, ssid_len);
909 if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
910 dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
911 return -EINVAL;
912 }
913
914	/* disconnect before trying to associate */
915 mwifiex_deauthenticate(priv, NULL);
916
917 if (channel)
918 ret = mwifiex_set_rf_channel(priv, channel,
919 mwifiex_channels_to_cfg80211_channel_type
920 (priv->adapter->chan_offset));
921
922 ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */
923
924 if (mode == NL80211_IFTYPE_ADHOC) {
925 /* "privacy" is set only for ad-hoc mode */
926 if (privacy) {
927 /*
928 * Keep WLAN_CIPHER_SUITE_WEP104 for now so that
929 * the firmware can find a matching network from the
930 * scan. The cfg80211 does not give us the encryption
931 * mode at this stage so just setting it to WEP here.
932 */
933 priv->sec_info.encryption_mode =
934 WLAN_CIPHER_SUITE_WEP104;
935 priv->sec_info.authentication_mode =
936 NL80211_AUTHTYPE_OPEN_SYSTEM;
937 }
938
939 goto done;
940 }
941
942 /* Now handle infra mode. "sme" is valid for infra mode only */
943 if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC
944 || sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
945 auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
946 else if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY)
947 auth_type = NL80211_AUTHTYPE_SHARED_KEY;
948
949 if (sme->crypto.n_ciphers_pairwise) {
950 priv->sec_info.encryption_mode =
951 sme->crypto.ciphers_pairwise[0];
952 priv->sec_info.authentication_mode = auth_type;
953 }
954
955 if (sme->crypto.cipher_group) {
956 priv->sec_info.encryption_mode = sme->crypto.cipher_group;
957 priv->sec_info.authentication_mode = auth_type;
958 }
959 if (sme->ie)
960 ret = mwifiex_set_gen_ie(priv, sme->ie, sme->ie_len);
961
962 if (sme->key) {
963		if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) {
964 dev_dbg(priv->adapter->dev,
965 "info: setting wep encryption"
966 " with key len %d\n", sme->key_len);
967 ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
968 sme->key_idx, 0);
969 }
970 }
971done:
972 /* Do specific SSID scanning */
973 if (mwifiex_request_scan(priv, &req_ssid)) {
974 dev_err(priv->adapter->dev, "scan error\n");
975 return -EFAULT;
976 }
977
978
979 memcpy(&ssid_bssid.ssid, &req_ssid, sizeof(struct mwifiex_802_11_ssid));
980
981 if (mode != NL80211_IFTYPE_ADHOC) {
982 if (mwifiex_find_best_bss(priv, &ssid_bssid))
983 return -EFAULT;
984		/* Report the BSS information to the kernel, otherwise
985		 * the kernel will panic after a successful assoc */
986 if (mwifiex_inform_bss_from_scan_result(priv, &req_ssid))
987 return -EFAULT;
988 }
989
990 dev_dbg(priv->adapter->dev, "info: trying to associate to %s and bssid %pM\n",
991 (char *) req_ssid.ssid, ssid_bssid.bssid);
992
993	memcpy(&priv->cfg_bssid, ssid_bssid.bssid, ETH_ALEN);
994
995 /* Connect to BSS by ESSID */
996 memset(&ssid_bssid.bssid, 0, ETH_ALEN);
997
998 if (!netif_queue_stopped(priv->netdev))
999 netif_stop_queue(priv->netdev);
1000
1001 if (mwifiex_bss_start(priv, &ssid_bssid))
1002 return -EFAULT;
1003
1004 if (mode == NL80211_IFTYPE_ADHOC) {
1005		/* Report the BSS information to the kernel, otherwise
1006		 * the kernel will panic after a successful assoc */
1007 if (mwifiex_cfg80211_inform_ibss_bss(priv))
1008 return -EFAULT;
1009 }
1010
1011 return ret;
1012}
1013
1014/*
1015 * CFG802.11 operation handler for association request.
1016 *
1017 * This function does not work when the current mode is set to Ad-Hoc, or
1018 * when there is already an association procedure going on. The given BSS
1019 * information is used to associate.
1020 */
1021static int
1022mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1023 struct cfg80211_connect_params *sme)
1024{
1025 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1026 int ret = 0;
1027
1028 if (priv->assoc_request)
1029 return -EBUSY;
1030
1031 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
1032 wiphy_err(wiphy, "received infra assoc request "
1033 "when station is in ibss mode\n");
1034 goto done;
1035 }
1036
1037 priv->assoc_request = -EINPROGRESS;
1038
1039 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
1040 (char *) sme->ssid, sme->bssid);
1041
1042 ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
1043 priv->bss_mode, sme->channel, sme, 0);
1044
1045 priv->assoc_request = 1;
1046done:
1047 priv->assoc_result = ret;
1048 queue_work(priv->workqueue, &priv->cfg_workqueue);
1049 return ret;
1050}
1051
1052/*
1053 * CFG802.11 operation handler to join an IBSS.
1054 *
1055 * This function does not work in any mode other than Ad-Hoc, or if
1056 * a join operation is already in progress.
1057 */
1058static int
1059mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1060 struct cfg80211_ibss_params *params)
1061{
1062 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
1063 int ret = 0;
1064
1065 if (priv->ibss_join_request)
1066 return -EBUSY;
1067
1068 if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
1069 wiphy_err(wiphy, "request to join ibss received "
1070 "when station is not in ibss mode\n");
1071 goto done;
1072 }
1073
1074 priv->ibss_join_request = -EINPROGRESS;
1075
1076 wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
1077 (char *) params->ssid, params->bssid);
1078
1079 ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
1080 params->bssid, priv->bss_mode,
1081 params->channel, NULL, params->privacy);
1082
1083 priv->ibss_join_request = 1;
1084done:
1085 priv->ibss_join_result = ret;
1086 queue_work(priv->workqueue, &priv->cfg_workqueue);
1087 return ret;
1088}
1089
1090/*
1091 * CFG802.11 operation handler to leave an IBSS.
1092 *
1093 * This function does not work if a leave operation is
1094 * already in progress.
1095 */
1096static int
1097mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1098{
1099 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
1100
1101 if (priv->disconnect)
1102 return -EBUSY;
1103
1104 priv->disconnect = 1;
1105
1106 wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
1107 priv->cfg_bssid);
1108 if (mwifiex_deauthenticate(priv, NULL))
1109 return -EFAULT;
1110
1111 queue_work(priv->workqueue, &priv->cfg_workqueue);
1112
1113 return 0;
1114}
1115
1116/*
1117 * CFG802.11 operation handler for scan request.
1118 *
1119 * This function issues a scan request to the firmware based upon
1120 * the user-specified scan configuration. On successful completion,
1121 * it also reports the results.
1122 */
1123static int
1124mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1125 struct cfg80211_scan_request *request)
1126{
1127 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1128
1129 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
1130
1131 if (priv->scan_request && priv->scan_request != request)
1132 return -EBUSY;
1133
1134 priv->scan_request = request;
1135
1136 queue_work(priv->workqueue, &priv->cfg_workqueue);
1137 return 0;
1138}
1139
1140/*
1141 * This function sets up the CFG802.11 specific HT capability fields
1142 * with default values.
1143 *
1144 * The following default values are set -
1145 * - HT Supported = True
1146 * - Maximum AMPDU length factor = IEEE80211_HT_MAX_AMPDU_64K
1147 * - Minimum AMPDU spacing = IEEE80211_HT_MPDU_DENSITY_NONE
1148 * - HT Capabilities supported by firmware
1149 * - MCS information, Rx mask = 0xff
1150 * - MCS information, Tx parameters = IEEE80211_HT_MCS_TX_DEFINED (0x01)
1151 */
1152static void
1153mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
1154 struct mwifiex_private *priv)
1155{
1156 int rx_mcs_supp;
1157 struct ieee80211_mcs_info mcs_set;
1158 u8 *mcs = (u8 *)&mcs_set;
1159 struct mwifiex_adapter *adapter = priv->adapter;
1160
1161 ht_info->ht_supported = true;
1162 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
1163 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
1164
1165 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
1166
1167 /* Fill HT capability information */
1168 if (ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
1169 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1170 else
1171 ht_info->cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1172
1173 if (ISSUPP_SHORTGI20(adapter->hw_dot_11n_dev_cap))
1174 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
1175 else
1176 ht_info->cap &= ~IEEE80211_HT_CAP_SGI_20;
1177
1178 if (ISSUPP_SHORTGI40(adapter->hw_dot_11n_dev_cap))
1179 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
1180 else
1181 ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
1182
1183 if (ISSUPP_RXSTBC(adapter->hw_dot_11n_dev_cap))
1184 ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
1185 else
1186 ht_info->cap &= ~(3 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
1187
1188 if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap))
1189 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
1190 else
1191 ht_info->cap &= ~IEEE80211_HT_CAP_TX_STBC;
1192
1193 ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
1194 ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
1195
1196 rx_mcs_supp = GET_RXMCSSUPP(adapter->hw_dev_mcs_support);
1197 /* Set MCS for 1x1 */
1198 memset(mcs, 0xff, rx_mcs_supp);
1199 /* Clear all the other values */
1200 memset(&mcs[rx_mcs_supp], 0,
1201 sizeof(struct ieee80211_mcs_info) - rx_mcs_supp);
1202 if (priv->bss_mode == NL80211_IFTYPE_STATION ||
1203 ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
1204 /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
1205 SETHT_MCS32(mcs_set.rx_mask);
1206
1207 memcpy((u8 *) &ht_info->mcs, mcs, sizeof(struct ieee80211_mcs_info));
1208
1209 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1210}
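/*
 * Editor's sketch (not part of this patch): for a 1x1 device, i.e.
 * GET_RXMCSSUPP() returning 1, the MCS handling above is assumed to
 * leave the rx_mask looking like
 *
 *	mcs_set.rx_mask[0] = 0xff;	MCS 0-7 supported
 *	mcs_set.rx_mask[1..9] = 0x00;	no second spatial stream
 *
 * with SETHT_MCS32() additionally setting the MCS32 bit (assumed to be
 * bit 0 of rx_mask[4]) for infrastructure mode or 40 MHz capable
 * ad-hoc. tx_params = IEEE80211_HT_MCS_TX_DEFINED then advertises that
 * the Tx MCS set equals the Rx MCS set.
 */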
1211
1212/* station cfg80211 operations */
1213static struct cfg80211_ops mwifiex_cfg80211_ops = {
1214 .change_virtual_intf = mwifiex_cfg80211_change_virtual_intf,
1215 .scan = mwifiex_cfg80211_scan,
1216 .connect = mwifiex_cfg80211_connect,
1217 .disconnect = mwifiex_cfg80211_disconnect,
1218 .get_station = mwifiex_cfg80211_get_station,
1219 .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
1220 .set_channel = mwifiex_cfg80211_set_channel,
1221 .join_ibss = mwifiex_cfg80211_join_ibss,
1222 .leave_ibss = mwifiex_cfg80211_leave_ibss,
1223 .add_key = mwifiex_cfg80211_add_key,
1224 .del_key = mwifiex_cfg80211_del_key,
1225 .set_default_key = mwifiex_cfg80211_set_default_key,
1226 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
1227 .set_tx_power = mwifiex_cfg80211_set_tx_power,
1228};
1229
1230/*
1231 * This function registers the device with CFG802.11 subsystem.
1232 *
1233 * The function creates the wireless device/wiphy, populates it with
1234 * default parameters and handler function pointers, and finally
1235 * registers the device.
1236 */
1237int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
1238 struct mwifiex_private *priv)
1239{
1240 int ret;
1241 void *wdev_priv;
1242 struct wireless_dev *wdev;
1243
1244 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1245 if (!wdev) {
1246 dev_err(priv->adapter->dev, "%s: allocating wireless device\n",
1247 __func__);
1248 return -ENOMEM;
1249 }
1250 wdev->wiphy =
1251 wiphy_new(&mwifiex_cfg80211_ops,
1252 sizeof(struct mwifiex_private *));
1253 if (!wdev->wiphy) {
1254 kfree(wdev);
1255 return -ENOMEM;
1256 }
1257 wdev->iftype = NL80211_IFTYPE_STATION;
1258 wdev->wiphy->max_scan_ssids = 10;
1259 wdev->wiphy->interface_modes =
1260 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
1261
1262 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
1263 mwifiex_setup_ht_caps(
1264 &wdev->wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
1265
1266 if (priv->adapter->config_bands & BAND_A) {
1267 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
1268 mwifiex_setup_ht_caps(
1269 &wdev->wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
1270 } else {
1271 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
1272 }
1273
1274	/* Initialize cipher suites */
1275 wdev->wiphy->cipher_suites = mwifiex_cipher_suites;
1276 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
1277
1278 memcpy(wdev->wiphy->perm_addr, mac, 6);
1279 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1280
1281 /* We are using custom domains */
1282 wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1283
1284 wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
1285
1286 /* Set struct mwifiex_private pointer in wiphy_priv */
1287 wdev_priv = wiphy_priv(wdev->wiphy);
1288
1289 *(unsigned long *) wdev_priv = (unsigned long) priv;
1290
1291 ret = wiphy_register(wdev->wiphy);
1292 if (ret < 0) {
1293 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
1294 __func__);
1295 wiphy_free(wdev->wiphy);
1296 kfree(wdev);
1297 return ret;
1298 } else {
1299 dev_dbg(priv->adapter->dev,
1300 "info: successfully registered wiphy device\n");
1301 }
1302
1303 dev_net_set(dev, wiphy_net(wdev->wiphy));
1304 dev->ieee80211_ptr = wdev;
1305 memcpy(dev->dev_addr, wdev->wiphy->perm_addr, 6);
1306 memcpy(dev->perm_addr, wdev->wiphy->perm_addr, 6);
1307 SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
1308 priv->wdev = wdev;
1309
1310 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1311 dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
1312 dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
1313
1314 return ret;
1315}
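/*
 * Editor's note (illustrative, not part of this patch): the assignment
 * of 'priv' into wiphy_priv() above implies that the reverse lookup
 * used by handlers such as mwifiex_cfg80211_get_priv() amounts to the
 * sketch below; the helper name and body are assumptions, the real
 * helper is not part of this hunk.
 *
 *	static struct mwifiex_private *priv_from_wiphy(struct wiphy *wiphy)
 *	{
 *		return (struct mwifiex_private *)
 *			(*(unsigned long *) wiphy_priv(wiphy));
 *	}
 */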
1316
1317/*
1318 * This function handles the result of different pending network operations.
1319 *
1320 * The following operations are handled and CFG802.11 subsystem is
1321 * notified accordingly -
1322 * - Scan request completion
1323 * - Association request completion
1324 * - IBSS join request completion
1325 * - Disconnect request completion
1326 */
1327void
1328mwifiex_cfg80211_results(struct work_struct *work)
1329{
1330 struct mwifiex_private *priv =
1331 container_of(work, struct mwifiex_private, cfg_workqueue);
1332 struct mwifiex_user_scan_cfg *scan_req;
1333 int ret = 0, i;
1334 struct ieee80211_channel *chan;
1335
1336 if (priv->scan_request) {
1337 scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
1338 GFP_KERNEL);
1339 if (!scan_req) {
1340 dev_err(priv->adapter->dev, "failed to alloc "
1341 "scan_req\n");
1342 return;
1343 }
1344 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1345 memcpy(scan_req->ssid_list[i].ssid,
1346 priv->scan_request->ssids[i].ssid,
1347 priv->scan_request->ssids[i].ssid_len);
1348 scan_req->ssid_list[i].max_len =
1349 priv->scan_request->ssids[i].ssid_len;
1350 }
1351 for (i = 0; i < priv->scan_request->n_channels; i++) {
1352 chan = priv->scan_request->channels[i];
1353 scan_req->chan_list[i].chan_number = chan->hw_value;
1354 scan_req->chan_list[i].radio_type = chan->band;
1355 if (chan->flags & IEEE80211_CHAN_DISABLED)
1356 scan_req->chan_list[i].scan_type =
1357 MWIFIEX_SCAN_TYPE_PASSIVE;
1358 else
1359 scan_req->chan_list[i].scan_type =
1360 MWIFIEX_SCAN_TYPE_ACTIVE;
1361 scan_req->chan_list[i].scan_time = 0;
1362 }
1363 if (mwifiex_set_user_scan_ioctl(priv, scan_req)) {
1364 ret = -EFAULT;
1365 goto done;
1366 }
1367 if (mwifiex_inform_bss_from_scan_result(priv, NULL))
1368 ret = -EFAULT;
1369done:
1370 priv->scan_result_status = ret;
1371 dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
1372 __func__);
1373 cfg80211_scan_done(priv->scan_request,
1374 (priv->scan_result_status < 0));
1375 priv->scan_request = NULL;
1376 kfree(scan_req);
1377 }
1378
1379 if (priv->assoc_request == 1) {
1380 if (!priv->assoc_result) {
1381 cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
1382 NULL, 0, NULL, 0,
1383 WLAN_STATUS_SUCCESS,
1384 GFP_KERNEL);
1385 dev_dbg(priv->adapter->dev,
1386 "info: associated to bssid %pM successfully\n",
1387 priv->cfg_bssid);
1388 } else {
1389 dev_dbg(priv->adapter->dev,
1390 "info: association to bssid %pM failed\n",
1391 priv->cfg_bssid);
1392 memset(priv->cfg_bssid, 0, ETH_ALEN);
1393 }
1394 priv->assoc_request = 0;
1395 priv->assoc_result = 0;
1396 }
1397
1398 if (priv->ibss_join_request == 1) {
1399 if (!priv->ibss_join_result) {
1400 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
1401 GFP_KERNEL);
1402 dev_dbg(priv->adapter->dev,
1403 "info: joined/created adhoc network with bssid"
1404 " %pM successfully\n", priv->cfg_bssid);
1405 } else {
1406 dev_dbg(priv->adapter->dev,
1407 "info: failed creating/joining adhoc network\n");
1408 }
1409 priv->ibss_join_request = 0;
1410 priv->ibss_join_result = 0;
1411 }
1412
1413 if (priv->disconnect) {
1414 memset(priv->cfg_bssid, 0, ETH_ALEN);
1415 priv->disconnect = 0;
1416 }
1417}
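/*
 * Editor's summary (not part of this patch): the cfg80211 handlers
 * above use a simple flag protocol with this worker. connect/join set
 * assoc_request/ibss_join_request to -EINPROGRESS while issuing the
 * association, set it to 1 when done, store the status in
 * assoc_result/ibss_join_result and queue cfg_workqueue; this worker
 * then reports the outcome via cfg80211_connect_result(),
 * cfg80211_ibss_joined() or cfg80211_scan_done() and clears the flags.
 */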
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
new file mode 100644
index 000000000000..c4db8f36aa16
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -0,0 +1,31 @@
1/*
2 * Marvell Wireless LAN device driver: CFG80211
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef __MWIFIEX_CFG80211__
21#define __MWIFIEX_CFG80211__
22
23#include <net/cfg80211.h>
24
25#include "main.h"
26
27int mwifiex_register_cfg80211(struct net_device *, u8 *,
28 struct mwifiex_private *);
29
30void mwifiex_cfg80211_results(struct work_struct *work);
31#endif
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
new file mode 100644
index 000000000000..d0cada5a29a0
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -0,0 +1,360 @@
1/*
2 * Marvell Wireless LAN device driver: Channel, Frequency and Power
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "cfg80211.h"
26
27/* 100mW */
28#define MWIFIEX_TX_PWR_DEFAULT 20
29/* 100mW */
30#define MWIFIEX_TX_PWR_US_DEFAULT 20
31/* 50mW */
32#define MWIFIEX_TX_PWR_JP_DEFAULT 16
33/* 100mW */
34#define MWIFIEX_TX_PWR_FR_100MW 20
35/* 10mW */
36#define MWIFIEX_TX_PWR_FR_10MW 10
37/* 100mW */
38#define MWIFIEX_TX_PWR_EMEA_DEFAULT 20
39
40static u8 adhoc_rates_b[B_SUPPORTED_RATES] = { 0x82, 0x84, 0x8b, 0x96, 0 };
41
42static u8 adhoc_rates_g[G_SUPPORTED_RATES] = { 0x8c, 0x12, 0x98, 0x24,
43 0xb0, 0x48, 0x60, 0x6c, 0 };
44
45static u8 adhoc_rates_bg[BG_SUPPORTED_RATES] = { 0x82, 0x84, 0x8b, 0x96,
46 0x0c, 0x12, 0x18, 0x24,
47 0x30, 0x48, 0x60, 0x6c, 0 };
48
49static u8 adhoc_rates_a[A_SUPPORTED_RATES] = { 0x8c, 0x12, 0x98, 0x24,
50 0xb0, 0x48, 0x60, 0x6c, 0 };
51u8 supported_rates_a[A_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
52 0xb0, 0x48, 0x60, 0x6c, 0 };
53static u16 mwifiex_data_rates[MWIFIEX_SUPPORTED_RATES_EXT] = { 0x02, 0x04,
54 0x0B, 0x16, 0x00, 0x0C, 0x12, 0x18,
55 0x24, 0x30, 0x48, 0x60, 0x6C, 0x90,
56 0x0D, 0x1A, 0x27, 0x34, 0x4E, 0x68,
57 0x75, 0x82, 0x0C, 0x1B, 0x36, 0x51,
58 0x6C, 0xA2, 0xD8, 0xF3, 0x10E, 0x00 };
59
60u8 supported_rates_b[B_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x16, 0 };
61
62u8 supported_rates_g[G_SUPPORTED_RATES] = { 0x0c, 0x12, 0x18, 0x24,
63 0x30, 0x48, 0x60, 0x6c, 0 };
64
65u8 supported_rates_bg[BG_SUPPORTED_RATES] = { 0x02, 0x04, 0x0b, 0x0c,
66 0x12, 0x16, 0x18, 0x24, 0x30, 0x48,
67 0x60, 0x6c, 0 };
68
69u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
70 0x32, 0x40, 0x41, 0xff };
71
72u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
73
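/*
 * Editor's note (not part of this patch): the rate tables above use the
 * standard 802.11 supported-rates encoding, units of 500 kbps with the
 * top bit marking a basic rate. In adhoc_rates_b, for example,
 * 0x82 = (0x80 | 2) is 1 Mbps basic and 0x96 = (0x80 | 22) is 11 Mbps
 * basic, while 0x0c in supported_rates_g is a plain 6 Mbps rate. The
 * mwifiex_data_rates[] table, by contrast, holds raw rate values in the
 * same 500 kbps units without the basic-rate flag.
 */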
74/*
75 * This function maps an index in the supported rates table to
76 * the corresponding data rate.
77 */
78u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
79{
80 u16 mcs_rate[4][8] = {
81 {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e}
82 , /* LG 40M */
83 {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c}
84 , /* SG 40M */
85 {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82}
86 , /* LG 20M */
87 {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
88 }; /* SG 20M */
89
90 u32 rate;
91
92 if (ht_info & BIT(0)) {
93 if (index == MWIFIEX_RATE_BITMAP_MCS0) {
94 if (ht_info & BIT(2))
95 rate = 0x0D; /* MCS 32 SGI rate */
96 else
97 rate = 0x0C; /* MCS 32 LGI rate */
98 } else if (index < 8) {
99 if (ht_info & BIT(1)) {
100 if (ht_info & BIT(2))
101 /* SGI, 40M */
102 rate = mcs_rate[1][index];
103 else
104 /* LGI, 40M */
105 rate = mcs_rate[0][index];
106 } else {
107 if (ht_info & BIT(2))
108 /* SGI, 20M */
109 rate = mcs_rate[3][index];
110 else
111 /* LGI, 20M */
112 rate = mcs_rate[2][index];
113 }
114 } else
115 rate = mwifiex_data_rates[0];
116 } else {
117 if (index >= MWIFIEX_SUPPORTED_RATES_EXT)
118 index = 0;
119 rate = mwifiex_data_rates[index];
120 }
121 return rate;
122}
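/*
 * Worked example (editor's addition, not part of this patch): ht_info
 * acts as a small bit field here - BIT(0) = HT rate, BIT(1) = 40 MHz,
 * BIT(2) = short GI - so a call such as
 *
 *	rate = mwifiex_index_to_data_rate(3, BIT(0) | BIT(1) | BIT(2));
 *
 * selects mcs_rate[1][3] = 0x78, i.e. 120 units of 500 kbps = 60 Mbps
 * (MCS 3, 40 MHz, short GI), while the same index without BIT(0) falls
 * through to mwifiex_data_rates[3] = 0x16 (11 Mbps).
 */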
123
124/*
125 * This function maps a data rate value to the corresponding index in the
126 * supported rates table.
127 */
128u8 mwifiex_data_rate_to_index(u32 rate)
129{
130 u16 *ptr;
131
132 if (rate) {
133 ptr = memchr(mwifiex_data_rates, rate,
134 sizeof(mwifiex_data_rates));
135 if (ptr)
136 return (u8) (ptr - mwifiex_data_rates);
137 }
138 return 0;
139}
140
141/*
142 * This function returns the current active data rates.
143 *
144 * The result may vary depending upon connection status.
145 */
146u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv, u8 *rates)
147{
148 if (!priv->media_connected)
149 return mwifiex_get_supported_rates(priv, rates);
150 else
151 return mwifiex_copy_rates(rates, 0,
152 priv->curr_bss_params.data_rates,
153 priv->curr_bss_params.num_of_rates);
154}
155
156/*
157 * This function locates the Channel-Frequency-Power triplet based upon
158 * band and channel parameters.
159 */
160struct mwifiex_chan_freq_power *
161mwifiex_get_cfp_by_band_and_channel_from_cfg80211(struct mwifiex_private
162 *priv, u8 band, u16 channel)
163{
164 struct mwifiex_chan_freq_power *cfp = NULL;
165 struct ieee80211_supported_band *sband;
166 struct ieee80211_channel *ch;
167 int i;
168
169 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
170 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
171 else
172 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
173
174 if (!sband) {
175 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
176 " & channel %d\n", __func__, band, channel);
177 return cfp;
178 }
179
180 for (i = 0; i < sband->n_channels; i++) {
181 ch = &sband->channels[i];
182 if (((ch->hw_value == channel) ||
183 (channel == FIRST_VALID_CHANNEL))
184 && !(ch->flags & IEEE80211_CHAN_DISABLED)) {
185 priv->cfp.channel = channel;
186 priv->cfp.freq = ch->center_freq;
187 priv->cfp.max_tx_power = ch->max_power;
188 cfp = &priv->cfp;
189 break;
190 }
191 }
192 if (i == sband->n_channels)
193 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
194 " & channel %d\n", __func__, band, channel);
195
196 return cfp;
197}
198
199/*
200 * This function locates the Channel-Frequency-Power triplet based upon
201 * band and frequency parameters.
202 */
203struct mwifiex_chan_freq_power *
204mwifiex_get_cfp_by_band_and_freq_from_cfg80211(struct mwifiex_private *priv,
205 u8 band, u32 freq)
206{
207 struct mwifiex_chan_freq_power *cfp = NULL;
208 struct ieee80211_supported_band *sband;
209 struct ieee80211_channel *ch;
210 int i;
211
212 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
213 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
214 else
215 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
216
217 if (!sband) {
218 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
219 " & freq %d\n", __func__, band, freq);
220 return cfp;
221 }
222
223 for (i = 0; i < sband->n_channels; i++) {
224 ch = &sband->channels[i];
225 if ((ch->center_freq == freq) &&
226 !(ch->flags & IEEE80211_CHAN_DISABLED)) {
227 priv->cfp.channel = ch->hw_value;
228 priv->cfp.freq = freq;
229 priv->cfp.max_tx_power = ch->max_power;
230 cfp = &priv->cfp;
231 break;
232 }
233 }
234 if (i == sband->n_channels)
235 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
236 " & freq %d\n", __func__, band, freq);
237
238 return cfp;
239}
240
241/*
242 * This function checks if the data rate is set to auto.
243 */
244u8
245mwifiex_is_rate_auto(struct mwifiex_private *priv)
246{
247 u32 i;
248 int rate_num = 0;
249
250 for (i = 0; i < ARRAY_SIZE(priv->bitmap_rates); i++)
251 if (priv->bitmap_rates[i])
252 rate_num++;
253
254 if (rate_num > 1)
255 return true;
256 else
257 return false;
258}
259
260/*
261 * This function converts a rate bitmap into a rate index.
262 */
263int mwifiex_get_rate_index(u16 *rate_bitmap, int size)
264{
265 int i;
266
267 for (i = 0; i < size * 8; i++)
268 if (rate_bitmap[i / 16] & (1 << (i % 16)))
269 return i;
270
271 return 0;
272}
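/*
 * Worked example (editor's addition, not part of this patch), assuming
 * 'size' is the bitmap size in bytes:
 *
 *	u16 bitmap[2] = { 0x0000, 0x0004 };	bit 18 set overall
 *	int idx = mwifiex_get_rate_index(bitmap, sizeof(bitmap));
 *
 * The loop walks bits 0..31, finds bitmap[18 / 16] & (1 << (18 % 16))
 * non-zero and returns idx = 18.
 */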
273
274/*
275 * This function gets the supported data rates.
276 *
277 * The function works in both Ad-Hoc and infra mode. It logs the
278 * configured band and returns the corresponding data rates.
279 */
280u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
281{
282 u32 k = 0;
283 struct mwifiex_adapter *adapter = priv->adapter;
284 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
285 switch (adapter->config_bands) {
286 case BAND_B:
287 dev_dbg(adapter->dev, "info: infra band=%d "
288 "supported_rates_b\n", adapter->config_bands);
289 k = mwifiex_copy_rates(rates, k, supported_rates_b,
290 sizeof(supported_rates_b));
291 break;
292 case BAND_G:
293 case BAND_G | BAND_GN:
294 dev_dbg(adapter->dev, "info: infra band=%d "
295 "supported_rates_g\n", adapter->config_bands);
296 k = mwifiex_copy_rates(rates, k, supported_rates_g,
297 sizeof(supported_rates_g));
298 break;
299 case BAND_B | BAND_G:
300 case BAND_A | BAND_B | BAND_G:
301 case BAND_A | BAND_B:
302 case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
303 case BAND_B | BAND_G | BAND_GN:
304 dev_dbg(adapter->dev, "info: infra band=%d "
305 "supported_rates_bg\n", adapter->config_bands);
306 k = mwifiex_copy_rates(rates, k, supported_rates_bg,
307 sizeof(supported_rates_bg));
308 break;
309 case BAND_A:
310 case BAND_A | BAND_G:
311 dev_dbg(adapter->dev, "info: infra band=%d "
312 "supported_rates_a\n", adapter->config_bands);
313 k = mwifiex_copy_rates(rates, k, supported_rates_a,
314 sizeof(supported_rates_a));
315 break;
316 case BAND_A | BAND_AN:
317 case BAND_A | BAND_G | BAND_AN | BAND_GN:
318 dev_dbg(adapter->dev, "info: infra band=%d "
319 "supported_rates_a\n", adapter->config_bands);
320 k = mwifiex_copy_rates(rates, k, supported_rates_a,
321 sizeof(supported_rates_a));
322 break;
323 case BAND_GN:
324 dev_dbg(adapter->dev, "info: infra band=%d "
325 "supported_rates_n\n", adapter->config_bands);
326 k = mwifiex_copy_rates(rates, k, supported_rates_n,
327 sizeof(supported_rates_n));
328 break;
329 }
330 } else {
331 /* Ad-hoc mode */
332 switch (adapter->adhoc_start_band) {
333 case BAND_B:
334 dev_dbg(adapter->dev, "info: adhoc B\n");
335 k = mwifiex_copy_rates(rates, k, adhoc_rates_b,
336 sizeof(adhoc_rates_b));
337 break;
338 case BAND_G:
339 case BAND_G | BAND_GN:
340 dev_dbg(adapter->dev, "info: adhoc G only\n");
341 k = mwifiex_copy_rates(rates, k, adhoc_rates_g,
342 sizeof(adhoc_rates_g));
343 break;
344 case BAND_B | BAND_G:
345 case BAND_B | BAND_G | BAND_GN:
346 dev_dbg(adapter->dev, "info: adhoc BG\n");
347 k = mwifiex_copy_rates(rates, k, adhoc_rates_bg,
348 sizeof(adhoc_rates_bg));
349 break;
350 case BAND_A:
351 case BAND_A | BAND_AN:
352 dev_dbg(adapter->dev, "info: adhoc A\n");
353 k = mwifiex_copy_rates(rates, k, adhoc_rates_a,
354 sizeof(adhoc_rates_a));
355 break;
356 }
357 }
358
359 return k;
360}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
new file mode 100644
index 000000000000..cd89fed206ae
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -0,0 +1,1414 @@
1/*
2 * Marvell Wireless LAN device driver: commands and events
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function initializes a command node.
30 *
31 * The actual allocation of the node is not done by this function. It only
32 * initializes a node by filling it with default parameters. Similarly,
33 * allocation of the different buffers used (IOCTL buffer, data buffer) is
34 * not done by this function either.
35 */
36static void
37mwifiex_init_cmd_node(struct mwifiex_private *priv,
38 struct cmd_ctrl_node *cmd_node,
39 u32 cmd_oid, void *data_buf)
40{
41 cmd_node->priv = priv;
42 cmd_node->cmd_oid = cmd_oid;
43 cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required;
44 priv->adapter->cmd_wait_q_required = false;
45 cmd_node->data_buf = data_buf;
46 cmd_node->cmd_skb = cmd_node->skb;
47}
48
49/*
50 * This function returns a command node from the free queue depending upon
51 * availability.
52 */
53static struct cmd_ctrl_node *
54mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
55{
56 struct cmd_ctrl_node *cmd_node;
57 unsigned long flags;
58
59 spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
60 if (list_empty(&adapter->cmd_free_q)) {
61 dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n");
62 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
63 return NULL;
64 }
65 cmd_node = list_first_entry(&adapter->cmd_free_q,
66 struct cmd_ctrl_node, list);
67 list_del(&cmd_node->list);
68 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
69
70 return cmd_node;
71}
72
73/*
74 * This function cleans up a command node.
75 *
76 * The function resets the fields including the buffer pointers.
77 * This function does not try to free the buffers. They must be
78 * freed before calling this function.
79 *
80 * This function does, however, free the response buffer, if one is
81 * still attached, before resetting the pointer.
83 */
84static void
85mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
86 struct cmd_ctrl_node *cmd_node)
87{
88 cmd_node->cmd_oid = 0;
89 cmd_node->cmd_flag = 0;
90 cmd_node->data_buf = NULL;
91 cmd_node->wait_q_enabled = false;
92
93 if (cmd_node->resp_skb) {
94 dev_kfree_skb_any(cmd_node->resp_skb);
95 cmd_node->resp_skb = NULL;
96 }
97}
98
99/*
100 * This function sends a host command to the firmware.
101 *
102 * The function copies the host command into the driver command
103 * buffer, which will be transferred to the firmware later by the
104 * main thread.
105 */
106static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
107 struct host_cmd_ds_command *cmd, void *data_buf)
108{
109 struct mwifiex_ds_misc_cmd *pcmd_ptr =
110 (struct mwifiex_ds_misc_cmd *) data_buf;
111
112 /* Copy the HOST command to command buffer */
113 memcpy((void *) cmd, pcmd_ptr->cmd, pcmd_ptr->len);
114 dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
115 return 0;
116}
117
118/*
119 * This function downloads a command to the firmware.
120 *
121 * The function performs sanity tests, sets the command sequence
122 * number and size, converts the header fields to CPU format before
123 * sending. Afterwards, it logs the command ID and action for debugging
124 * and sets up the command timeout timer.
125 */
126static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
127 struct cmd_ctrl_node *cmd_node)
128{
129
130 struct mwifiex_adapter *adapter = priv->adapter;
131 int ret;
132 struct host_cmd_ds_command *host_cmd;
133 uint16_t cmd_code;
134 uint16_t cmd_size;
135 struct timeval tstamp;
136 unsigned long flags;
137
138 if (!adapter || !cmd_node)
139 return -1;
140
141 host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
142
143 /* Sanity test */
144 if (host_cmd == NULL || host_cmd->size == 0) {
145 dev_err(adapter->dev, "DNLD_CMD: host_cmd is null"
146 " or cmd size is 0, not sending\n");
147 if (cmd_node->wait_q_enabled)
148 adapter->cmd_wait_q.status = -1;
149 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
150 return -1;
151 }
152
153 /* Set command sequence number */
154 adapter->seq_num++;
155 host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
156 (adapter->seq_num, cmd_node->priv->bss_num,
157 cmd_node->priv->bss_type));
158
159 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
160 adapter->curr_cmd = cmd_node;
161 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
162
163 cmd_code = le16_to_cpu(host_cmd->command);
164 cmd_size = le16_to_cpu(host_cmd->size);
165
166 skb_trim(cmd_node->cmd_skb, cmd_size);
167
168 do_gettimeofday(&tstamp);
169 dev_dbg(adapter->dev, "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d,"
170 " seqno %#x\n",
171 tstamp.tv_sec, tstamp.tv_usec, cmd_code,
172 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
173 le16_to_cpu(host_cmd->seq_num));
174
175 skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
176
177 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
178 cmd_node->cmd_skb->data,
179 cmd_node->cmd_skb->len, NULL);
180
181 skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
182
183 if (ret == -1) {
184 dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
185 if (cmd_node->wait_q_enabled)
186 adapter->cmd_wait_q.status = -1;
187 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
188
189 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
190 adapter->curr_cmd = NULL;
191 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
192
193 adapter->dbg.num_cmd_host_to_card_failure++;
194 return -1;
195 }
196
197 /* Save the last command id and action to debug log */
198 adapter->dbg.last_cmd_index =
199 (adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM;
200 adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code;
201 adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
202 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN));
203
204 /* Clear BSS_NO_BITS from HostCmd */
205 cmd_code &= HostCmd_CMD_ID_MASK;
206
207 /* Setup the timer after transmit command */
208 mod_timer(&adapter->cmd_timer,
209 jiffies + (MWIFIEX_TIMER_10S * HZ) / 1000);
210
211 return 0;
212}
213
214/*
215 * This function downloads a sleep confirm command to the firmware.
216 *
217 * The function performs sanity tests, sets the command sequence
218 * number and size, converts the header fields to CPU format before
219 * sending.
220 *
221 * No responses are needed for sleep confirm command.
222 */
223static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
224{
225 int ret;
226 struct mwifiex_private *priv;
227 struct mwifiex_opt_sleep_confirm *sleep_cfm_buf =
228 (struct mwifiex_opt_sleep_confirm *)
229 adapter->sleep_cfm->data;
230 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
231
232 sleep_cfm_buf->seq_num =
233 cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
234 (adapter->seq_num, priv->bss_num,
235 priv->bss_type)));
236 adapter->seq_num++;
237
238 skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
239 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
240 adapter->sleep_cfm->data,
241 adapter->sleep_cfm->len, NULL);
242 skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
243
244 if (ret == -1) {
245 dev_err(adapter->dev, "SLEEP_CFM: failed\n");
246 adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
247 return -1;
248 }
249 if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
250 == MWIFIEX_BSS_ROLE_STA) {
251 if (!sleep_cfm_buf->resp_ctrl)
252 /* Response is not needed for sleep
253 confirm command */
254 adapter->ps_state = PS_STATE_SLEEP;
255 else
256 adapter->ps_state = PS_STATE_SLEEP_CFM;
257
258 if (!sleep_cfm_buf->resp_ctrl
259 && (adapter->is_hs_configured
260 && !adapter->sleep_period.period)) {
261 adapter->pm_wakeup_card_req = true;
262 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
263 MWIFIEX_BSS_ROLE_STA), true);
264 }
265 }
266
267 return ret;
268}
269
270/*
271 * This function allocates the command buffers and links them to
272 * the command free queue.
273 *
274 * The driver uses a pre-allocated number of command buffers, which
275 * are created at driver initialization and freed at driver cleanup.
276 * Every command needs to obtain a command buffer from this pool before
277 * it can be issued. The command free queue lists the command buffers
278 * currently free to use, while the command pending queue lists the
279 * command buffers already in use and awaiting handling. Command buffers
280 * are returned to the free queue after use.
281 */
282int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
283{
284 struct cmd_ctrl_node *cmd_array;
285 u32 buf_size;
286 u32 i;
287
288 /* Allocate and initialize struct cmd_ctrl_node */
289 buf_size = sizeof(struct cmd_ctrl_node) * MWIFIEX_NUM_OF_CMD_BUFFER;
290 cmd_array = kzalloc(buf_size, GFP_KERNEL);
291 if (!cmd_array) {
292 dev_err(adapter->dev, "%s: failed to alloc cmd_array\n",
293 __func__);
294 return -ENOMEM;
295 }
296
297 adapter->cmd_pool = cmd_array;
298 memset(adapter->cmd_pool, 0, buf_size);
299
300 /* Allocate and initialize command buffers */
301 for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
302 cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
303 if (!cmd_array[i].skb) {
304 dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n");
305 return -1;
306 }
307 }
308
309 for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++)
310 mwifiex_insert_cmd_to_free_q(adapter, &cmd_array[i]);
311
312 return 0;
313}
314
315/*
316 * This function frees the command buffers.
317 *
318 * The function calls the completion callback for all the command
319 * buffers that still have response buffers associated with them.
320 */
321int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
322{
323 struct cmd_ctrl_node *cmd_array;
324 u32 i;
325
326 /* Need to check if cmd pool is allocated or not */
327 if (!adapter->cmd_pool) {
328 dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n");
329 return 0;
330 }
331
332 cmd_array = adapter->cmd_pool;
333
334 /* Release shared memory buffers */
335 for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
336 if (cmd_array[i].skb) {
337 dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i);
338 dev_kfree_skb_any(cmd_array[i].skb);
339 }
340 if (!cmd_array[i].resp_skb)
341 continue;
342 dev_kfree_skb_any(cmd_array[i].resp_skb);
343 }
344 /* Release struct cmd_ctrl_node */
345 if (adapter->cmd_pool) {
346 dev_dbg(adapter->dev, "cmd: free cmd pool\n");
347 kfree(adapter->cmd_pool);
348 adapter->cmd_pool = NULL;
349 }
350
351 return 0;
352}
353
354/*
355 * This function handles events generated by firmware.
356 *
357 * The event body of events received from the firmware is not used (though
358 * it is saved); only the event ID is used. Some events are re-invoked by
359 * the driver, with a new event body.
360 *
361 * After processing, the function calls the completion callback
362 * for cleanup.
363 */
364int mwifiex_process_event(struct mwifiex_adapter *adapter)
365{
366 int ret;
367 struct mwifiex_private *priv =
368 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
369 struct sk_buff *skb = adapter->event_skb;
370 u32 eventcause = adapter->event_cause;
371 struct timeval tstamp;
372 struct mwifiex_rxinfo *rx_info;
373
374 /* Save the last event to debug log */
375 adapter->dbg.last_event_index =
376 (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
377 adapter->dbg.last_event[adapter->dbg.last_event_index] =
378 (u16) eventcause;
379
380 /* Get BSS number and corresponding priv */
381 priv = mwifiex_get_priv_by_id(adapter, EVENT_GET_BSS_NUM(eventcause),
382 EVENT_GET_BSS_TYPE(eventcause));
383 if (!priv)
384 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
385 /* Clear BSS_NO_BITS from event */
386 eventcause &= EVENT_ID_MASK;
387 adapter->event_cause = eventcause;
388
389 if (skb) {
390 rx_info = MWIFIEX_SKB_RXCB(skb);
391 rx_info->bss_index = priv->bss_index;
392 }
393
394 if (eventcause != EVENT_PS_SLEEP && eventcause != EVENT_PS_AWAKE) {
395 do_gettimeofday(&tstamp);
396 dev_dbg(adapter->dev, "event: %lu.%lu: cause: %#x\n",
397 tstamp.tv_sec, tstamp.tv_usec, eventcause);
398 }
399
400 ret = mwifiex_process_sta_event(priv);
401
402 adapter->event_cause = 0;
403 adapter->event_skb = NULL;
404
405 dev_kfree_skb_any(skb);
406
407 return ret;
408}
409
410/*
411 * This function is used to send synchronous command to the firmware.
412 *
413 * It enables the command wait queue for this command and then waits
414 * for the command response.
415 */
416int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
417 u16 cmd_action, u32 cmd_oid, void *data_buf)
418{
419 int ret = 0;
420 struct mwifiex_adapter *adapter = priv->adapter;
421
422 adapter->cmd_wait_q_required = true;
423 adapter->cmd_wait_q.condition = false;
424
425 ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
426 data_buf);
427 if (!ret)
428 ret = mwifiex_wait_queue_complete(adapter);
429
430 return ret;
431}
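/*
 * Illustrative usage (editor's addition, not part of this patch); the
 * command number, action and data buffer below are placeholders:
 *
 *	ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SCAN,
 *				    HostCmd_ACT_GEN_SET, 0, scan_cfg);
 *
 * blocks until the response is processed and the wait queue is
 * completed, whereas mwifiex_send_cmd_async() with the same arguments
 * returns as soon as the command has been queued for the main thread.
 */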
432
433
434/*
435 * This function prepares a command and asynchronously sends it to the firmware.
436 *
437 * Preparation includes -
438 * - Sanity tests to make sure the card is still present or the FW
439 * is not reset
440 * - Getting a new command node from the command free queue
441 * - Initializing the command node for default parameters
442 * - Filling up the non-default parameters and buffer pointers
443 * - Adding the command to the pending queue
444 */
445int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
446 u16 cmd_action, u32 cmd_oid, void *data_buf)
447{
448 int ret;
449 struct mwifiex_adapter *adapter = priv->adapter;
450 struct cmd_ctrl_node *cmd_node;
451 struct host_cmd_ds_command *cmd_ptr;
452
453 if (!adapter) {
454 pr_err("PREP_CMD: adapter is NULL\n");
455 return -1;
456 }
457
458 if (adapter->is_suspended) {
459 dev_err(adapter->dev, "PREP_CMD: device in suspended state\n");
460 return -1;
461 }
462
463 if (adapter->surprise_removed) {
464 dev_err(adapter->dev, "PREP_CMD: card is removed\n");
465 return -1;
466 }
467
468 if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
469 if (cmd_no != HostCmd_CMD_FUNC_INIT) {
470 dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
471 return -1;
472 }
473 }
474
475 /* Get a new command node */
476 cmd_node = mwifiex_get_cmd_node(adapter);
477
478 if (!cmd_node) {
479 dev_err(adapter->dev, "PREP_CMD: no free cmd node\n");
480 return -1;
481 }
482
483 /* Initialize the command node */
484 mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf);
485
486 if (!cmd_node->cmd_skb) {
487 dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
488 return -1;
489 }
490
491 memset(skb_put(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command)),
492 0, sizeof(struct host_cmd_ds_command));
493
494 cmd_ptr = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
495 cmd_ptr->command = cpu_to_le16(cmd_no);
496 cmd_ptr->result = 0;
497
498 /* Prepare command */
499 if (cmd_no) {
500 ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action,
501 cmd_oid, data_buf, cmd_ptr);
502 } else {
503 ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf);
504 cmd_node->cmd_flag |= CMD_F_HOSTCMD;
505 }
506
507 /* Return error, since the command preparation failed */
508 if (ret) {
509 dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
510 cmd_no);
511 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
512 return -1;
513 }
514
515 /* Send command */
516 if (cmd_no == HostCmd_CMD_802_11_SCAN)
517 mwifiex_queue_scan_cmd(priv, cmd_node);
518 else
519 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
520
521 return ret;
522}
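/*
 * Editor's summary (not part of this patch) of the command node life
 * cycle implemented in this file:
 *
 *	cmd_free_q --(mwifiex_get_cmd_node)--> prepare command
 *	  --> cmd_pending_q (scan_pending_q for HostCmd_CMD_802_11_SCAN)
 *	  --(mwifiex_exec_next_cmd -> mwifiex_dnld_cmd_to_fw)--> firmware
 *	  --(mwifiex_process_cmdresp)--> mwifiex_insert_cmd_to_free_q()
 *	  --> back on cmd_free_q
 */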
523
524/*
525 * This function returns a command to the command free queue.
526 *
527 * The function also calls the completion callback if required, before
528 * cleaning the command node and re-inserting it into the free queue.
529 */
530void
531mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
532 struct cmd_ctrl_node *cmd_node)
533{
534 unsigned long flags;
535
536 if (!cmd_node)
537 return;
538
539 if (cmd_node->wait_q_enabled)
540 mwifiex_complete_cmd(adapter);
541 /* Clean the node */
542 mwifiex_clean_cmd_node(adapter, cmd_node);
543
544 /* Insert node into cmd_free_q */
545 spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
546 list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
547 spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
548}
549
550/*
551 * This function queues a command to the command pending queue.
552 *
553 * This in effect adds the command to the command list to be executed.
554 * The Exit PS command is handled specially, by always placing it at the
555 * front of the command queue.
556 */
557void
558mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
559 struct cmd_ctrl_node *cmd_node, u32 add_tail)
560{
561 struct host_cmd_ds_command *host_cmd = NULL;
562 u16 command;
563 unsigned long flags;
564
565 host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
566 if (!host_cmd) {
567 dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n");
568 return;
569 }
570
571 command = le16_to_cpu(host_cmd->command);
572
573	/* The Exit_PS command always needs to be queued at the head. */
574 if (command == HostCmd_CMD_802_11_PS_MODE_ENH) {
575 struct host_cmd_ds_802_11_ps_mode_enh *pm =
576 &host_cmd->params.psmode_enh;
577 if ((le16_to_cpu(pm->action) == DIS_PS)
578 || (le16_to_cpu(pm->action) == DIS_AUTO_PS)) {
579 if (adapter->ps_state != PS_STATE_AWAKE)
580 add_tail = false;
581 }
582 }
583
584 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
585 if (add_tail)
586 list_add_tail(&cmd_node->list, &adapter->cmd_pending_q);
587 else
588 list_add(&cmd_node->list, &adapter->cmd_pending_q);
589 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
590
591 dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x is queued\n", command);
592}
593
594/*
595 * This function executes the next command in command pending queue.
596 *
597 * This function will fail if a command is already in the processing stage;
598 * otherwise it will dequeue the first command from the command pending
599 * queue and send it to the firmware.
600 *
601 * If the device is currently in host sleep mode, any command, except the
602 * host sleep configuration command, will de-activate host sleep. For PS
603 * mode, the function will put the firmware back to sleep if applicable.
604 */
605int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
606{
607 struct mwifiex_private *priv;
608 struct cmd_ctrl_node *cmd_node;
609 int ret = 0;
610 struct host_cmd_ds_command *host_cmd;
611 unsigned long cmd_flags;
612 unsigned long cmd_pending_q_flags;
613
614 /* Check if already in processing */
615 if (adapter->curr_cmd) {
616 dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n");
617 return -1;
618 }
619
620 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
621 /* Check if any command is pending */
622 spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
623 if (list_empty(&adapter->cmd_pending_q)) {
624 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
625 cmd_pending_q_flags);
626 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
627 return 0;
628 }
629 cmd_node = list_first_entry(&adapter->cmd_pending_q,
630 struct cmd_ctrl_node, list);
631 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
632 cmd_pending_q_flags);
633
634 host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
635 priv = cmd_node->priv;
636
637 if (adapter->ps_state != PS_STATE_AWAKE) {
638 dev_err(adapter->dev, "%s: cannot send cmd in sleep state,"
639 " this should not happen\n", __func__);
640 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
641 return ret;
642 }
643
644 spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
645 list_del(&cmd_node->list);
646 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
647 cmd_pending_q_flags);
648
649 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
650 ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
651 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
652 /* Any command sent to the firmware when host is in sleep
653 * mode should de-configure host sleep. We should skip the
654 * host sleep configuration command itself though
655 */
656 if (priv && (host_cmd->command !=
657 cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
658 if (adapter->hs_activated) {
659 adapter->is_hs_configured = false;
660 mwifiex_hs_activated_event(priv, false);
661 }
662 }
663
664 return ret;
665}
666
667/*
668 * This function handles the command response.
669 *
670 * After processing, the function cleans the command node and puts
671 * it back to the command free queue.
672 */
673int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
674{
675 struct host_cmd_ds_command *resp;
676 struct mwifiex_private *priv =
677 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
678 int ret = 0;
679 uint16_t orig_cmdresp_no;
680 uint16_t cmdresp_no;
681 uint16_t cmdresp_result;
682 struct timeval tstamp;
683 unsigned long flags;
684
685 /* Now we got response from FW, cancel the command timer */
686 del_timer(&adapter->cmd_timer);
687
688 if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
689 resp = (struct host_cmd_ds_command *) adapter->upld_buf;
690 dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
691 le16_to_cpu(resp->command));
692 return -1;
693 }
694
695 adapter->num_cmd_timeout = 0;
696
697 resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
698 if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
699 dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
700 le16_to_cpu(resp->command));
701 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
702 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
703 adapter->curr_cmd = NULL;
704 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
705 return -1;
706 }
707
708 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
709 /* Copy original response back to response buffer */
710 struct mwifiex_ds_misc_cmd *hostcmd = NULL;
711 uint16_t size = le16_to_cpu(resp->size);
712 dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
713 size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
714 if (adapter->curr_cmd->data_buf) {
715 hostcmd = (struct mwifiex_ds_misc_cmd *)
716 adapter->curr_cmd->data_buf;
717 hostcmd->len = size;
718 memcpy(hostcmd->cmd, (void *) resp, size);
719 }
720 }
721 orig_cmdresp_no = le16_to_cpu(resp->command);
722
723 /* Get BSS number and corresponding priv */
724 priv = mwifiex_get_priv_by_id(adapter,
725 HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)),
726 HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num)));
727 if (!priv)
728 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
729 /* Clear RET_BIT from HostCmd */
730 resp->command = cpu_to_le16(orig_cmdresp_no & HostCmd_CMD_ID_MASK);
731
732 cmdresp_no = le16_to_cpu(resp->command);
733 cmdresp_result = le16_to_cpu(resp->result);
734
735 /* Save the last command response to debug log */
736 adapter->dbg.last_cmd_resp_index =
737 (adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM;
738 adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
739 orig_cmdresp_no;
740
741 do_gettimeofday(&tstamp);
742 dev_dbg(adapter->dev, "cmd: CMD_RESP: (%lu.%lu): 0x%x, result %d,"
743 " len %d, seqno 0x%x\n",
744 tstamp.tv_sec, tstamp.tv_usec, orig_cmdresp_no, cmdresp_result,
745 le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
746
747 if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
748 dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
749 if (adapter->curr_cmd->wait_q_enabled)
750 adapter->cmd_wait_q.status = -1;
751
752 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
753 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
754 adapter->curr_cmd = NULL;
755 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
756 return -1;
757 }
758
759 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
760 adapter->curr_cmd->cmd_flag &= ~CMD_F_HOSTCMD;
761 if ((cmdresp_result == HostCmd_RESULT_OK)
762 && (cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH))
763 ret = mwifiex_ret_802_11_hs_cfg(priv, resp);
764 } else {
765 /* handle response */
766 ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp);
767 }
768
769 /* Check init command response */
770 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
771 if (ret == -1) {
772 dev_err(adapter->dev, "%s: cmd %#x failed during "
773 "initialization\n", __func__, cmdresp_no);
774 mwifiex_init_fw_complete(adapter);
775 return -1;
776 } else if (adapter->last_init_cmd == cmdresp_no)
777 adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE;
778 }
779
780 if (adapter->curr_cmd) {
781 if (adapter->curr_cmd->wait_q_enabled && (!ret))
782 adapter->cmd_wait_q.status = 0;
783 else if (adapter->curr_cmd->wait_q_enabled && (ret == -1))
784 adapter->cmd_wait_q.status = -1;
785
786 /* Clean up and put current command back to cmd_free_q */
787 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
788
789 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
790 adapter->curr_cmd = NULL;
791 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
792 }
793
794 return ret;
795}
796
797/*
798 * This function handles the timeout of command sending.
799 *
800 * It will re-send the same command again.
801 */
802void
803mwifiex_cmd_timeout_func(unsigned long function_context)
804{
805 struct mwifiex_adapter *adapter =
806 (struct mwifiex_adapter *) function_context;
807 struct cmd_ctrl_node *cmd_node;
808 struct timeval tstamp;
809
810 adapter->num_cmd_timeout++;
811 adapter->dbg.num_cmd_timeout++;
812 if (!adapter->curr_cmd) {
813 dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
814 return;
815 }
816 cmd_node = adapter->curr_cmd;
817 if (cmd_node->wait_q_enabled)
818 adapter->cmd_wait_q.status = -ETIMEDOUT;
819
820 if (cmd_node) {
821 adapter->dbg.timeout_cmd_id =
822 adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
823 adapter->dbg.timeout_cmd_act =
824 adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
825 do_gettimeofday(&tstamp);
826 dev_err(adapter->dev, "%s: Timeout cmd id (%lu.%lu) = %#x,"
827 " act = %#x\n", __func__,
828 tstamp.tv_sec, tstamp.tv_usec,
829 adapter->dbg.timeout_cmd_id,
830 adapter->dbg.timeout_cmd_act);
831
832 dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
833 adapter->dbg.num_tx_host_to_card_failure);
834 dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
835 adapter->dbg.num_cmd_host_to_card_failure);
836
837 dev_err(adapter->dev, "num_cmd_timeout = %d\n",
838 adapter->dbg.num_cmd_timeout);
839 dev_err(adapter->dev, "num_tx_timeout = %d\n",
840 adapter->dbg.num_tx_timeout);
841
842 dev_err(adapter->dev, "last_cmd_index = %d\n",
843 adapter->dbg.last_cmd_index);
844 print_hex_dump_bytes("last_cmd_id: ", DUMP_PREFIX_OFFSET,
845 adapter->dbg.last_cmd_id, DBG_CMD_NUM);
846 print_hex_dump_bytes("last_cmd_act: ", DUMP_PREFIX_OFFSET,
847 adapter->dbg.last_cmd_act, DBG_CMD_NUM);
848
849 dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
850 adapter->dbg.last_cmd_resp_index);
851 print_hex_dump_bytes("last_cmd_resp_id: ", DUMP_PREFIX_OFFSET,
852 adapter->dbg.last_cmd_resp_id, DBG_CMD_NUM);
853
854 dev_err(adapter->dev, "last_event_index = %d\n",
855 adapter->dbg.last_event_index);
856 print_hex_dump_bytes("last_event: ", DUMP_PREFIX_OFFSET,
857 adapter->dbg.last_event, DBG_CMD_NUM);
858
859 dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
860 adapter->data_sent, adapter->cmd_sent);
861
862 dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
863 adapter->ps_mode, adapter->ps_state);
864 }
865 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
866 mwifiex_init_fw_complete(adapter);
867}
868
869/*
870 * This function cancels all the pending commands.
871 *
872 * The current command, all commands in the command pending queue and all
873 * scan commands in the scan pending queue are cancelled. The completion callbacks
874 * are called with failure status to ensure cleanup.
875 */
876void
877mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
878{
879 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
880 unsigned long flags;
881
882 /* Cancel current cmd */
883 if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
884 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
885 adapter->curr_cmd->wait_q_enabled = false;
886 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
887 adapter->cmd_wait_q.status = -1;
888 mwifiex_complete_cmd(adapter);
889 }
890 /* Cancel all pending command */
891 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
892 list_for_each_entry_safe(cmd_node, tmp_node,
893 &adapter->cmd_pending_q, list) {
894 list_del(&cmd_node->list);
895 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
896
897 if (cmd_node->wait_q_enabled) {
898 adapter->cmd_wait_q.status = -1;
899 mwifiex_complete_cmd(adapter);
900 cmd_node->wait_q_enabled = false;
901 }
902 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
903 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
904 }
905 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
906
907 /* Cancel all pending scan command */
908 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
909 list_for_each_entry_safe(cmd_node, tmp_node,
910 &adapter->scan_pending_q, list) {
911 list_del(&cmd_node->list);
912 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
913
914 cmd_node->wait_q_enabled = false;
915 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
916 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
917 }
918 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
919
920 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
921 adapter->scan_processing = false;
922 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
923}
924
925/*
926 * This function cancels all pending commands that match
927 * the given IOCTL request.
928 *
929 * Both the current command buffer and the pending command queue are
930 * searched for a matching IOCTL request. The completion callback of
931 * the matched command is called with failure status to ensure cleanup.
932 * In case of scan commands, all pending commands in scan pending queue
933 * are cancelled.
934 */
935void
936mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
937{
938 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
939 unsigned long cmd_flags;
940 unsigned long cmd_pending_q_flags;
941 unsigned long scan_pending_q_flags;
942 uint16_t cancel_scan_cmd = false;
943
944 if ((adapter->curr_cmd) &&
945 (adapter->curr_cmd->wait_q_enabled)) {
946 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
947 cmd_node = adapter->curr_cmd;
948 cmd_node->wait_q_enabled = false;
949 cmd_node->cmd_flag |= CMD_F_CANCELED;
950 spin_lock_irqsave(&adapter->cmd_pending_q_lock,
951 cmd_pending_q_flags);
952 list_del(&cmd_node->list);
953 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
954 cmd_pending_q_flags);
955 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
956 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
957 }
958
959 /* Cancel all pending scan command */
960 spin_lock_irqsave(&adapter->scan_pending_q_lock,
961 scan_pending_q_flags);
962 list_for_each_entry_safe(cmd_node, tmp_node,
963 &adapter->scan_pending_q, list) {
964 list_del(&cmd_node->list);
965 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
966 scan_pending_q_flags);
967 cmd_node->wait_q_enabled = false;
968 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
969 spin_lock_irqsave(&adapter->scan_pending_q_lock,
970 scan_pending_q_flags);
971 cancel_scan_cmd = true;
972 }
973 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
974 scan_pending_q_flags);
975
976 if (cancel_scan_cmd) {
977 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
978 adapter->scan_processing = false;
979 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
980 }
981 adapter->cmd_wait_q.status = -1;
982 mwifiex_complete_cmd(adapter);
983}
984
985/*
986 * This function sends the sleep confirm command to firmware, if
987 * possible.
988 *
989 * The sleep confirm command cannot be issued if a command response,
990 * data response or event response is awaiting handling, if we are in
991 * the middle of sending a command, or if we are expecting a command
992 * response.
993 */
994void
995mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
996{
997 if (!adapter->cmd_sent &&
998 !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
999 mwifiex_dnld_sleep_confirm_cmd(adapter);
1000 else
1001 dev_dbg(adapter->dev,
1002 "cmd: Delay Sleep Confirm (%s%s%s)\n",
1003 (adapter->cmd_sent) ? "D" : "",
1004 (adapter->curr_cmd) ? "C" : "",
1005 (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
1006}
1007
1008/*
1009 * This function sends a Host Sleep activated event to applications.
1010 *
1011 * This event is generated by the driver, with a blank event body.
1012 */
1013void
1014mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
1015{
1016 if (activated) {
1017 if (priv->adapter->is_hs_configured) {
1018 priv->adapter->hs_activated = true;
1019 dev_dbg(priv->adapter->dev, "event: hs_activated\n");
1020 priv->adapter->hs_activate_wait_q_woken = true;
1021 wake_up_interruptible(
1022 &priv->adapter->hs_activate_wait_q);
1023 } else {
1024 dev_dbg(priv->adapter->dev, "event: HS not configured\n");
1025 }
1026 } else {
1027 dev_dbg(priv->adapter->dev, "event: hs_deactivated\n");
1028 priv->adapter->hs_activated = false;
1029 }
1030}
1031
1032/*
1033 * This function handles the command response of a Host Sleep configuration
1034 * command.
1035 *
1036 * Handling includes changing the header fields into CPU format
1037 * and setting the current host sleep activation status in driver.
1038 *
1039 * If the host sleep status changes, the function generates an event to
1040 * notify the applications.
1041 */
1042int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1043 struct host_cmd_ds_command *resp)
1044{
1045 struct mwifiex_adapter *adapter = priv->adapter;
1046 struct host_cmd_ds_802_11_hs_cfg_enh *phs_cfg =
1047 &resp->params.opt_hs_cfg;
1048 uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
1049
1050 if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE)) {
1051 mwifiex_hs_activated_event(priv, true);
1052 return 0;
1053 } else {
1054 dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
1055 " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
1056 resp->result, conditions,
1057 phs_cfg->params.hs_config.gpio,
1058 phs_cfg->params.hs_config.gap);
1059 }
1060 if (conditions != HOST_SLEEP_CFG_CANCEL) {
1061 adapter->is_hs_configured = true;
1062 } else {
1063 adapter->is_hs_configured = false;
1064 if (adapter->hs_activated)
1065 mwifiex_hs_activated_event(priv, false);
1066 }
1067
1068 return 0;
1069}
1070
1071/*
1072 * This function wakes up the adapter and generates a Host Sleep
1073 * cancel event on receiving the power up interrupt.
1074 */
1075void
1076mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1077{
1078 dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep"
1079 " since there is interrupt from the firmware\n", __func__);
1080
1081 adapter->if_ops.wakeup(adapter);
1082 adapter->hs_activated = false;
1083 adapter->is_hs_configured = false;
1084 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1085 MWIFIEX_BSS_ROLE_ANY), false);
1086}
1087
1088/*
1089 * This function handles the command response of a sleep confirm command.
1090 *
1091 * The function sets the card state to SLEEP if the response indicates success.
1092 */
1093void
1094mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
1095 u8 *pbuf, u32 upld_len)
1096{
1097 struct host_cmd_ds_command *cmd = (struct host_cmd_ds_command *) pbuf;
1098 struct mwifiex_private *priv =
1099 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
1100 uint16_t result = le16_to_cpu(cmd->result);
1101 uint16_t command = le16_to_cpu(cmd->command);
1102 uint16_t seq_num = le16_to_cpu(cmd->seq_num);
1103
1104 if (!upld_len) {
1105 dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
1106 return;
1107 }
1108
1109 /* Get BSS number and corresponding priv */
1110 priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
1111 HostCmd_GET_BSS_TYPE(seq_num));
1112 if (!priv)
1113 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
1114
1115 /* Update sequence number */
1116 seq_num = HostCmd_GET_SEQ_NO(seq_num);
1117 /* Clear RET_BIT from HostCmd */
1118 command &= HostCmd_CMD_ID_MASK;
1119
1120 if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
1121 dev_err(adapter->dev, "%s: received unexpected response for"
1122 " cmd %x, result = %x\n", __func__, command, result);
1123 return;
1124 }
1125
1126 if (result) {
1127 dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
1128 __func__);
1129 adapter->pm_wakeup_card_req = false;
1130 adapter->ps_state = PS_STATE_AWAKE;
1131 return;
1132 }
1133 adapter->pm_wakeup_card_req = true;
1134 if (adapter->is_hs_configured)
1135 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1136 MWIFIEX_BSS_ROLE_ANY), true);
1137 adapter->ps_state = PS_STATE_SLEEP;
1138 cmd->command = cpu_to_le16(command);
1139 cmd->seq_num = cpu_to_le16(seq_num);
1140}
1141EXPORT_SYMBOL_GPL(mwifiex_process_sleep_confirm_resp);
1142
1143/*
1144 * This function prepares an enhanced power mode command.
1145 *
1146 * This function can be used to disable power save or to configure
1147 * power save with auto PS or STA PS or auto deep sleep.
1148 *
1149 * Preparation includes -
1150 * - Setting command ID, action and proper size
1151 * - Setting Power Save bitmap, PS parameters TLV, PS mode TLV,
1152 * auto deep sleep TLV (as required)
1153 * - Ensuring correct endian-ness
1154 */
1155int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
1156 struct host_cmd_ds_command *cmd,
1157 u16 cmd_action, uint16_t ps_bitmap,
1158 void *data_buf)
1159{
1160 struct host_cmd_ds_802_11_ps_mode_enh *psmode_enh =
1161 &cmd->params.psmode_enh;
1162 u8 *tlv;
1163 u16 cmd_size = 0;
1164
1165 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
1166 if (cmd_action == DIS_AUTO_PS) {
1167 psmode_enh->action = cpu_to_le16(DIS_AUTO_PS);
1168 psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
1169 cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
1170 sizeof(psmode_enh->params.ps_bitmap));
1171 } else if (cmd_action == GET_PS) {
1172 psmode_enh->action = cpu_to_le16(GET_PS);
1173 psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
1174 cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
1175 sizeof(psmode_enh->params.ps_bitmap));
1176 } else if (cmd_action == EN_AUTO_PS) {
1177 psmode_enh->action = cpu_to_le16(EN_AUTO_PS);
1178 psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
1179 cmd_size = S_DS_GEN + sizeof(psmode_enh->action) +
1180 sizeof(psmode_enh->params.ps_bitmap);
1181 tlv = (u8 *) cmd + cmd_size;
1182 if (ps_bitmap & BITMAP_STA_PS) {
1183 struct mwifiex_adapter *adapter = priv->adapter;
1184 struct mwifiex_ie_types_ps_param *ps_tlv =
1185 (struct mwifiex_ie_types_ps_param *) tlv;
1186 struct mwifiex_ps_param *ps_mode = &ps_tlv->param;
1187 ps_tlv->header.type = cpu_to_le16(TLV_TYPE_PS_PARAM);
1188 ps_tlv->header.len = cpu_to_le16(sizeof(*ps_tlv) -
1189 sizeof(struct mwifiex_ie_types_header));
1190 cmd_size += sizeof(*ps_tlv);
1191 tlv += sizeof(*ps_tlv);
1192 dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
1193 ps_mode->null_pkt_interval =
1194 cpu_to_le16(adapter->null_pkt_interval);
1195 ps_mode->multiple_dtims =
1196 cpu_to_le16(adapter->multiple_dtim);
1197 ps_mode->bcn_miss_timeout =
1198 cpu_to_le16(adapter->bcn_miss_time_out);
1199 ps_mode->local_listen_interval =
1200 cpu_to_le16(adapter->local_listen_interval);
1201 ps_mode->adhoc_wake_period =
1202 cpu_to_le16(adapter->adhoc_awake_period);
1203 ps_mode->delay_to_ps =
1204 cpu_to_le16(adapter->delay_to_ps);
1205 ps_mode->mode =
1206 cpu_to_le16(adapter->enhanced_ps_mode);
1207
1208 }
1209 if (ps_bitmap & BITMAP_AUTO_DS) {
1210 struct mwifiex_ie_types_auto_ds_param *auto_ds_tlv =
1211 (struct mwifiex_ie_types_auto_ds_param *) tlv;
1212 u16 idletime = 0;
1213
1214 auto_ds_tlv->header.type =
1215 cpu_to_le16(TLV_TYPE_AUTO_DS_PARAM);
1216 auto_ds_tlv->header.len =
1217 cpu_to_le16(sizeof(*auto_ds_tlv) -
1218 sizeof(struct mwifiex_ie_types_header));
1219 cmd_size += sizeof(*auto_ds_tlv);
1220 tlv += sizeof(*auto_ds_tlv);
1221 if (data_buf)
1222 idletime = ((struct mwifiex_ds_auto_ds *)
1223 data_buf)->idle_time;
1224 dev_dbg(priv->adapter->dev,
1225 "cmd: PS Command: Enter Auto Deep Sleep\n");
1226 auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
1227 }
1228 cmd->size = cpu_to_le16(cmd_size);
1229 }
1230 return 0;
1231}
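
As a side note for readers tracing the size arithmetic above, the sketch below (not part of the original patch; the helper name is hypothetical) mirrors how the command length is accumulated from the fixed action/ps_bitmap fields plus the optional TLVs appended for EN_AUTO_PS:

static u16 example_enh_ps_cmd_size(uint16_t ps_bitmap)
{
	/* Generic command header plus the fixed action/ps_bitmap fields */
	u16 size = S_DS_GEN +
		   sizeof(((struct host_cmd_ds_802_11_ps_mode_enh *)0)->action) +
		   sizeof(((struct host_cmd_ds_802_11_ps_mode_enh *)0)->params.ps_bitmap);

	if (ps_bitmap & BITMAP_STA_PS)		/* PS parameters TLV appended */
		size += sizeof(struct mwifiex_ie_types_ps_param);
	if (ps_bitmap & BITMAP_AUTO_DS)		/* auto deep sleep TLV appended */
		size += sizeof(struct mwifiex_ie_types_auto_ds_param);

	return size;
}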
1232
1233/*
1234 * This function handles the command response of an enhanced power mode
1235 * command.
1236 *
1237 * Handling includes changing the header fields into CPU format
1238 * and setting the current enhanced power mode in driver.
1239 */
1240int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
1241 struct host_cmd_ds_command *resp,
1242 void *data_buf)
1243{
1244 struct mwifiex_adapter *adapter = priv->adapter;
1245 struct host_cmd_ds_802_11_ps_mode_enh *ps_mode =
1246 &resp->params.psmode_enh;
1247 uint16_t action = le16_to_cpu(ps_mode->action);
1248 uint16_t ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap);
1249 uint16_t auto_ps_bitmap =
1250 le16_to_cpu(ps_mode->params.ps_bitmap);
1251
1252 dev_dbg(adapter->dev, "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
1253 __func__, resp->result, action);
1254 if (action == EN_AUTO_PS) {
1255 if (auto_ps_bitmap & BITMAP_AUTO_DS) {
1256 dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
1257 priv->adapter->is_deep_sleep = true;
1258 }
1259 if (auto_ps_bitmap & BITMAP_STA_PS) {
1260 dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
1261 if (adapter->sleep_period.period)
1262 dev_dbg(adapter->dev, "cmd: set to uapsd/pps mode\n");
1263 }
1264 } else if (action == DIS_AUTO_PS) {
1265 if (ps_bitmap & BITMAP_AUTO_DS) {
1266 priv->adapter->is_deep_sleep = false;
1267 dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n");
1268 }
1269 if (ps_bitmap & BITMAP_STA_PS) {
1270 dev_dbg(adapter->dev, "cmd: Disabled STA power save\n");
1271 if (adapter->sleep_period.period) {
1272 adapter->delay_null_pkt = false;
1273 adapter->tx_lock_flag = false;
1274 adapter->pps_uapsd_mode = false;
1275 }
1276 }
1277 } else if (action == GET_PS) {
1278 if (ps_bitmap & BITMAP_STA_PS)
1279 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1280 else
1281 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
1282
1283 dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
1284
1285 if (data_buf) {
1286 /* This section is for get power save mode */
1287 struct mwifiex_ds_pm_cfg *pm_cfg =
1288 (struct mwifiex_ds_pm_cfg *)data_buf;
1289 if (ps_bitmap & BITMAP_STA_PS)
1290 pm_cfg->param.ps_mode = 1;
1291 else
1292 pm_cfg->param.ps_mode = 0;
1293 }
1294 }
1295 return 0;
1296}
1297
1298/*
1299 * This function prepares command to get hardware specifications.
1300 *
1301 * Preparation includes -
1302 * - Setting command ID, action and proper size
1303 * - Setting permanent address parameter
1304 * - Ensuring correct endian-ness
1305 */
1306int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
1307 struct host_cmd_ds_command *cmd)
1308{
1309 struct host_cmd_ds_get_hw_spec *hw_spec = &cmd->params.hw_spec;
1310
1311 cmd->command = cpu_to_le16(HostCmd_CMD_GET_HW_SPEC);
1312 cmd->size =
1313 cpu_to_le16(sizeof(struct host_cmd_ds_get_hw_spec) + S_DS_GEN);
1314 memcpy(hw_spec->permanent_addr, priv->curr_addr, ETH_ALEN);
1315
1316 return 0;
1317}
1318
1319/*
1320 * This function handles the command response of get hardware
1321 * specifications.
1322 *
1323 * Handling includes changing the header fields into CPU format
1324 * and saving/updating the following parameters in driver -
1325 * - Firmware capability information
1326 * - Firmware band settings
1327 * - Ad-hoc start band and channel
1328 * - Ad-hoc 11n activation status
1329 * - Firmware release number
1330 * - Number of antennas
1331 * - Hardware address
1332 * - Hardware interface version
1333 * - Firmware version
1334 * - Region code
1335 * - 11n capabilities
1336 * - MCS support fields
1337 * - MP end port
1338 */
1339int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1340 struct host_cmd_ds_command *resp)
1341{
1342 struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
1343 struct mwifiex_adapter *adapter = priv->adapter;
1344 int i;
1345
1346 adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
1347
1348 if (IS_SUPPORT_MULTI_BANDS(adapter))
1349 adapter->fw_bands = (u8) GET_FW_DEFAULT_BANDS(adapter);
1350 else
1351 adapter->fw_bands = BAND_B;
1352
1353 adapter->config_bands = adapter->fw_bands;
1354
1355 if (adapter->fw_bands & BAND_A) {
1356 if (adapter->fw_bands & BAND_GN) {
1357 adapter->config_bands |= BAND_AN;
1358 adapter->fw_bands |= BAND_AN;
1359 }
1360 if (adapter->fw_bands & BAND_AN) {
1361 adapter->adhoc_start_band = BAND_A | BAND_AN;
1362 adapter->adhoc_11n_enabled = true;
1363 } else {
1364 adapter->adhoc_start_band = BAND_A;
1365 }
1366 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL_A;
1367 } else if (adapter->fw_bands & BAND_GN) {
1368 adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
1369 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
1370 adapter->adhoc_11n_enabled = true;
1371 } else if (adapter->fw_bands & BAND_G) {
1372 adapter->adhoc_start_band = BAND_G | BAND_B;
1373 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
1374 } else if (adapter->fw_bands & BAND_B) {
1375 adapter->adhoc_start_band = BAND_B;
1376 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
1377 }
1378
1379 adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
1380 adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
1381
1382 dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
1383 adapter->fw_release_number);
1384 dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
1385 hw_spec->permanent_addr);
1386 dev_dbg(adapter->dev, "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
1387 le16_to_cpu(hw_spec->hw_if_version),
1388 le16_to_cpu(hw_spec->version));
1389
1390 if (priv->curr_addr[0] == 0xff)
1391 memmove(priv->curr_addr, hw_spec->permanent_addr, ETH_ALEN);
1392
1393 adapter->region_code = le16_to_cpu(hw_spec->region_code);
1394
1395 for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++)
1396 /* Use the region code to search for the index */
1397 if (adapter->region_code == region_code_index[i])
1398 break;
1399
1400 /* If it's an unidentified region code, use the default (USA) */
1401 if (i >= MWIFIEX_MAX_REGION_CODE) {
1402 adapter->region_code = 0x10;
1403 dev_dbg(adapter->dev, "cmd: unknown region code, use default (USA)\n");
1404 }
1405
1406 adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
1407 adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
1408
1409 if (adapter->if_ops.update_mp_end_port)
1410 adapter->if_ops.update_mp_end_port(adapter,
1411 le16_to_cpu(hw_spec->mp_end_port));
1412
1413 return 0;
1414}
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
new file mode 100644
index 000000000000..46d65e02c7ba
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -0,0 +1,770 @@
1/*
2 * Marvell Wireless LAN device driver: debugfs
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include <linux/debugfs.h>
21
22#include "main.h"
23#include "11n.h"
24
25
26static struct dentry *mwifiex_dfs_dir;
27
28static char *bss_modes[] = {
29 "Unknown",
30 "Managed",
31 "Ad-hoc",
32 "Auto"
33};
34
35/* size/addr for mwifiex_debug_info */
36#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n))
37#define item_addr(n) (offsetof(struct mwifiex_debug_info, n))
38
39/* size/addr for struct mwifiex_adapter */
40#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n))
41#define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n))
42
43struct mwifiex_debug_data {
44 char name[32]; /* variable/array name */
45 u32 size; /* size of the variable/array */
46 size_t addr; /* address of the variable/array */
47 int num; /* number of variables in an array */
48};
49
50static struct mwifiex_debug_data items[] = {
51 {"int_counter", item_size(int_counter),
52 item_addr(int_counter), 1},
53 {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
54 item_addr(packets_out[WMM_AC_VO]), 1},
55 {"wmm_ac_vi", item_size(packets_out[WMM_AC_VI]),
56 item_addr(packets_out[WMM_AC_VI]), 1},
57 {"wmm_ac_be", item_size(packets_out[WMM_AC_BE]),
58 item_addr(packets_out[WMM_AC_BE]), 1},
59 {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
60 item_addr(packets_out[WMM_AC_BK]), 1},
61 {"max_tx_buf_size", item_size(max_tx_buf_size),
62 item_addr(max_tx_buf_size), 1},
63 {"tx_buf_size", item_size(tx_buf_size),
64 item_addr(tx_buf_size), 1},
65 {"curr_tx_buf_size", item_size(curr_tx_buf_size),
66 item_addr(curr_tx_buf_size), 1},
67 {"ps_mode", item_size(ps_mode),
68 item_addr(ps_mode), 1},
69 {"ps_state", item_size(ps_state),
70 item_addr(ps_state), 1},
71 {"is_deep_sleep", item_size(is_deep_sleep),
72 item_addr(is_deep_sleep), 1},
73 {"wakeup_dev_req", item_size(pm_wakeup_card_req),
74 item_addr(pm_wakeup_card_req), 1},
75 {"wakeup_tries", item_size(pm_wakeup_fw_try),
76 item_addr(pm_wakeup_fw_try), 1},
77 {"hs_configured", item_size(is_hs_configured),
78 item_addr(is_hs_configured), 1},
79 {"hs_activated", item_size(hs_activated),
80 item_addr(hs_activated), 1},
81 {"num_tx_timeout", item_size(num_tx_timeout),
82 item_addr(num_tx_timeout), 1},
83 {"num_cmd_timeout", item_size(num_cmd_timeout),
84 item_addr(num_cmd_timeout), 1},
85 {"timeout_cmd_id", item_size(timeout_cmd_id),
86 item_addr(timeout_cmd_id), 1},
87 {"timeout_cmd_act", item_size(timeout_cmd_act),
88 item_addr(timeout_cmd_act), 1},
89 {"last_cmd_id", item_size(last_cmd_id),
90 item_addr(last_cmd_id), DBG_CMD_NUM},
91 {"last_cmd_act", item_size(last_cmd_act),
92 item_addr(last_cmd_act), DBG_CMD_NUM},
93 {"last_cmd_index", item_size(last_cmd_index),
94 item_addr(last_cmd_index), 1},
95 {"last_cmd_resp_id", item_size(last_cmd_resp_id),
96 item_addr(last_cmd_resp_id), DBG_CMD_NUM},
97 {"last_cmd_resp_index", item_size(last_cmd_resp_index),
98 item_addr(last_cmd_resp_index), 1},
99 {"last_event", item_size(last_event),
100 item_addr(last_event), DBG_CMD_NUM},
101 {"last_event_index", item_size(last_event_index),
102 item_addr(last_event_index), 1},
103 {"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure),
104 item_addr(num_cmd_host_to_card_failure), 1},
105 {"num_cmd_sleep_cfm_fail",
106 item_size(num_cmd_sleep_cfm_host_to_card_failure),
107 item_addr(num_cmd_sleep_cfm_host_to_card_failure), 1},
108 {"num_tx_h2c_fail", item_size(num_tx_host_to_card_failure),
109 item_addr(num_tx_host_to_card_failure), 1},
110 {"num_evt_deauth", item_size(num_event_deauth),
111 item_addr(num_event_deauth), 1},
112 {"num_evt_disassoc", item_size(num_event_disassoc),
113 item_addr(num_event_disassoc), 1},
114 {"num_evt_link_lost", item_size(num_event_link_lost),
115 item_addr(num_event_link_lost), 1},
116 {"num_cmd_deauth", item_size(num_cmd_deauth),
117 item_addr(num_cmd_deauth), 1},
118 {"num_cmd_assoc_ok", item_size(num_cmd_assoc_success),
119 item_addr(num_cmd_assoc_success), 1},
120 {"num_cmd_assoc_fail", item_size(num_cmd_assoc_failure),
121 item_addr(num_cmd_assoc_failure), 1},
122 {"cmd_sent", item_size(cmd_sent),
123 item_addr(cmd_sent), 1},
124 {"data_sent", item_size(data_sent),
125 item_addr(data_sent), 1},
126 {"cmd_resp_received", item_size(cmd_resp_received),
127 item_addr(cmd_resp_received), 1},
128 {"event_received", item_size(event_received),
129 item_addr(event_received), 1},
130
131 /* variables defined in struct mwifiex_adapter */
132 {"cmd_pending", adapter_item_size(cmd_pending),
133 adapter_item_addr(cmd_pending), 1},
134 {"tx_pending", adapter_item_size(tx_pending),
135 adapter_item_addr(tx_pending), 1},
136 {"rx_pending", adapter_item_size(rx_pending),
137 adapter_item_addr(rx_pending), 1},
138};
139
140static int num_of_items = ARRAY_SIZE(items);
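
A brief reading of the table mechanics (an illustrative note, not new functionality): each entry tells the dump loop further below how many values to print, how wide each one is, and where to find it.

/*
 * Example: for the "ps_state" entry, size is
 * FIELD_SIZEOF(struct mwifiex_debug_info, ps_state) and addr is
 * offsetof(struct mwifiex_debug_info, ps_state), which the read handler
 * adds to &info before dereferencing; num == 1 means a single value is
 * printed. The final three entries are offsets into struct
 * mwifiex_adapter and are resolved against priv->adapter instead.
 */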
141
142/*
143 * Generic proc file open handler.
144 *
145 * This function is called when a file is opened; it saves the private data for the read/write handlers.
146 */
147static int
148mwifiex_open_generic(struct inode *inode, struct file *file)
149{
150 file->private_data = inode->i_private;
151 return 0;
152}
153
154/*
155 * Proc info file read handler.
156 *
157 * This function is called when the 'info' file is opened for reading.
158 * It prints the following driver related information -
159 * - Driver name
160 * - Driver version
161 * - Driver extended version
162 * - Interface name
163 * - BSS mode
164 * - Media state (connected or disconnected)
165 * - MAC address
166 * - Total number of Tx bytes
167 * - Total number of Rx bytes
168 * - Total number of Tx packets
169 * - Total number of Rx packets
170 * - Total number of dropped Tx packets
171 * - Total number of dropped Rx packets
172 * - Total number of corrupted Tx packets
173 * - Total number of corrupted Rx packets
174 * - Carrier status (on or off)
175 * - Tx queue status (started or stopped)
176 *
177 * For STA mode drivers, it also prints the following extra -
178 * - ESSID
179 * - BSSID
180 * - Channel
181 * - Region code
182 * - Multicast count
183 * - Multicast addresses
184 */
185static ssize_t
186mwifiex_info_read(struct file *file, char __user *ubuf,
187 size_t count, loff_t *ppos)
188{
189 struct mwifiex_private *priv =
190 (struct mwifiex_private *) file->private_data;
191 struct net_device *netdev = priv->netdev;
192 struct netdev_hw_addr *ha;
193 unsigned long page = get_zeroed_page(GFP_KERNEL);
194 char *p = (char *) page, fmt[64];
195 struct mwifiex_bss_info info;
196 ssize_t ret;
197 int i = 0;
198
199 if (!p)
200 return -ENOMEM;
201
202 memset(&info, 0, sizeof(info));
203 ret = mwifiex_get_bss_info(priv, &info);
204 if (ret)
205 goto free_and_exit;
206
207 mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1);
208
209 if (!priv->version_str[0])
210 mwifiex_get_ver_ext(priv);
211
212 p += sprintf(p, "driver_name = \"mwifiex\"\n");
213 p += sprintf(p, "driver_version = %s", fmt);
214 p += sprintf(p, "\nverext = %s", priv->version_str);
215 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
216 p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
217 p += sprintf(p, "media_state=\"%s\"\n",
218 (!priv->media_connected ? "Disconnected" : "Connected"));
219 p += sprintf(p, "mac_address=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
220 netdev->dev_addr[0], netdev->dev_addr[1],
221 netdev->dev_addr[2], netdev->dev_addr[3],
222 netdev->dev_addr[4], netdev->dev_addr[5]);
223
224 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
225 p += sprintf(p, "multicast_count=\"%d\"\n",
226 netdev_mc_count(netdev));
227 p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
228 p += sprintf(p, "bssid=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
229 info.bssid[0], info.bssid[1],
230 info.bssid[2], info.bssid[3],
231 info.bssid[4], info.bssid[5]);
232 p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
233 p += sprintf(p, "region_code = \"%02x\"\n", info.region_code);
234
235 netdev_for_each_mc_addr(ha, netdev)
236 p += sprintf(p, "multicast_address[%d]="
237 "\"%02x:%02x:%02x:%02x:%02x:%02x\"\n", i++,
238 ha->addr[0], ha->addr[1],
239 ha->addr[2], ha->addr[3],
240 ha->addr[4], ha->addr[5]);
241 }
242
243 p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes);
244 p += sprintf(p, "num_rx_bytes = %lu\n", priv->stats.rx_bytes);
245 p += sprintf(p, "num_tx_pkts = %lu\n", priv->stats.tx_packets);
246 p += sprintf(p, "num_rx_pkts = %lu\n", priv->stats.rx_packets);
247 p += sprintf(p, "num_tx_pkts_dropped = %lu\n", priv->stats.tx_dropped);
248 p += sprintf(p, "num_rx_pkts_dropped = %lu\n", priv->stats.rx_dropped);
249 p += sprintf(p, "num_tx_pkts_err = %lu\n", priv->stats.tx_errors);
250 p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
251 p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev))
252 ? "on" : "off"));
253 p += sprintf(p, "tx queue %s\n", ((netif_queue_stopped(priv->netdev))
254 ? "stopped" : "started"));
255
256 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
257 (unsigned long) p - page);
258
259free_and_exit:
260 free_page(page);
261 return ret;
262}
263
264/*
265 * Proc getlog file read handler.
266 *
267 * This function is called when the 'getlog' file is opened for reading.
268 * It prints the following log information -
269 * - Number of multicast Tx frames
270 * - Number of failed packets
271 * - Number of Tx retries
272 * - Number of multicast Tx retries
273 * - Number of duplicate frames
274 * - Number of RTS successes
275 * - Number of RTS failures
276 * - Number of ACK failures
277 * - Number of fragmented Rx frames
278 * - Number of multicast Rx frames
279 * - Number of FCS errors
280 * - Number of Tx frames
281 * - WEP ICV error counts
282 */
283static ssize_t
284mwifiex_getlog_read(struct file *file, char __user *ubuf,
285 size_t count, loff_t *ppos)
286{
287 struct mwifiex_private *priv =
288 (struct mwifiex_private *) file->private_data;
289 unsigned long page = get_zeroed_page(GFP_KERNEL);
290 char *p = (char *) page;
291 ssize_t ret;
292 struct mwifiex_ds_get_stats stats;
293
294 if (!p)
295 return -ENOMEM;
296
297 memset(&stats, 0, sizeof(stats));
298 ret = mwifiex_get_stats_info(priv, &stats);
299 if (ret)
300 goto free_and_exit;
301
302 p += sprintf(p, "\n"
303 "mcasttxframe %u\n"
304 "failed %u\n"
305 "retry %u\n"
306 "multiretry %u\n"
307 "framedup %u\n"
308 "rtssuccess %u\n"
309 "rtsfailure %u\n"
310 "ackfailure %u\n"
311 "rxfrag %u\n"
312 "mcastrxframe %u\n"
313 "fcserror %u\n"
314 "txframe %u\n"
315 "wepicverrcnt-1 %u\n"
316 "wepicverrcnt-2 %u\n"
317 "wepicverrcnt-3 %u\n"
318 "wepicverrcnt-4 %u\n",
319 stats.mcast_tx_frame,
320 stats.failed,
321 stats.retry,
322 stats.multi_retry,
323 stats.frame_dup,
324 stats.rts_success,
325 stats.rts_failure,
326 stats.ack_failure,
327 stats.rx_frag,
328 stats.mcast_rx_frame,
329 stats.fcs_error,
330 stats.tx_frame,
331 stats.wep_icv_error[0],
332 stats.wep_icv_error[1],
333 stats.wep_icv_error[2],
334 stats.wep_icv_error[3]);
335
336
337 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
338 (unsigned long) p - page);
339
340free_and_exit:
341 free_page(page);
342 return ret;
343}
344
345static struct mwifiex_debug_info info;
346
347/*
348 * Proc debug file read handler.
349 *
350 * This function is called when the 'debug' file is opened for reading.
351 * It prints the following log information -
352 * - Interrupt count
353 * - WMM AC VO packets count
354 * - WMM AC VI packets count
355 * - WMM AC BE packets count
356 * - WMM AC BK packets count
357 * - Maximum Tx buffer size
358 * - Tx buffer size
359 * - Current Tx buffer size
360 * - Power Save mode
361 * - Power Save state
362 * - Deep Sleep status
363 * - Device wakeup required status
364 * - Number of wakeup tries
365 * - Host Sleep configured status
366 * - Host Sleep activated status
367 * - Number of Tx timeouts
368 * - Number of command timeouts
369 * - Last timed out command ID
370 * - Last timed out command action
371 * - Last command ID
372 * - Last command action
373 * - Last command index
374 * - Last command response ID
375 * - Last command response index
376 * - Last event
377 * - Last event index
378 * - Number of host to card command failures
379 * - Number of sleep confirm command failures
380 * - Number of host to card data failures
381 * - Number of deauthentication events
382 * - Number of disassociation events
383 * - Number of link lost events
384 * - Number of deauthentication commands
385 * - Number of association success commands
386 * - Number of association failure commands
387 * - Number of commands sent
388 * - Number of data packets sent
389 * - Number of command responses received
390 * - Number of events received
391 * - Tx BA stream table (TID, RA)
392 * - Rx reorder table (TID, TA, Start window, Window size, Buffer)
393 */
394static ssize_t
395mwifiex_debug_read(struct file *file, char __user *ubuf,
396 size_t count, loff_t *ppos)
397{
398 struct mwifiex_private *priv =
399 (struct mwifiex_private *) file->private_data;
400 struct mwifiex_debug_data *d = &items[0];
401 unsigned long page = get_zeroed_page(GFP_KERNEL);
402 char *p = (char *) page;
403 ssize_t ret;
404 size_t size, addr;
405 long val;
406 int i, j;
407
408 if (!p)
409 return -ENOMEM;
410
411 ret = mwifiex_get_debug_info(priv, &info);
412 if (ret)
413 goto free_and_exit;
414
415 for (i = 0; i < num_of_items; i++) {
416 p += sprintf(p, "%s=", d[i].name);
417
418 size = d[i].size / d[i].num;
419
420 if (i < (num_of_items - 3))
421 addr = d[i].addr + (size_t) &info;
422 else /* The last 3 items are struct mwifiex_adapter variables */
423 addr = d[i].addr + (size_t) priv->adapter;
424
425 for (j = 0; j < d[i].num; j++) {
426 switch (size) {
427 case 1:
428 val = *((u8 *) addr);
429 break;
430 case 2:
431 val = *((u16 *) addr);
432 break;
433 case 4:
434 val = *((u32 *) addr);
435 break;
436 case 8:
437 val = *((long long *) addr);
438 break;
439 default:
440 val = -1;
441 break;
442 }
443
444 p += sprintf(p, "%#lx ", val);
445 addr += size;
446 }
447
448 p += sprintf(p, "\n");
449 }
450
451 if (info.tx_tbl_num) {
452 p += sprintf(p, "Tx BA stream table:\n");
453 for (i = 0; i < info.tx_tbl_num; i++)
454 p += sprintf(p, "tid = %d, "
455 "ra = %02x:%02x:%02x:%02x:%02x:%02x\n",
456 info.tx_tbl[i].tid, info.tx_tbl[i].ra[0],
457 info.tx_tbl[i].ra[1], info.tx_tbl[i].ra[2],
458 info.tx_tbl[i].ra[3], info.tx_tbl[i].ra[4],
459 info.tx_tbl[i].ra[5]);
460 }
461
462 if (info.rx_tbl_num) {
463 p += sprintf(p, "Rx reorder table:\n");
464 for (i = 0; i < info.rx_tbl_num; i++) {
465
466 p += sprintf(p, "tid = %d, "
467 "ta = %02x:%02x:%02x:%02x:%02x:%02x, "
468 "start_win = %d, "
469 "win_size = %d, buffer: ",
470 info.rx_tbl[i].tid,
471 info.rx_tbl[i].ta[0], info.rx_tbl[i].ta[1],
472 info.rx_tbl[i].ta[2], info.rx_tbl[i].ta[3],
473 info.rx_tbl[i].ta[4], info.rx_tbl[i].ta[5],
474 info.rx_tbl[i].start_win,
475 info.rx_tbl[i].win_size);
476
477 for (j = 0; j < info.rx_tbl[i].win_size; j++)
478 p += sprintf(p, "%c ",
479 info.rx_tbl[i].buffer[j] ?
480 '1' : '0');
481
482 p += sprintf(p, "\n");
483 }
484 }
485
486 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
487 (unsigned long) p - page);
488
489free_and_exit:
490 free_page(page);
491 return ret;
492}
493
494static u32 saved_reg_type, saved_reg_offset, saved_reg_value;
495
496/*
497 * Proc regrdwr file write handler.
498 *
499 * This function is called when the 'regrdwr' file is opened for writing.
500 *
501 * It stores a register access request (type, offset, optional value) for the next read.
502 */
503static ssize_t
504mwifiex_regrdwr_write(struct file *file,
505 const char __user *ubuf, size_t count, loff_t *ppos)
506{
507 unsigned long addr = get_zeroed_page(GFP_KERNEL);
508 char *buf = (char *) addr;
509 size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
510 int ret;
511 u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX;
512
513 if (!buf)
514 return -ENOMEM;
515
516
517 if (copy_from_user(buf, ubuf, buf_size)) {
518 ret = -EFAULT;
519 goto done;
520 }
521
522 sscanf(buf, "%u %x %x", &reg_type, &reg_offset, &reg_value);
523
524 if (reg_type == 0 || reg_offset == 0) {
525 ret = -EINVAL;
526 goto done;
527 } else {
528 saved_reg_type = reg_type;
529 saved_reg_offset = reg_offset;
530 saved_reg_value = reg_value;
531 ret = count;
532 }
533done:
534 free_page(addr);
535 return ret;
536}
537
538/*
539 * Proc regrdwr file read handler.
540 *
541 * This function is called when the 'regrdwr' file is opened for reading.
542 *
543 * It performs the stored register access: a write if a value was supplied, otherwise a read.
544 */
545static ssize_t
546mwifiex_regrdwr_read(struct file *file, char __user *ubuf,
547 size_t count, loff_t *ppos)
548{
549 struct mwifiex_private *priv =
550 (struct mwifiex_private *) file->private_data;
551 unsigned long addr = get_zeroed_page(GFP_KERNEL);
552 char *buf = (char *) addr;
553 int pos = 0, ret = 0;
554 u32 reg_value;
555
556 if (!buf)
557 return -ENOMEM;
558
559 if (!saved_reg_type) {
560 /* No command has been given */
561 pos += snprintf(buf, PAGE_SIZE, "0");
562 goto done;
563 }
564 /* Set command has been given */
565 if (saved_reg_value != UINT_MAX) {
566 ret = mwifiex_reg_write(priv, saved_reg_type, saved_reg_offset,
567 saved_reg_value);
568
569 pos += snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n",
570 saved_reg_type, saved_reg_offset,
571 saved_reg_value);
572
573 ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
574
575 goto done;
576 }
577 /* Get command has been given */
578 ret = mwifiex_reg_read(priv, saved_reg_type,
579 saved_reg_offset, &reg_value);
580 if (ret) {
581 ret = -EINVAL;
582 goto done;
583 }
584
585 pos += snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n", saved_reg_type,
586 saved_reg_offset, reg_value);
587
588 ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
589
590done:
591 free_page(addr);
592 return ret;
593}
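
A short usage note for the two 'regrdwr' handlers above, inferred from the sscanf format and the UINT_MAX sentinel (a sketch, not authoritative documentation):

/*
 * Writing "<reg_type> <offset> <value>" (decimal type, hex offset and
 * value) queues a register write; writing only "<reg_type> <offset>"
 * leaves the value at UINT_MAX, so the next read of 'regrdwr' performs
 * a register read instead. reg_type and offset must both be non-zero,
 * otherwise the write handler returns -EINVAL.
 */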
594
595static u32 saved_offset = -1, saved_bytes = -1;
596
597/*
598 * Proc rdeeprom file write handler.
599 *
600 * This function is called when the 'rdeeprom' file is opened for writing.
601 *
602 * It stores the EEPROM offset and byte count used by the subsequent read.
603 */
604static ssize_t
605mwifiex_rdeeprom_write(struct file *file,
606 const char __user *ubuf, size_t count, loff_t *ppos)
607{
608 unsigned long addr = get_zeroed_page(GFP_KERNEL);
609 char *buf = (char *) addr;
610 size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
611 int ret = 0;
612 int offset = -1, bytes = -1;
613
614 if (!buf)
615 return -ENOMEM;
616
617
618 if (copy_from_user(buf, ubuf, buf_size)) {
619 ret = -EFAULT;
620 goto done;
621 }
622
623 sscanf(buf, "%d %d", &offset, &bytes);
624
625 if (offset == -1 || bytes == -1) {
626 ret = -EINVAL;
627 goto done;
628 } else {
629 saved_offset = offset;
630 saved_bytes = bytes;
631 ret = count;
632 }
633done:
634 free_page(addr);
635 return ret;
636}
637
638/*
639 * Proc rdeeprom file read handler.
640 *
641 * This function is called when the 'rdeeprom' file is opened for reading.
642 *
643 * It reads the requested bytes from the EEPROM and returns them.
644 */
645static ssize_t
646mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
647 size_t count, loff_t *ppos)
648{
649 struct mwifiex_private *priv =
650 (struct mwifiex_private *) file->private_data;
651 unsigned long addr = get_zeroed_page(GFP_KERNEL);
652 char *buf = (char *) addr;
653 int pos = 0, ret = 0, i;
654 u8 value[MAX_EEPROM_DATA];
655
656 if (!buf)
657 return -ENOMEM;
658
659 if (saved_offset == -1) {
660 /* No command has been given */
661 pos += snprintf(buf, PAGE_SIZE, "0");
662 goto done;
663 }
664
665 /* Get command has been given */
666 ret = mwifiex_eeprom_read(priv, (u16) saved_offset,
667 (u16) saved_bytes, value);
668 if (ret) {
669 ret = -EINVAL;
670 goto done;
671 }
672
673 pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
674
675 for (i = 0; i < saved_bytes; i++)
676 pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
677
678 ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
679
680done:
681 free_page(addr);
682 return ret;
683}
684
685
686#define MWIFIEX_DFS_ADD_FILE(name) do { \
687 if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \
688 priv, &mwifiex_dfs_##name##_fops)) \
689 return; \
690} while (0);
691
692#define MWIFIEX_DFS_FILE_OPS(name) \
693static const struct file_operations mwifiex_dfs_##name##_fops = { \
694 .read = mwifiex_##name##_read, \
695 .write = mwifiex_##name##_write, \
696 .open = mwifiex_open_generic, \
697};
698
699#define MWIFIEX_DFS_FILE_READ_OPS(name) \
700static const struct file_operations mwifiex_dfs_##name##_fops = { \
701 .read = mwifiex_##name##_read, \
702 .open = mwifiex_open_generic, \
703};
704
705#define MWIFIEX_DFS_FILE_WRITE_OPS(name) \
706static const struct file_operations mwifiex_dfs_##name##_fops = { \
707 .write = mwifiex_##name##_write, \
708 .open = mwifiex_open_generic, \
709};
710
711
712MWIFIEX_DFS_FILE_READ_OPS(info);
713MWIFIEX_DFS_FILE_READ_OPS(debug);
714MWIFIEX_DFS_FILE_READ_OPS(getlog);
715MWIFIEX_DFS_FILE_OPS(regrdwr);
716MWIFIEX_DFS_FILE_OPS(rdeeprom);
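
To make the token pasting concrete, MWIFIEX_DFS_FILE_OPS(regrdwr) above expands to the definition below (shown as a comment, for illustration only):

/*
 * static const struct file_operations mwifiex_dfs_regrdwr_fops = {
 *	.read = mwifiex_regrdwr_read,
 *	.write = mwifiex_regrdwr_write,
 *	.open = mwifiex_open_generic,
 * };
 */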
717
718/*
719 * This function creates the debug FS directory structure and the files.
720 */
721void
722mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
723{
724 if (!mwifiex_dfs_dir || !priv)
725 return;
726
727 priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
728 mwifiex_dfs_dir);
729
730 if (!priv->dfs_dev_dir)
731 return;
732
733 MWIFIEX_DFS_ADD_FILE(info);
734 MWIFIEX_DFS_ADD_FILE(debug);
735 MWIFIEX_DFS_ADD_FILE(getlog);
736 MWIFIEX_DFS_ADD_FILE(regrdwr);
737 MWIFIEX_DFS_ADD_FILE(rdeeprom);
738}
739
740/*
741 * This function removes the debug FS directory structure and the files.
742 */
743void
744mwifiex_dev_debugfs_remove(struct mwifiex_private *priv)
745{
746 if (!priv)
747 return;
748
749 debugfs_remove_recursive(priv->dfs_dev_dir);
750}
751
752/*
753 * This function creates the top-level debugfs directory.
754 */
755void
756mwifiex_debugfs_init(void)
757{
758 if (!mwifiex_dfs_dir)
759 mwifiex_dfs_dir = debugfs_create_dir("mwifiex", NULL);
760}
761
762/*
763 * This function removes the top-level debugfs directory.
764 */
765void
766mwifiex_debugfs_remove(void)
767{
768 if (mwifiex_dfs_dir)
769 debugfs_remove(mwifiex_dfs_dir);
770}
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
new file mode 100644
index 000000000000..0e90b0986ed8
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -0,0 +1,129 @@
1/*
2 * Marvell Wireless LAN device driver: generic data structures and APIs
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_DECL_H_
21#define _MWIFIEX_DECL_H_
22
23#undef pr_fmt
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/wait.h>
27#include <linux/timer.h>
28#include <linux/ieee80211.h>
29
30
31#define MWIFIEX_MAX_BSS_NUM (1)
32
33#define MWIFIEX_MIN_DATA_HEADER_LEN 32 /* (sizeof(mwifiex_txpd)) */
34
35#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
36#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
37
38#define MWIFIEX_AMPDU_DEF_TXWINSIZE 32
39#define MWIFIEX_AMPDU_DEF_RXWINSIZE 16
40#define MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT 0xffff
41
42#define MWIFIEX_RATE_INDEX_HRDSSS0 0
43#define MWIFIEX_RATE_INDEX_HRDSSS3 3
44#define MWIFIEX_RATE_INDEX_OFDM0 4
45#define MWIFIEX_RATE_INDEX_OFDM7 11
46#define MWIFIEX_RATE_INDEX_MCS0 12
47
48#define MWIFIEX_RATE_BITMAP_OFDM0 16
49#define MWIFIEX_RATE_BITMAP_OFDM7 23
50#define MWIFIEX_RATE_BITMAP_MCS0 32
51#define MWIFIEX_RATE_BITMAP_MCS127 159
52
53#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
54
55#define MWIFIEX_RTS_MIN_VALUE (0)
56#define MWIFIEX_RTS_MAX_VALUE (2347)
57#define MWIFIEX_FRAG_MIN_VALUE (256)
58#define MWIFIEX_FRAG_MAX_VALUE (2346)
59
60#define MWIFIEX_SDIO_BLOCK_SIZE 256
61
62#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
63
64enum mwifiex_bss_type {
65 MWIFIEX_BSS_TYPE_STA = 0,
66 MWIFIEX_BSS_TYPE_UAP = 1,
67 MWIFIEX_BSS_TYPE_ANY = 0xff,
68};
69
70enum mwifiex_bss_role {
71 MWIFIEX_BSS_ROLE_STA = 0,
72 MWIFIEX_BSS_ROLE_UAP = 1,
73 MWIFIEX_BSS_ROLE_ANY = 0xff,
74};
75
76#define BSS_ROLE_BIT_MASK BIT(0)
77
78#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK)
79
80enum mwifiex_data_frame_type {
81 MWIFIEX_DATA_FRAME_TYPE_ETH_II = 0,
82 MWIFIEX_DATA_FRAME_TYPE_802_11,
83};
84
85struct mwifiex_fw_image {
86 u8 *helper_buf;
87 u32 helper_len;
88 u8 *fw_buf;
89 u32 fw_len;
90};
91
92struct mwifiex_802_11_ssid {
93 u32 ssid_len;
94 u8 ssid[IEEE80211_MAX_SSID_LEN];
95};
96
97struct mwifiex_wait_queue {
98 wait_queue_head_t wait;
99 u16 condition;
100 int status;
101};
102
103struct mwifiex_rxinfo {
104 u8 bss_index;
105 struct sk_buff *parent;
106 u8 use_count;
107};
108
109struct mwifiex_txinfo {
110 u32 status_code;
111 u8 flags;
112 u8 bss_index;
113};
114
115struct mwifiex_bss_attr {
116 u8 bss_type;
117 u8 frame_type;
118 u8 active;
119 u8 bss_priority;
120 u8 bss_num;
121};
122
123enum mwifiex_wmm_ac_e {
124 WMM_AC_BK,
125 WMM_AC_BE,
126 WMM_AC_VI,
127 WMM_AC_VO
128} __packed;
129#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
new file mode 100644
index 000000000000..afdd145dff0b
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -0,0 +1,1187 @@
1/*
2 * Marvell Wireless LAN device driver: Firmware specific macros & structures
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_FW_H_
21#define _MWIFIEX_FW_H_
22
23#include <linux/if_ether.h>
24
25
26#define INTF_HEADER_LEN 4
27
28struct rfc_1042_hdr {
29 u8 llc_dsap;
30 u8 llc_ssap;
31 u8 llc_ctrl;
32 u8 snap_oui[3];
33 u16 snap_type;
34};
35
36struct rx_packet_hdr {
37 struct ethhdr eth803_hdr;
38 struct rfc_1042_hdr rfc1042_hdr;
39};
40
41struct tx_packet_hdr {
42 struct ethhdr eth803_hdr;
43 struct rfc_1042_hdr rfc1042_hdr;
44};
45
46#define B_SUPPORTED_RATES 5
47#define G_SUPPORTED_RATES 9
48#define BG_SUPPORTED_RATES 13
49#define A_SUPPORTED_RATES 9
50#define HOSTCMD_SUPPORTED_RATES 14
51#define N_SUPPORTED_RATES 3
52#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN)
53
54#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11))
55#define IS_SUPPORT_MULTI_BANDS(adapter) \
56 (adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
57#define GET_FW_DEFAULT_BANDS(adapter) \
58 ((adapter->fw_cap_info >> 8) & ALL_802_11_BANDS)
59
60extern u8 supported_rates_b[B_SUPPORTED_RATES];
61extern u8 supported_rates_g[G_SUPPORTED_RATES];
62extern u8 supported_rates_bg[BG_SUPPORTED_RATES];
63extern u8 supported_rates_a[A_SUPPORTED_RATES];
64extern u8 supported_rates_n[N_SUPPORTED_RATES];
65
66#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
67
68#define KEY_INFO_ENABLED 0x01
69enum KEY_TYPE_ID {
70 KEY_TYPE_ID_WEP = 0,
71 KEY_TYPE_ID_TKIP,
72 KEY_TYPE_ID_AES,
73 KEY_TYPE_ID_WAPI,
74};
75#define KEY_MCAST BIT(0)
76#define KEY_UNICAST BIT(1)
77#define KEY_ENABLED BIT(2)
78
79#define WAPI_KEY_LEN 50
80
81#define MAX_POLL_TRIES 100
82
83#define MAX_MULTI_INTERFACE_POLL_TRIES 1000
84
85#define MAX_FIRMWARE_POLL_TRIES 100
86
87#define FIRMWARE_READY 0xfedc
88
89enum MWIFIEX_802_11_PRIVACY_FILTER {
90 MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL,
91 MWIFIEX_802_11_PRIV_FILTER_8021X_WEP
92};
93
94enum MWIFIEX_802_11_WEP_STATUS {
95 MWIFIEX_802_11_WEP_ENABLED,
96 MWIFIEX_802_11_WEP_DISABLED,
97};
98
99#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
100
101#define PROPRIETARY_TLV_BASE_ID 0x0100
102#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
103#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1)
104#define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2)
105#define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10)
106#define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16)
107#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18)
108#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
109#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
110#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
111#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
112#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
113#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
114#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
115#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
116#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
117
118#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
119
120#define SSN_MASK 0xfff0
121
122#define BA_RESULT_SUCCESS 0x0
123#define BA_RESULT_TIMEOUT 0x2
124
125#define IS_BASTREAM_SETUP(ptr) (ptr->ba_status)
126
127#define BA_STREAM_NOT_ALLOWED 0xff
128
129#define IS_11N_ENABLED(priv) ((priv->adapter->config_bands & BAND_GN || \
130 priv->adapter->config_bands & BAND_AN) \
131 && priv->curr_bss_params.bss_descriptor.bcn_ht_cap)
132#define INITIATOR_BIT(DelBAParamSet) (((DelBAParamSet) &\
133 BIT(DELBA_INITIATOR_POS)) >> DELBA_INITIATOR_POS)
134
135#define MWIFIEX_TX_DATA_BUF_SIZE_4K 4096
136#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
137
138#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
139
140/* dev_cap bitmap
141 * BIT
142 * 0-16 reserved
143 * 17 IEEE80211_HT_CAP_SUP_WIDTH_20_40
144 * 18-22 reserved
145 * 23 IEEE80211_HT_CAP_SGI_20
146 * 24 IEEE80211_HT_CAP_SGI_40
147 * 25 IEEE80211_HT_CAP_TX_STBC
148 * 26 IEEE80211_HT_CAP_RX_STBC
149 * 27-28 reserved
150 * 29 IEEE80211_HT_CAP_GRN_FLD
151 * 30-31 reserved
152 */
153#define ISSUPP_CHANWIDTH40(Dot11nDevCap) (Dot11nDevCap & BIT(17))
154#define ISSUPP_SHORTGI20(Dot11nDevCap) (Dot11nDevCap & BIT(23))
155#define ISSUPP_SHORTGI40(Dot11nDevCap) (Dot11nDevCap & BIT(24))
156#define ISSUPP_TXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(25))
157#define ISSUPP_RXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(26))
158#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
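
For illustration (arbitrary value, not taken from the source): a dot_11n_dev_cap word of 0x00820000 has BIT(17) and BIT(23) set, so only the first two tests below are non-zero:

/*
 * ISSUPP_CHANWIDTH40(0x00820000) -> BIT(17) set -> 40 MHz supported
 * ISSUPP_SHORTGI20(0x00820000)   -> BIT(23) set -> short GI at 20 MHz
 * ISSUPP_SHORTGI40(0x00820000)   -> 0 (BIT(24) clear)
 */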
159
160#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
161#define SETHT_MCS32(x) (x[4] |= 1)
162
163#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
164
165#define LLC_SNAP_LEN 8
166
167#define MOD_CLASS_HR_DSSS 0x03
168#define MOD_CLASS_OFDM 0x07
169#define MOD_CLASS_HT 0x08
170#define HT_BW_20 0
171#define HT_BW_40 1
172
173#define HostCmd_CMD_GET_HW_SPEC 0x0003
174#define HostCmd_CMD_802_11_SCAN 0x0006
175#define HostCmd_CMD_802_11_GET_LOG 0x000b
176#define HostCmd_CMD_MAC_MULTICAST_ADR 0x0010
177#define HostCmd_CMD_802_11_EEPROM_ACCESS 0x0059
178#define HostCmd_CMD_802_11_ASSOCIATE 0x0012
179#define HostCmd_CMD_802_11_SNMP_MIB 0x0016
180#define HostCmd_CMD_MAC_REG_ACCESS 0x0019
181#define HostCmd_CMD_BBP_REG_ACCESS 0x001a
182#define HostCmd_CMD_RF_REG_ACCESS 0x001b
183#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad
184#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d
185#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024
186#define HostCmd_CMD_MAC_CONTROL 0x0028
187#define HostCmd_CMD_802_11_AD_HOC_START 0x002b
188#define HostCmd_CMD_802_11_AD_HOC_JOIN 0x002c
189#define HostCmd_CMD_802_11_AD_HOC_STOP 0x0040
190#define HostCmd_CMD_802_11_MAC_ADDRESS 0x004D
191#define HostCmd_CMD_802_11D_DOMAIN_INFO 0x005b
192#define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e
193#define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c
194#define HostCmd_CMD_WMM_GET_STATUS 0x0071
195#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
196#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
197#define HostCmd_CMD_VERSION_EXT 0x0097
198#define HostCmd_CMD_RSSI_INFO 0x00a4
199#define HostCmd_CMD_FUNC_INIT 0x00a9
200#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa
201#define HostCmd_CMD_11N_CFG 0x00cd
202#define HostCmd_CMD_11N_ADDBA_REQ 0x00ce
203#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
204#define HostCmd_CMD_11N_DELBA 0x00d0
205#define HostCmd_CMD_RECONFIGURE_TX_BUFF 0x00d9
206#define HostCmd_CMD_AMSDU_AGGR_CTRL 0x00df
207#define HostCmd_CMD_TXPWR_CFG 0x00d1
208#define HostCmd_CMD_TX_RATE_CFG 0x00d6
209#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4
210#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5
211#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
212#define HostCmd_CMD_SET_BSS_MODE 0x00f7
213
214
215enum ENH_PS_MODES {
216 EN_PS = 1,
217 DIS_PS = 2,
218 EN_AUTO_DS = 3,
219 DIS_AUTO_DS = 4,
220 SLEEP_CONFIRM = 5,
221 GET_PS = 0,
222 EN_AUTO_PS = 0xff,
223 DIS_AUTO_PS = 0xfe,
224};
225
226#define HostCmd_RET_BIT 0x8000
227#define HostCmd_ACT_GEN_GET 0x0000
228#define HostCmd_ACT_GEN_SET 0x0001
229#define HostCmd_RESULT_OK 0x0000
230
231#define HostCmd_ACT_MAC_RX_ON 0x0001
232#define HostCmd_ACT_MAC_TX_ON 0x0002
233#define HostCmd_ACT_MAC_WEP_ENABLE 0x0008
234#define HostCmd_ACT_MAC_ETHERNETII_ENABLE 0x0010
235#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
236#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
237#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON 0x2000
238
239#define HostCmd_BSS_MODE_IBSS 0x0002
240#define HostCmd_BSS_MODE_ANY 0x0003
241
242#define HostCmd_SCAN_RADIO_TYPE_BG 0
243#define HostCmd_SCAN_RADIO_TYPE_A 1
244
245#define HOST_SLEEP_CFG_CANCEL 0xffffffff
246#define HOST_SLEEP_CFG_COND_DEF 0x0000000f
247#define HOST_SLEEP_CFG_GPIO_DEF 0xff
248#define HOST_SLEEP_CFG_GAP_DEF 0
249
250#define CMD_F_HOSTCMD (1 << 0)
251#define CMD_F_CANCELED (1 << 1)
252
253#define HostCmd_CMD_ID_MASK 0x0fff
254
255#define HostCmd_SEQ_NUM_MASK 0x00ff
256
257#define HostCmd_BSS_NUM_MASK 0x0f00
258
259#define HostCmd_BSS_TYPE_MASK 0xf000
260
261#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
262 (((seq) & 0x00ff) | \
263 (((num) & 0x000f) << 8)) | \
264 (((type) & 0x000f) << 12); }
265
266#define HostCmd_GET_SEQ_NO(seq) \
267 ((seq) & HostCmd_SEQ_NUM_MASK)
268
269#define HostCmd_GET_BSS_NO(seq) \
270 (((seq) & HostCmd_BSS_NUM_MASK) >> 8)
271
272#define HostCmd_GET_BSS_TYPE(seq) \
273 (((seq) & HostCmd_BSS_TYPE_MASK) >> 12)
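
A worked example of the sequence-field packing above (values chosen arbitrarily for illustration):

/*
 * HostCmd_SET_SEQ_NO_BSS_INFO(0x05, 1, MWIFIEX_BSS_TYPE_UAP) packs to
 * 0x1105: bits 0-7 carry the sequence number (0x05), bits 8-11 the BSS
 * number (1) and bits 12-15 the BSS type (1). Correspondingly,
 * HostCmd_GET_SEQ_NO(0x1105) == 0x05, HostCmd_GET_BSS_NO(0x1105) == 1
 * and HostCmd_GET_BSS_TYPE(0x1105) == 1.
 */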
274
275#define EVENT_DUMMY_HOST_WAKEUP_SIGNAL 0x00000001
276#define EVENT_LINK_LOST 0x00000003
277#define EVENT_LINK_SENSED 0x00000004
278#define EVENT_MIB_CHANGED 0x00000006
279#define EVENT_INIT_DONE 0x00000007
280#define EVENT_DEAUTHENTICATED 0x00000008
281#define EVENT_DISASSOCIATED 0x00000009
282#define EVENT_PS_AWAKE 0x0000000a
283#define EVENT_PS_SLEEP 0x0000000b
284#define EVENT_MIC_ERR_MULTICAST 0x0000000d
285#define EVENT_MIC_ERR_UNICAST 0x0000000e
286#define EVENT_DEEP_SLEEP_AWAKE 0x00000010
287#define EVENT_ADHOC_BCN_LOST 0x00000011
288
289#define EVENT_WMM_STATUS_CHANGE 0x00000017
290#define EVENT_BG_SCAN_REPORT 0x00000018
291#define EVENT_RSSI_LOW 0x00000019
292#define EVENT_SNR_LOW 0x0000001a
293#define EVENT_MAX_FAIL 0x0000001b
294#define EVENT_RSSI_HIGH 0x0000001c
295#define EVENT_SNR_HIGH 0x0000001d
296#define EVENT_IBSS_COALESCED 0x0000001e
297#define EVENT_DATA_RSSI_LOW 0x00000024
298#define EVENT_DATA_SNR_LOW 0x00000025
299#define EVENT_DATA_RSSI_HIGH 0x00000026
300#define EVENT_DATA_SNR_HIGH 0x00000027
301#define EVENT_LINK_QUALITY 0x00000028
302#define EVENT_PORT_RELEASE 0x0000002b
303#define EVENT_PRE_BEACON_LOST 0x00000031
304#define EVENT_ADDBA 0x00000033
305#define EVENT_DELBA 0x00000034
306#define EVENT_BA_STREAM_TIEMOUT 0x00000037
307#define EVENT_AMSDU_AGGR_CTRL 0x00000042
308#define EVENT_WEP_ICV_ERR 0x00000046
309#define EVENT_HS_ACT_REQ 0x00000047
310#define EVENT_BW_CHANGE 0x00000048
311
312#define EVENT_HOSTWAKE_STAIE 0x0000004d
313
314#define EVENT_ID_MASK 0xffff
315#define BSS_NUM_MASK 0xf
316
317#define EVENT_GET_BSS_NUM(event_cause) \
318 (((event_cause) >> 16) & BSS_NUM_MASK)
319
320#define EVENT_GET_BSS_TYPE(event_cause) \
321 (((event_cause) >> 24) & 0x00ff)
322
323struct mwifiex_ie_types_header {
324 __le16 type;
325 __le16 len;
326} __packed;
327
328struct mwifiex_ie_types_data {
329 struct mwifiex_ie_types_header header;
330 u8 data[1];
331} __packed;
332
333#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
334#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
335
336struct txpd {
337 u8 bss_type;
338 u8 bss_num;
339 __le16 tx_pkt_length;
340 __le16 tx_pkt_offset;
341 __le16 tx_pkt_type;
342 __le32 tx_control;
343 u8 priority;
344 u8 flags;
345 u8 pkt_delay_2ms;
346 u8 reserved1;
347} __packed;
348
349struct rxpd {
350 u8 bss_type;
351 u8 bss_num;
352 u16 rx_pkt_length;
353 u16 rx_pkt_offset;
354 u16 rx_pkt_type;
355 u16 seq_num;
356 u8 priority;
357 u8 rx_rate;
358 s8 snr;
359 s8 nf;
360 /* Ht Info [Bit 0] RxRate format: LG=0, HT=1
361 * [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1
362 * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */
363 u8 ht_info;
364 u8 reserved;
365} __packed;
366
367enum mwifiex_chan_scan_mode_bitmasks {
368 MWIFIEX_PASSIVE_SCAN = BIT(0),
369 MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
370};
371
372#define SECOND_CHANNEL_BELOW 0x30
373#define SECOND_CHANNEL_ABOVE 0x10
374struct mwifiex_chan_scan_param_set {
375 u8 radio_type;
376 u8 chan_number;
377 u8 chan_scan_mode_bitmap;
378 __le16 min_scan_time;
379 __le16 max_scan_time;
380} __packed;
381
382struct mwifiex_ie_types_chan_list_param_set {
383 struct mwifiex_ie_types_header header;
384 struct mwifiex_chan_scan_param_set chan_scan_param[1];
385} __packed;
386
387struct chan_band_param_set {
388 u8 radio_type;
389 u8 chan_number;
390};
391
392struct mwifiex_ie_types_chan_band_list_param_set {
393 struct mwifiex_ie_types_header header;
394 struct chan_band_param_set chan_band_param[1];
395} __packed;
396
397struct mwifiex_ie_types_rates_param_set {
398 struct mwifiex_ie_types_header header;
399 u8 rates[1];
400} __packed;
401
402struct mwifiex_ie_types_ssid_param_set {
403 struct mwifiex_ie_types_header header;
404 u8 ssid[1];
405} __packed;
406
407struct mwifiex_ie_types_num_probes {
408 struct mwifiex_ie_types_header header;
409 __le16 num_probes;
410} __packed;
411
412struct mwifiex_ie_types_wildcard_ssid_params {
413 struct mwifiex_ie_types_header header;
414 u8 max_ssid_length;
415 u8 ssid[1];
416} __packed;
417
418#define TSF_DATA_SIZE 8
419struct mwifiex_ie_types_tsf_timestamp {
420 struct mwifiex_ie_types_header header;
421 u8 tsf_data[1];
422} __packed;
423
424struct mwifiex_cf_param_set {
425 u8 cfp_cnt;
426 u8 cfp_period;
427 u16 cfp_max_duration;
428 u16 cfp_duration_remaining;
429} __packed;
430
431struct mwifiex_ibss_param_set {
432 u16 atim_window;
433} __packed;
434
435struct mwifiex_ie_types_ss_param_set {
436 struct mwifiex_ie_types_header header;
437 union {
438 struct mwifiex_cf_param_set cf_param_set[1];
439 struct mwifiex_ibss_param_set ibss_param_set[1];
440 } cf_ibss;
441} __packed;
442
443struct mwifiex_fh_param_set {
444 u16 dwell_time;
445 u8 hop_set;
446 u8 hop_pattern;
447 u8 hop_index;
448} __packed;
449
450struct mwifiex_ds_param_set {
451 u8 current_chan;
452} __packed;
453
454struct mwifiex_ie_types_phy_param_set {
455 struct mwifiex_ie_types_header header;
456 union {
457 struct mwifiex_fh_param_set fh_param_set[1];
458 struct mwifiex_ds_param_set ds_param_set[1];
459 } fh_ds;
460} __packed;
461
462struct mwifiex_ie_types_auth_type {
463 struct mwifiex_ie_types_header header;
464 __le16 auth_type;
465} __packed;
466
467struct mwifiex_ie_types_vendor_param_set {
468 struct mwifiex_ie_types_header header;
469 u8 ie[MWIFIEX_MAX_VSIE_LEN];
470};
471
472struct mwifiex_ie_types_rsn_param_set {
473 struct mwifiex_ie_types_header header;
474 u8 rsn_ie[1];
475} __packed;
476
477#define KEYPARAMSET_FIXED_LEN 6
478
479struct mwifiex_ie_type_key_param_set {
480 __le16 type;
481 __le16 length;
482 __le16 key_type_id;
483 __le16 key_info;
484 __le16 key_len;
485 u8 key[50];
486} __packed;
487
488struct host_cmd_ds_802_11_key_material {
489 __le16 action;
490 struct mwifiex_ie_type_key_param_set key_param_set;
491} __packed;
492
493struct host_cmd_ds_gen {
494 u16 command;
495 u16 size;
496 u16 seq_num;
497 u16 result;
498};
499
500#define S_DS_GEN sizeof(struct host_cmd_ds_gen)
501
502enum sleep_resp_ctrl {
503 RESP_NOT_NEEDED = 0,
504 RESP_NEEDED,
505};
506
507struct mwifiex_ps_param {
508 __le16 null_pkt_interval;
509 __le16 multiple_dtims;
510 __le16 bcn_miss_timeout;
511 __le16 local_listen_interval;
512 __le16 adhoc_wake_period;
513 __le16 mode;
514 __le16 delay_to_ps;
515};
516
517#define BITMAP_AUTO_DS 0x01
518#define BITMAP_STA_PS 0x10
519
520struct mwifiex_ie_types_auto_ds_param {
521 struct mwifiex_ie_types_header header;
522 __le16 deep_sleep_timeout;
523} __packed;
524
525struct mwifiex_ie_types_ps_param {
526 struct mwifiex_ie_types_header header;
527 struct mwifiex_ps_param param;
528} __packed;
529
530struct host_cmd_ds_802_11_ps_mode_enh {
531 __le16 action;
532
533 union {
534 struct mwifiex_ps_param opt_ps;
535 __le16 ps_bitmap;
536 } params;
537} __packed;
538
539struct host_cmd_ds_get_hw_spec {
540 __le16 hw_if_version;
541 __le16 version;
542 __le16 reserved;
543 __le16 num_of_mcast_adr;
544 u8 permanent_addr[ETH_ALEN];
545 __le16 region_code;
546 __le16 number_of_antenna;
547 __le32 fw_release_number;
548 __le32 reserved_1;
549 __le32 reserved_2;
550 __le32 reserved_3;
551 __le32 fw_cap_info;
552 __le32 dot_11n_dev_cap;
553 u8 dev_mcs_support;
554 __le16 mp_end_port; /* SDIO only, reserved for other interfaces */
555 __le16 reserved_4;
556} __packed;
557
558struct host_cmd_ds_802_11_rssi_info {
559 __le16 action;
560 __le16 ndata;
561 __le16 nbcn;
562 __le16 reserved[9];
563 long long reserved_1;
564};
565
566struct host_cmd_ds_802_11_rssi_info_rsp {
567 __le16 action;
568 __le16 ndata;
569 __le16 nbcn;
570 __le16 data_rssi_last;
571 __le16 data_nf_last;
572 __le16 data_rssi_avg;
573 __le16 data_nf_avg;
574 __le16 bcn_rssi_last;
575 __le16 bcn_nf_last;
576 __le16 bcn_rssi_avg;
577 __le16 bcn_nf_avg;
578 long long tsf_bcn;
579};
580
581struct host_cmd_ds_802_11_mac_address {
582 __le16 action;
583 u8 mac_addr[ETH_ALEN];
584};
585
586struct host_cmd_ds_mac_control {
587 __le16 action;
588 __le16 reserved;
589};
590
591struct host_cmd_ds_mac_multicast_adr {
592 __le16 action;
593 __le16 num_of_adrs;
594 u8 mac_list[MWIFIEX_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
595} __packed;
596
597struct host_cmd_ds_802_11_deauthenticate {
598 u8 mac_addr[ETH_ALEN];
599 __le16 reason_code;
600} __packed;
601
602struct host_cmd_ds_802_11_associate {
603 u8 peer_sta_addr[ETH_ALEN];
604 __le16 cap_info_bitmap;
605 __le16 listen_interval;
606 __le16 beacon_period;
607 u8 dtim_period;
608} __packed;
609
610struct ieee_types_assoc_rsp {
611 __le16 cap_info_bitmap;
612 __le16 status_code;
613 __le16 a_id;
614 u8 ie_buffer[1];
615} __packed;
616
617struct host_cmd_ds_802_11_associate_rsp {
618 struct ieee_types_assoc_rsp assoc_rsp;
619} __packed;
620
621struct ieee_types_cf_param_set {
622 u8 element_id;
623 u8 len;
624 u8 cfp_cnt;
625 u8 cfp_period;
626 u16 cfp_max_duration;
627 u16 cfp_duration_remaining;
628} __packed;
629
630struct ieee_types_ibss_param_set {
631 u8 element_id;
632 u8 len;
633 __le16 atim_window;
634} __packed;
635
636union ieee_types_ss_param_set {
637 struct ieee_types_cf_param_set cf_param_set;
638 struct ieee_types_ibss_param_set ibss_param_set;
639} __packed;
640
641struct ieee_types_fh_param_set {
642 u8 element_id;
643 u8 len;
644 __le16 dwell_time;
645 u8 hop_set;
646 u8 hop_pattern;
647 u8 hop_index;
648} __packed;
649
650struct ieee_types_ds_param_set {
651 u8 element_id;
652 u8 len;
653 u8 current_chan;
654} __packed;
655
656union ieee_types_phy_param_set {
657 struct ieee_types_fh_param_set fh_param_set;
658 struct ieee_types_ds_param_set ds_param_set;
659} __packed;
660
661struct host_cmd_ds_802_11_ad_hoc_start {
662 u8 ssid[IEEE80211_MAX_SSID_LEN];
663 u8 bss_mode;
664 __le16 beacon_period;
665 u8 dtim_period;
666 union ieee_types_ss_param_set ss_param_set;
667 union ieee_types_phy_param_set phy_param_set;
668 u16 reserved1;
669 __le16 cap_info_bitmap;
670 u8 DataRate[HOSTCMD_SUPPORTED_RATES];
671} __packed;
672
673struct host_cmd_ds_802_11_ad_hoc_result {
674 u8 pad[3];
675 u8 bssid[ETH_ALEN];
676} __packed;
677
678struct adhoc_bss_desc {
679 u8 bssid[ETH_ALEN];
680 u8 ssid[IEEE80211_MAX_SSID_LEN];
681 u8 bss_mode;
682 __le16 beacon_period;
683 u8 dtim_period;
684 u8 time_stamp[8];
685 u8 local_time[8];
686 union ieee_types_phy_param_set phy_param_set;
687 union ieee_types_ss_param_set ss_param_set;
688 __le16 cap_info_bitmap;
689 u8 data_rates[HOSTCMD_SUPPORTED_RATES];
690
691 /*
692 * DO NOT ADD ANY FIELDS TO THIS STRUCTURE.
693 * It is used in the Adhoc join command; adding fields will cause a
694 * binary layout mismatch with the firmware.
695 */
696} __packed;
697
698struct host_cmd_ds_802_11_ad_hoc_join {
699 struct adhoc_bss_desc bss_descriptor;
700 u16 reserved1;
701 u16 reserved2;
702} __packed;
703
704struct host_cmd_ds_802_11_get_log {
705 __le32 mcast_tx_frame;
706 __le32 failed;
707 __le32 retry;
708 __le32 multi_retry;
709 __le32 frame_dup;
710 __le32 rts_success;
711 __le32 rts_failure;
712 __le32 ack_failure;
713 __le32 rx_frag;
714 __le32 mcast_rx_frame;
715 __le32 fcs_error;
716 __le32 tx_frame;
717 __le32 reserved;
718 __le32 wep_icv_err_cnt[4];
719};
720
721struct host_cmd_ds_tx_rate_query {
722 u8 tx_rate;
723 /* Ht Info [Bit 0] RxRate format: LG=0, HT=1
724 * [Bit 1] HT Bandwidth: BW20 = 0, BW40 = 1
725 * [Bit 2] HT Guard Interval: LGI = 0, SGI = 1 */
726 u8 ht_info;
727} __packed;
728
729enum Host_Sleep_Action {
730 HS_CONFIGURE = 0x0001,
731 HS_ACTIVATE = 0x0002,
732};
733
734struct mwifiex_hs_config_param {
735 __le32 conditions;
736 u8 gpio;
737 u8 gap;
738} __packed;
739
740struct hs_activate_param {
741 u16 resp_ctrl;
742} __packed;
743
744struct host_cmd_ds_802_11_hs_cfg_enh {
745 __le16 action;
746
747 union {
748 struct mwifiex_hs_config_param hs_config;
749 struct hs_activate_param hs_activate;
750 } params;
751} __packed;
752
753enum SNMP_MIB_INDEX {
754 OP_RATE_SET_I = 1,
755 DTIM_PERIOD_I = 3,
756 RTS_THRESH_I = 5,
757 SHORT_RETRY_LIM_I = 6,
758 LONG_RETRY_LIM_I = 7,
759 FRAG_THRESH_I = 8,
760 DOT11D_I = 9,
761};
762
763#define MAX_SNMP_BUF_SIZE 128
764
765struct host_cmd_ds_802_11_snmp_mib {
766 __le16 query_type;
767 __le16 oid;
768 __le16 buf_size;
769 u8 value[1];
770} __packed;
771
772struct mwifiex_rate_scope {
773 __le16 type;
774 __le16 length;
775 __le16 hr_dsss_rate_bitmap;
776 __le16 ofdm_rate_bitmap;
777 __le16 ht_mcs_rate_bitmap[8];
778} __packed;
779
780struct mwifiex_rate_drop_pattern {
781 __le16 type;
782 __le16 length;
783 __le32 rate_drop_mode;
784} __packed;
785
786struct host_cmd_ds_tx_rate_cfg {
787 __le16 action;
788 __le16 cfg_index;
789} __packed;
790
791struct mwifiex_power_group {
792 u8 modulation_class;
793 u8 first_rate_code;
794 u8 last_rate_code;
795 s8 power_step;
796 s8 power_min;
797 s8 power_max;
798 u8 ht_bandwidth;
799 u8 reserved;
800} __packed;
801
802struct mwifiex_types_power_group {
803 u16 type;
804 u16 length;
805} __packed;
806
807struct host_cmd_ds_txpwr_cfg {
808 __le16 action;
809 __le16 cfg_index;
810 __le32 mode;
811} __packed;
812
813#define MWIFIEX_USER_SCAN_CHAN_MAX 50
814
815#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
816
817struct mwifiex_scan_cmd_config {
818 /*
819 * BSS mode to be sent in the firmware command
820 */
821 u8 bss_mode;
822
823 /* Specific BSSID used to filter scan results in the firmware */
824 u8 specific_bssid[ETH_ALEN];
825
826 /* Length of TLVs sent in command starting at tlvBuffer */
827 u32 tlv_buf_len;
828
829 /*
830 * SSID TLV(s) and ChanList TLVs to be sent in the firmware command
831 *
832 * TLV_TYPE_CHANLIST, mwifiex_ie_types_chan_list_param_set
833 * WLAN_EID_SSID, mwifiex_ie_types_ssid_param_set
834 */
835 u8 tlv_buf[1]; /* SSID TLV(s) and ChanList TLVs are stored
836 here */
837} __packed;
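/*
 * A minimal sketch of how a TLV is appended into tlv_buf, using the same
 * header-then-payload pattern as the command preparation code in join.c;
 * the tlv_pos write cursor is an assumed local variable:
 *
 *	struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
 *
 *	chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) tlv_pos;
 *	chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
 *	chan_tlv->header.len =
 *		cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
 *	tlv_pos += sizeof(chan_tlv->header) + le16_to_cpu(chan_tlv->header.len);
 */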
838
839struct mwifiex_user_scan_chan {
840 u8 chan_number;
841 u8 radio_type;
842 u8 scan_type;
843 u8 reserved;
844 u32 scan_time;
845} __packed;
846
847struct mwifiex_user_scan_ssid {
848 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
849 u8 max_len;
850} __packed;
851
852struct mwifiex_user_scan_cfg {
853 /*
854 * Flag set to keep the previous scan table intact
855 *
856 * If set, the scan results will accumulate, replacing any previous
857 * matched entries for a BSS with the new scan data
858 */
859 u8 keep_previous_scan;
860 /*
861 * BSS mode to be sent in the firmware command
862 */
863 u8 bss_mode;
864 /* Configure the number of probe requests for active chan scans */
865 u8 num_probes;
866 u8 reserved;
867 /* BSSID filter sent in the firmware command to limit the results */
868 u8 specific_bssid[ETH_ALEN];
869 /* SSID filter list used in the firmware command to limit the scan results */
870 struct mwifiex_user_scan_ssid ssid_list[MWIFIEX_MAX_SSID_LIST_LENGTH];
871 /* Variable number (up to a fixed maximum) of channels to scan */
872 struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
873} __packed;
874
875struct ie_body {
876 u8 grp_key_oui[4];
877 u8 ptk_cnt[2];
878 u8 ptk_body[4];
879} __packed;
880
881struct host_cmd_ds_802_11_scan {
882 u8 bss_mode;
883 u8 bssid[ETH_ALEN];
884 u8 tlv_buffer[1];
885} __packed;
886
887struct host_cmd_ds_802_11_scan_rsp {
888 __le16 bss_descript_size;
889 u8 number_of_sets;
890 u8 bss_desc_and_tlv_buffer[1];
891} __packed;
892
893struct host_cmd_ds_802_11_bg_scan_query {
894 u8 flush;
895} __packed;
896
897struct host_cmd_ds_802_11_bg_scan_query_rsp {
898 u32 report_condition;
899 struct host_cmd_ds_802_11_scan_rsp scan_resp;
900} __packed;
901
902struct mwifiex_ietypes_domain_param_set {
903 struct mwifiex_ie_types_header header;
904 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
905 struct ieee80211_country_ie_triplet triplet[1];
906} __packed;
907
908struct host_cmd_ds_802_11d_domain_info {
909 __le16 action;
910 struct mwifiex_ietypes_domain_param_set domain;
911} __packed;
912
913struct host_cmd_ds_802_11d_domain_info_rsp {
914 __le16 action;
915 struct mwifiex_ietypes_domain_param_set domain;
916} __packed;
917
918struct host_cmd_ds_11n_addba_req {
919 u8 add_req_result;
920 u8 peer_mac_addr[ETH_ALEN];
921 u8 dialog_token;
922 __le16 block_ack_param_set;
923 __le16 block_ack_tmo;
924 __le16 ssn;
925} __packed;
926
927struct host_cmd_ds_11n_addba_rsp {
928 u8 add_rsp_result;
929 u8 peer_mac_addr[ETH_ALEN];
930 u8 dialog_token;
931 __le16 status_code;
932 __le16 block_ack_param_set;
933 __le16 block_ack_tmo;
934 __le16 ssn;
935} __packed;
936
937struct host_cmd_ds_11n_delba {
938 u8 del_result;
939 u8 peer_mac_addr[ETH_ALEN];
940 __le16 del_ba_param_set;
941 __le16 reason_code;
942 u8 reserved;
943} __packed;
944
945struct host_cmd_ds_11n_batimeout {
946 u8 tid;
947 u8 peer_mac_addr[ETH_ALEN];
948 u8 origninator;
949} __packed;
950
951struct host_cmd_ds_11n_cfg {
952 __le16 action;
953 __le16 ht_tx_cap;
954 __le16 ht_tx_info;
955} __packed;
956
957struct host_cmd_ds_txbuf_cfg {
958 __le16 action;
959 __le16 buff_size;
960 __le16 mp_end_port; /* SDIO only, reserved for other interfaces */
961 __le16 reserved3;
962} __packed;
963
964struct host_cmd_ds_amsdu_aggr_ctrl {
965 __le16 action;
966 __le16 enable;
967 __le16 curr_buf_size;
968} __packed;
969
970struct mwifiex_ie_types_wmm_param_set {
971 struct mwifiex_ie_types_header header;
972 u8 wmm_ie[1];
973};
974
975struct mwifiex_ie_types_wmm_queue_status {
976 struct mwifiex_ie_types_header header;
977 u8 queue_index;
978 u8 disabled;
979 u16 medium_time;
980 u8 flow_required;
981 u8 flow_created;
982 u32 reserved;
983};
984
985struct ieee_types_vendor_header {
986 u8 element_id;
987 u8 len;
988 u8 oui[3];
989 u8 oui_type;
990 u8 oui_subtype;
991 u8 version;
992} __packed;
993
994struct ieee_types_wmm_ac_parameters {
995 u8 aci_aifsn_bitmap;
996 u8 ecw_bitmap;
997 __le16 tx_op_limit;
998} __packed;
999
1000struct ieee_types_wmm_parameter {
1001 /*
1002 * WMM Parameter IE - Vendor Specific Header:
1003 * element_id [221/0xdd]
1004 * Len [24]
1005 * Oui [00:50:f2]
1006 * OuiType [2]
1007 * OuiSubType [1]
1008 * Version [1]
1009 */
1010 struct ieee_types_vendor_header vend_hdr;
1011 u8 qos_info_bitmap;
1012 u8 reserved;
1013 struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_MAX_QUEUES];
1014} __packed;
1015
1016struct ieee_types_wmm_info {
1017
1018 /*
1019 * WMM Info IE - Vendor Specific Header:
1020 * element_id [221/0xdd]
1021 * Len [7]
1022 * Oui [00:50:f2]
1023 * OuiType [2]
1024 * OuiSubType [0]
1025 * Version [1]
1026 */
1027 struct ieee_types_vendor_header vend_hdr;
1028
1029 u8 qos_info_bitmap;
1030} __packed;
1031
1032struct host_cmd_ds_wmm_get_status {
1033 u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) *
1034 IEEE80211_MAX_QUEUES];
1035 u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2];
1036} __packed;
1037
1038struct mwifiex_wmm_ac_status {
1039 u8 disabled;
1040 u8 flow_required;
1041 u8 flow_created;
1042};
1043
1044struct mwifiex_ie_types_htcap {
1045 struct mwifiex_ie_types_header header;
1046 struct ieee80211_ht_cap ht_cap;
1047} __packed;
1048
1049struct mwifiex_ie_types_htinfo {
1050 struct mwifiex_ie_types_header header;
1051 struct ieee80211_ht_info ht_info;
1052} __packed;
1053
1054struct mwifiex_ie_types_2040bssco {
1055 struct mwifiex_ie_types_header header;
1056 u8 bss_co_2040;
1057} __packed;
1058
1059struct mwifiex_ie_types_extcap {
1060 struct mwifiex_ie_types_header header;
1061 u8 ext_cap;
1062} __packed;
1063
1064struct host_cmd_ds_mac_reg_access {
1065 __le16 action;
1066 __le16 offset;
1067 __le32 value;
1068} __packed;
1069
1070struct host_cmd_ds_bbp_reg_access {
1071 __le16 action;
1072 __le16 offset;
1073 u8 value;
1074 u8 reserved[3];
1075} __packed;
1076
1077struct host_cmd_ds_rf_reg_access {
1078 __le16 action;
1079 __le16 offset;
1080 u8 value;
1081 u8 reserved[3];
1082} __packed;
1083
1084struct host_cmd_ds_pmic_reg_access {
1085 __le16 action;
1086 __le16 offset;
1087 u8 value;
1088 u8 reserved[3];
1089} __packed;
1090
1091struct host_cmd_ds_802_11_eeprom_access {
1092 __le16 action;
1093
1094 __le16 offset;
1095 __le16 byte_count;
1096 u8 value;
1097} __packed;
1098
1099struct host_cmd_ds_802_11_rf_channel {
1100 __le16 action;
1101 __le16 current_channel;
1102 __le16 rf_type;
1103 __le16 reserved;
1104 u8 reserved_1[32];
1105} __packed;
1106
1107struct host_cmd_ds_version_ext {
1108 u8 version_str_sel;
1109 char version_str[128];
1110} __packed;
1111
1112struct host_cmd_ds_802_11_ibss_status {
1113 __le16 action;
1114 __le16 enable;
1115 u8 bssid[ETH_ALEN];
1116 __le16 beacon_interval;
1117 __le16 atim_window;
1118 __le16 use_g_rate_protect;
1119} __packed;
1120
1121#define CONNECTION_TYPE_INFRA 0
1122#define CONNECTION_TYPE_ADHOC 1
1123
1124struct host_cmd_ds_set_bss_mode {
1125 u8 con_type;
1126} __packed;
1127
1128struct host_cmd_ds_command {
1129 __le16 command;
1130 __le16 size;
1131 __le16 seq_num;
1132 __le16 result;
1133 union {
1134 struct host_cmd_ds_get_hw_spec hw_spec;
1135 struct host_cmd_ds_mac_control mac_ctrl;
1136 struct host_cmd_ds_802_11_mac_address mac_addr;
1137 struct host_cmd_ds_mac_multicast_adr mc_addr;
1138 struct host_cmd_ds_802_11_get_log get_log;
1139 struct host_cmd_ds_802_11_rssi_info rssi_info;
1140 struct host_cmd_ds_802_11_rssi_info_rsp rssi_info_rsp;
1141 struct host_cmd_ds_802_11_snmp_mib smib;
1142 struct host_cmd_ds_802_11_rf_channel rf_channel;
1143 struct host_cmd_ds_tx_rate_query tx_rate;
1144 struct host_cmd_ds_tx_rate_cfg tx_rate_cfg;
1145 struct host_cmd_ds_txpwr_cfg txp_cfg;
1146 struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
1147 struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
1148 struct host_cmd_ds_802_11_scan scan;
1149 struct host_cmd_ds_802_11_scan_rsp scan_resp;
1150 struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
1151 struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
1152 struct host_cmd_ds_802_11_associate associate;
1153 struct host_cmd_ds_802_11_associate_rsp associate_rsp;
1154 struct host_cmd_ds_802_11_deauthenticate deauth;
1155 struct host_cmd_ds_802_11_ad_hoc_start adhoc_start;
1156 struct host_cmd_ds_802_11_ad_hoc_result adhoc_result;
1157 struct host_cmd_ds_802_11_ad_hoc_join adhoc_join;
1158 struct host_cmd_ds_802_11d_domain_info domain_info;
1159 struct host_cmd_ds_802_11d_domain_info_rsp domain_info_resp;
1160 struct host_cmd_ds_11n_addba_req add_ba_req;
1161 struct host_cmd_ds_11n_addba_rsp add_ba_rsp;
1162 struct host_cmd_ds_11n_delba del_ba;
1163 struct host_cmd_ds_txbuf_cfg tx_buf;
1164 struct host_cmd_ds_amsdu_aggr_ctrl amsdu_aggr_ctrl;
1165 struct host_cmd_ds_11n_cfg htcfg;
1166 struct host_cmd_ds_wmm_get_status get_wmm_status;
1167 struct host_cmd_ds_802_11_key_material key_material;
1168 struct host_cmd_ds_version_ext verext;
1169 struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
1170 struct host_cmd_ds_mac_reg_access mac_reg;
1171 struct host_cmd_ds_bbp_reg_access bbp_reg;
1172 struct host_cmd_ds_rf_reg_access rf_reg;
1173 struct host_cmd_ds_pmic_reg_access pmic_reg;
1174 struct host_cmd_ds_set_bss_mode bss_mode;
1175 struct host_cmd_ds_802_11_eeprom_access eeprom;
1176 } params;
1177} __packed;
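/*
 * A minimal sketch of how a command is filled in through this union,
 * following the pattern of the preparation routines in this driver (see
 * mwifiex_cmd_802_11_associate() in join.c); the deauthenticate command id
 * name, the local variables and the size computation are assumptions used
 * purely for illustration:
 *
 *	struct host_cmd_ds_802_11_deauthenticate *deauth = &cmd->params.deauth;
 *
 *	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_DEAUTHENTICATE);
 *	memcpy(deauth->mac_addr, mac, ETH_ALEN);
 *	deauth->reason_code = cpu_to_le16(reason_code);
 *	cmd->size = cpu_to_le16(sizeof(*deauth) + S_DS_GEN);
 */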
1178
1179struct mwifiex_opt_sleep_confirm {
1180 __le16 command;
1181 __le16 size;
1182 __le16 seq_num;
1183 __le16 result;
1184 __le16 action;
1185 __le16 resp_ctrl;
1186} __packed;
1187#endif /* !_MWIFIEX_FW_H_ */
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
new file mode 100644
index 000000000000..3f1559e61320
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -0,0 +1,645 @@
1/*
2 * Marvell Wireless LAN device driver: HW/FW Initialization
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function adds a BSS priority table to the table list.
30 *
31 * The function allocates a new BSS priority table node and adds it to
32 * the end of BSS priority table list, kept in driver memory.
33 */
34static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
35{
36 struct mwifiex_adapter *adapter = priv->adapter;
37 struct mwifiex_bss_prio_node *bss_prio;
38 unsigned long flags;
39
40 bss_prio = kzalloc(sizeof(struct mwifiex_bss_prio_node), GFP_KERNEL);
41 if (!bss_prio) {
42 dev_err(adapter->dev, "%s: failed to alloc bss_prio\n",
43 __func__);
44 return -ENOMEM;
45 }
46
47 bss_prio->priv = priv;
48 INIT_LIST_HEAD(&bss_prio->list);
49 if (!adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur)
50 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
51 bss_prio;
52
53 spin_lock_irqsave(&adapter->bss_prio_tbl[priv->bss_priority]
54 .bss_prio_lock, flags);
55 list_add_tail(&bss_prio->list,
56 &adapter->bss_prio_tbl[priv->bss_priority]
57 .bss_prio_head);
58 spin_unlock_irqrestore(&adapter->bss_prio_tbl[priv->bss_priority]
59 .bss_prio_lock, flags);
60
61 return 0;
62}
63
64/*
65 * This function initializes the private structure and sets default
66 * values to the members.
67 *
68 * Additionally, it also initializes all the locks and sets up all the
69 * lists.
70 */
71static int mwifiex_init_priv(struct mwifiex_private *priv)
72{
73 u32 i;
74
75 priv->media_connected = false;
76 memset(priv->curr_addr, 0xff, ETH_ALEN);
77
78 priv->pkt_tx_ctrl = 0;
79 priv->bss_mode = NL80211_IFTYPE_STATION;
80 priv->data_rate = 0; /* Initially indicate the rate as auto */
81 priv->is_data_rate_auto = true;
82 priv->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR;
83 priv->data_avg_factor = DEFAULT_DATA_AVG_FACTOR;
84
85 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_DISABLED;
86 priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
87 priv->sec_info.encryption_mode = 0;
88 for (i = 0; i < ARRAY_SIZE(priv->wep_key); i++)
89 memset(&priv->wep_key[i], 0, sizeof(struct mwifiex_wep_key));
90 priv->wep_key_curr_index = 0;
91 priv->curr_pkt_filter = HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON |
92 HostCmd_ACT_MAC_ETHERNETII_ENABLE;
93
94 priv->beacon_period = 100; /* beacon interval */
95 priv->attempted_bss_desc = NULL;
96 memset(&priv->curr_bss_params, 0, sizeof(priv->curr_bss_params));
97 priv->listen_interval = MWIFIEX_DEFAULT_LISTEN_INTERVAL;
98
99 memset(&priv->prev_ssid, 0, sizeof(priv->prev_ssid));
100 memset(&priv->prev_bssid, 0, sizeof(priv->prev_bssid));
101 memset(&priv->assoc_rsp_buf, 0, sizeof(priv->assoc_rsp_buf));
102 priv->assoc_rsp_size = 0;
103 priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
104 priv->atim_window = 0;
105 priv->adhoc_state = ADHOC_IDLE;
106 priv->tx_power_level = 0;
107 priv->max_tx_power_level = 0;
108 priv->min_tx_power_level = 0;
109 priv->tx_rate = 0;
110 priv->rxpd_htinfo = 0;
111 priv->rxpd_rate = 0;
112 priv->rate_bitmap = 0;
113 priv->data_rssi_last = 0;
114 priv->data_rssi_avg = 0;
115 priv->data_nf_avg = 0;
116 priv->data_nf_last = 0;
117 priv->bcn_rssi_last = 0;
118 priv->bcn_rssi_avg = 0;
119 priv->bcn_nf_avg = 0;
120 priv->bcn_nf_last = 0;
121 memset(&priv->wpa_ie, 0, sizeof(priv->wpa_ie));
122 memset(&priv->aes_key, 0, sizeof(priv->aes_key));
123 priv->wpa_ie_len = 0;
124 priv->wpa_is_gtk_set = false;
125
126 memset(&priv->assoc_tlv_buf, 0, sizeof(priv->assoc_tlv_buf));
127 priv->assoc_tlv_buf_len = 0;
128 memset(&priv->wps, 0, sizeof(priv->wps));
129 memset(&priv->gen_ie_buf, 0, sizeof(priv->gen_ie_buf));
130 priv->gen_ie_buf_len = 0;
131 memset(priv->vs_ie, 0, sizeof(priv->vs_ie));
132
133 priv->wmm_required = true;
134 priv->wmm_enabled = false;
135 priv->wmm_qosinfo = 0;
136 priv->curr_bcn_buf = NULL;
137 priv->curr_bcn_size = 0;
138
139 priv->scan_block = false;
140
141 return mwifiex_add_bss_prio_tbl(priv);
142}
143
144/*
145 * This function allocates buffers for members of the adapter
146 * structure.
147 *
148 * The memory allocated includes scan table, command buffers, and
149 * sleep confirm command buffer. In addition, the queues are
150 * also initialized.
151 */
152static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
153{
154 int ret;
155 u32 buf_size;
156 struct mwifiex_bssdescriptor *temp_scan_table;
157
158 /* Allocate buffer to store the BSSID list */
159 buf_size = sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP;
160 temp_scan_table = kzalloc(buf_size, GFP_KERNEL);
161 if (!temp_scan_table) {
162 dev_err(adapter->dev, "%s: failed to alloc temp_scan_table\n",
163 __func__);
164 return -ENOMEM;
165 }
166
167 adapter->scan_table = temp_scan_table;
168
169 /* Allocate command buffer */
170 ret = mwifiex_alloc_cmd_buffer(adapter);
171 if (ret) {
172 dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n",
173 __func__);
174 return -1;
175 }
176
177 adapter->sleep_cfm =
178 dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
179 + INTF_HEADER_LEN);
180
181 if (!adapter->sleep_cfm) {
182 dev_err(adapter->dev, "%s: failed to alloc sleep cfm"
183 " cmd buffer\n", __func__);
184 return -1;
185 }
186 skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN);
187
188 return 0;
189}
190
191/*
192 * This function initializes the adapter structure and sets default
193 * values to the members of adapter.
194 *
195 * This also initializes the WMM related parameters in the driver private
196 * structures.
197 */
198static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
199{
200 struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL;
201
202 skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm));
203 sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
204 (adapter->sleep_cfm->data);
205
206 adapter->cmd_sent = false;
207 adapter->data_sent = true;
208 adapter->cmd_resp_received = false;
209 adapter->event_received = false;
210 adapter->data_received = false;
211
212 adapter->surprise_removed = false;
213
214 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
215
216 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
217 adapter->ps_state = PS_STATE_AWAKE;
218 adapter->need_to_wakeup = false;
219
220 adapter->scan_mode = HostCmd_BSS_MODE_ANY;
221 adapter->specific_scan_time = MWIFIEX_SPECIFIC_SCAN_CHAN_TIME;
222 adapter->active_scan_time = MWIFIEX_ACTIVE_SCAN_CHAN_TIME;
223 adapter->passive_scan_time = MWIFIEX_PASSIVE_SCAN_CHAN_TIME;
224
225 adapter->num_in_scan_table = 0;
226 memset(adapter->scan_table, 0,
227 (sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP));
228 adapter->scan_probes = 1;
229
230 memset(adapter->bcn_buf, 0, sizeof(adapter->bcn_buf));
231 adapter->bcn_buf_end = adapter->bcn_buf;
232
233 adapter->multiple_dtim = 1;
234
235 adapter->local_listen_interval = 0; /* default value in firmware
236 will be used */
237
238 adapter->is_deep_sleep = false;
239
240 adapter->delay_null_pkt = false;
241 adapter->delay_to_ps = 1000;
242 adapter->enhanced_ps_mode = PS_MODE_AUTO;
243
244 adapter->gen_null_pkt = false; /* Disable NULL packet generation by
245 default */
246 adapter->pps_uapsd_mode = false; /* Disable pps/uapsd mode by
247 default */
248 adapter->pm_wakeup_card_req = false;
249
250 adapter->pm_wakeup_fw_try = false;
251
252 adapter->max_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
253 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
254 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
255
256 adapter->is_hs_configured = false;
257 adapter->hs_cfg.conditions = cpu_to_le32(HOST_SLEEP_CFG_COND_DEF);
258 adapter->hs_cfg.gpio = HOST_SLEEP_CFG_GPIO_DEF;
259 adapter->hs_cfg.gap = HOST_SLEEP_CFG_GAP_DEF;
260 adapter->hs_activated = false;
261
262 memset(adapter->event_body, 0, sizeof(adapter->event_body));
263 adapter->hw_dot_11n_dev_cap = 0;
264 adapter->hw_dev_mcs_support = 0;
265 adapter->chan_offset = 0;
266 adapter->adhoc_11n_enabled = false;
267
268 mwifiex_wmm_init(adapter);
269
270 if (adapter->sleep_cfm) {
271 memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
272 sleep_cfm_buf->command =
273 cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
274 sleep_cfm_buf->size =
275 cpu_to_le16(adapter->sleep_cfm->len);
276 sleep_cfm_buf->result = 0;
277 sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
278 sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
279 }
280 memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params));
281 memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period));
282 adapter->tx_lock_flag = false;
283 adapter->null_pkt_interval = 0;
284 adapter->fw_bands = 0;
285 adapter->config_bands = 0;
286 adapter->adhoc_start_band = 0;
287 adapter->scan_channels = NULL;
288 adapter->fw_release_number = 0;
289 adapter->fw_cap_info = 0;
290 memset(&adapter->upld_buf, 0, sizeof(adapter->upld_buf));
291 adapter->event_cause = 0;
292 adapter->region_code = 0;
293 adapter->bcn_miss_time_out = DEFAULT_BCN_MISS_TIMEOUT;
294 adapter->adhoc_awake_period = 0;
295 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
296 adapter->arp_filter_size = 0;
297}
298
299/*
300 * This function frees the adapter structure.
301 *
302 * The freeing is done step by step, by canceling all
303 * pending commands, freeing the member buffers previously
304 * allocated (command buffers, scan table buffer, sleep confirm
305 * command buffer), stopping the timers and calling the cleanup
306 * routines for every interface, before the actual adapter
307 * structure is freed.
308 */
309static void
310mwifiex_free_adapter(struct mwifiex_adapter *adapter)
311{
312 if (!adapter) {
313 pr_err("%s: adapter is NULL\n", __func__);
314 return;
315 }
316
317 mwifiex_cancel_all_pending_cmd(adapter);
318
319 /* Free lock variables */
320 mwifiex_free_lock_list(adapter);
321
322 /* Free command buffer */
323 dev_dbg(adapter->dev, "info: free cmd buffer\n");
324 mwifiex_free_cmd_buffer(adapter);
325
326 del_timer(&adapter->cmd_timer);
327
328 dev_dbg(adapter->dev, "info: free scan table\n");
329 kfree(adapter->scan_table);
330 adapter->scan_table = NULL;
331
332 adapter->if_ops.cleanup_if(adapter);
333
334 dev_kfree_skb_any(adapter->sleep_cfm);
335}
336
337/*
338 * This function initializes the lock variables and
339 * the list heads.
340 */
341int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
342{
343 struct mwifiex_private *priv;
344 s32 i, j;
345
346 spin_lock_init(&adapter->mwifiex_lock);
347 spin_lock_init(&adapter->int_lock);
348 spin_lock_init(&adapter->main_proc_lock);
349 spin_lock_init(&adapter->mwifiex_cmd_lock);
350 for (i = 0; i < adapter->priv_num; i++) {
351 if (adapter->priv[i]) {
352 priv = adapter->priv[i];
353 spin_lock_init(&priv->rx_pkt_lock);
354 spin_lock_init(&priv->wmm.ra_list_spinlock);
355 spin_lock_init(&priv->curr_bcn_buf_lock);
356 }
357 }
358
359 /* Initialize cmd_free_q */
360 INIT_LIST_HEAD(&adapter->cmd_free_q);
361 /* Initialize cmd_pending_q */
362 INIT_LIST_HEAD(&adapter->cmd_pending_q);
363 /* Initialize scan_pending_q */
364 INIT_LIST_HEAD(&adapter->scan_pending_q);
365
366 spin_lock_init(&adapter->cmd_free_q_lock);
367 spin_lock_init(&adapter->cmd_pending_q_lock);
368 spin_lock_init(&adapter->scan_pending_q_lock);
369
370 for (i = 0; i < adapter->priv_num; ++i) {
371 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
372 adapter->bss_prio_tbl[i].bss_prio_cur = NULL;
373 spin_lock_init(&adapter->bss_prio_tbl[i].bss_prio_lock);
374 }
375
376 for (i = 0; i < adapter->priv_num; i++) {
377 if (!adapter->priv[i])
378 continue;
379 priv = adapter->priv[i];
380 for (j = 0; j < MAX_NUM_TID; ++j) {
381 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[j].ra_list);
382 spin_lock_init(&priv->wmm.tid_tbl_ptr[j].tid_tbl_lock);
383 }
384 INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
385 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
386
387 spin_lock_init(&priv->tx_ba_stream_tbl_lock);
388 spin_lock_init(&priv->rx_reorder_tbl_lock);
389 }
390
391 return 0;
392}
393
394/*
395 * This function releases the lock-protected lists by deleting
396 * their list heads.
397 */
398void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
399{
400 struct mwifiex_private *priv;
401 s32 i, j;
402
403 /* Free lists */
404 list_del(&adapter->cmd_free_q);
405 list_del(&adapter->cmd_pending_q);
406 list_del(&adapter->scan_pending_q);
407
408 for (i = 0; i < adapter->priv_num; i++)
409 list_del(&adapter->bss_prio_tbl[i].bss_prio_head);
410
411 for (i = 0; i < adapter->priv_num; i++) {
412 if (adapter->priv[i]) {
413 priv = adapter->priv[i];
414 for (j = 0; j < MAX_NUM_TID; ++j)
415 list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
416 list_del(&priv->tx_ba_stream_tbl_ptr);
417 list_del(&priv->rx_reorder_tbl_ptr);
418 }
419 }
420}
421
422/*
423 * This function initializes the firmware.
424 *
425 * The following operations are performed sequentially -
426 * - Allocate adapter structure
427 * - Initialize the adapter structure
428 * - Initialize the private structure
429 * - Add BSS priority tables to the adapter structure
430 * - For each interface, send the init commands to firmware
431 * - Send the first command in command pending queue, if available
432 */
433int mwifiex_init_fw(struct mwifiex_adapter *adapter)
434{
435 int ret;
436 struct mwifiex_private *priv;
437 u8 i, first_sta = true;
438 int is_cmd_pend_q_empty;
439 unsigned long flags;
440
441 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
442
443 /* Allocate memory for member of adapter structure */
444 ret = mwifiex_allocate_adapter(adapter);
445 if (ret)
446 return -1;
447
448 /* Initialize adapter structure */
449 mwifiex_init_adapter(adapter);
450
451 for (i = 0; i < adapter->priv_num; i++) {
452 if (adapter->priv[i]) {
453 priv = adapter->priv[i];
454
455 /* Initialize private structure */
456 ret = mwifiex_init_priv(priv);
457 if (ret)
458 return -1;
459 }
460 }
461 for (i = 0; i < adapter->priv_num; i++) {
462 if (adapter->priv[i]) {
463 ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta);
464 if (ret == -1)
465 return -1;
466
467 first_sta = false;
468 }
469 }
470
471 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
472 is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
473 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
474 if (!is_cmd_pend_q_empty) {
475 /* Send the first command in queue and return */
476 if (mwifiex_main_process(adapter) != -1)
477 ret = -EINPROGRESS;
478 } else {
479 adapter->hw_status = MWIFIEX_HW_STATUS_READY;
480 }
481
482 return ret;
483}
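/*
 * Note on the return value: -EINPROGRESS is returned when the queued init
 * commands have been handed to mwifiex_main_process(), i.e. firmware
 * initialization completes asynchronously; MWIFIEX_HW_STATUS_READY is set
 * directly only when no command was left pending.
 */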
484
485/*
486 * This function deletes the BSS priority tables.
487 *
488 * The function traverses through all the allocated BSS priority nodes
489 * in every BSS priority table and frees them.
490 */
491static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
492{
493 int i;
494 struct mwifiex_adapter *adapter = priv->adapter;
495 struct mwifiex_bss_prio_node *bssprio_node, *tmp_node, **cur;
496 struct list_head *head;
497 spinlock_t *lock;
498 unsigned long flags;
499
500 for (i = 0; i < adapter->priv_num; ++i) {
501 head = &adapter->bss_prio_tbl[i].bss_prio_head;
502 cur = &adapter->bss_prio_tbl[i].bss_prio_cur;
503 lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
504 dev_dbg(adapter->dev, "info: delete BSS priority table,"
505 " index = %d, i = %d, head = %p, cur = %p\n",
506 priv->bss_index, i, head, *cur);
507 if (*cur) {
508 spin_lock_irqsave(lock, flags);
509 if (list_empty(head)) {
510 spin_unlock_irqrestore(lock, flags);
511 continue;
512 }
513 bssprio_node = list_first_entry(head,
514 struct mwifiex_bss_prio_node, list);
515 spin_unlock_irqrestore(lock, flags);
516
517 list_for_each_entry_safe(bssprio_node, tmp_node, head,
518 list) {
519 if (bssprio_node->priv == priv) {
520 dev_dbg(adapter->dev, "info: Delete "
521 "node %p, next = %p\n",
522 bssprio_node, tmp_node);
523 spin_lock_irqsave(lock, flags);
524 list_del(&bssprio_node->list);
525 spin_unlock_irqrestore(lock, flags);
526 kfree(bssprio_node);
527 }
528 }
529 *cur = (struct mwifiex_bss_prio_node *)head;
530 }
531 }
532}
533
534/*
535 * This function is used to shutdown the driver.
536 *
537 * The following operations are performed sequentially -
538 * - Check if already shut down
539 * - Make sure the main process has stopped
540 * - Clean up the Tx and Rx queues
541 * - Delete BSS priority tables
542 * - Free the adapter
543 * - Notify completion
544 */
545int
546mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
547{
548 int ret = -EINPROGRESS;
549 struct mwifiex_private *priv;
550 s32 i;
551 unsigned long flags;
552
553 /* mwifiex already shutdown */
554 if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
555 return 0;
556
557 adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
558 /* wait for mwifiex_process to complete */
559 if (adapter->mwifiex_processing) {
560 dev_warn(adapter->dev, "main process is still running\n");
561 return ret;
562 }
563
564 /* shut down mwifiex */
565 dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
566
567 /* Clean up Tx/Rx queues and delete BSS priority table */
568 for (i = 0; i < adapter->priv_num; i++) {
569 if (adapter->priv[i]) {
570 priv = adapter->priv[i];
571
572 mwifiex_clean_txrx(priv);
573 mwifiex_delete_bss_prio_tbl(priv);
574 }
575 }
576
577 spin_lock_irqsave(&adapter->mwifiex_lock, flags);
578
579 /* Free adapter structure */
580 mwifiex_free_adapter(adapter);
581
582 spin_unlock_irqrestore(&adapter->mwifiex_lock, flags);
583
584 /* Notify completion */
585 ret = mwifiex_shutdown_fw_complete(adapter);
586
587 return ret;
588}
589
590/*
591 * This function downloads the firmware to the card.
592 *
593 * The actual download is preceded by two sanity checks -
594 * - Check if firmware is already running
595 * - Check if the interface is the winner to download the firmware
596 *
597 * ...and followed by another -
598 * - Check if the firmware is downloaded successfully
599 *
600 * After download is successfully completed, the host interrupts are enabled.
601 */
602int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
603 struct mwifiex_fw_image *pmfw)
604{
605 int ret, winner;
606 u32 poll_num = 1;
607
608 /* Check if firmware is already running */
609 ret = adapter->if_ops.check_fw_status(adapter, poll_num, &winner);
610 if (!ret) {
611 dev_notice(adapter->dev,
612 "WLAN FW already running! Skip FW download\n");
613 goto done;
614 }
615 poll_num = MAX_FIRMWARE_POLL_TRIES;
616
617 /* Check if we are the winner for downloading FW */
618 if (!winner) {
619 dev_notice(adapter->dev,
620 "Other interface already running!"
621 " Skip FW download\n");
622 poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
623 goto poll_fw;
624 }
625 if (pmfw) {
626 /* Download firmware with helper */
627 ret = adapter->if_ops.prog_fw(adapter, pmfw);
628 if (ret) {
629 dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret);
630 return ret;
631 }
632 }
633
634poll_fw:
635 /* Check if the firmware is downloaded successfully or not */
636 ret = adapter->if_ops.check_fw_status(adapter, poll_num, NULL);
637 if (ret) {
638 dev_err(adapter->dev, "FW failed to be active in time\n");
639 return -1;
640 }
641done:
642 /* re-enable host interrupt for mwifiex after fw dnld is successful */
643 adapter->if_ops.enable_int(adapter);
644 return ret;
645}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
new file mode 100644
index 000000000000..7c1c5ee40eb9
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -0,0 +1,331 @@
1/*
2 * Marvell Wireless LAN device driver: ioctl data structures & APIs
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_IOCTL_H_
21#define _MWIFIEX_IOCTL_H_
22
23#include <net/mac80211.h>
24
25enum {
26 MWIFIEX_SCAN_TYPE_UNCHANGED = 0,
27 MWIFIEX_SCAN_TYPE_ACTIVE,
28 MWIFIEX_SCAN_TYPE_PASSIVE
29};
30
31struct mwifiex_user_scan {
32 u32 scan_cfg_len;
33 u8 scan_cfg_buf[1];
34};
35
36#define MWIFIEX_PROMISC_MODE 1
37#define MWIFIEX_MULTICAST_MODE 2
38#define MWIFIEX_ALL_MULTI_MODE 4
39#define MWIFIEX_MAX_MULTICAST_LIST_SIZE 32
40
41struct mwifiex_multicast_list {
42 u32 mode;
43 u32 num_multicast_addr;
44 u8 mac_list[MWIFIEX_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
45};
46
47struct mwifiex_chan_freq {
48 u32 channel;
49 u32 freq;
50};
51
52struct mwifiex_ssid_bssid {
53 struct mwifiex_802_11_ssid ssid;
54 u8 bssid[ETH_ALEN];
55};
56
57enum {
58 BAND_B = 1,
59 BAND_G = 2,
60 BAND_A = 4,
61 BAND_GN = 8,
62 BAND_AN = 16,
63};
64
65#define NO_SEC_CHANNEL 0
66#define SEC_CHANNEL_ABOVE 1
67#define SEC_CHANNEL_BELOW 3
68
69struct mwifiex_ds_band_cfg {
70 u32 config_bands;
71 u32 adhoc_start_band;
72 u32 adhoc_channel;
73 u32 sec_chan_offset;
74};
75
76enum {
77 ADHOC_IDLE,
78 ADHOC_STARTED,
79 ADHOC_JOINED,
80 ADHOC_COALESCED
81};
82
83struct mwifiex_ds_get_stats {
84 u32 mcast_tx_frame;
85 u32 failed;
86 u32 retry;
87 u32 multi_retry;
88 u32 frame_dup;
89 u32 rts_success;
90 u32 rts_failure;
91 u32 ack_failure;
92 u32 rx_frag;
93 u32 mcast_rx_frame;
94 u32 fcs_error;
95 u32 tx_frame;
96 u32 wep_icv_error[4];
97};
98
99#define BCN_RSSI_AVG_MASK 0x00000002
100#define BCN_NF_AVG_MASK 0x00000200
101#define ALL_RSSI_INFO_MASK 0x00000fff
102
103struct mwifiex_ds_get_signal {
104 /*
105 * Bit0: Last Beacon RSSI, Bit1: Average Beacon RSSI,
106 * Bit2: Last Data RSSI, Bit3: Average Data RSSI,
107 * Bit4: Last Beacon SNR, Bit5: Average Beacon SNR,
108 * Bit6: Last Data SNR, Bit7: Average Data SNR,
109 * Bit8: Last Beacon NF, Bit9: Average Beacon NF,
110 * Bit10: Last Data NF, Bit11: Average Data NF
111 */
112 u16 selector;
113 s16 bcn_rssi_last;
114 s16 bcn_rssi_avg;
115 s16 data_rssi_last;
116 s16 data_rssi_avg;
117 s16 bcn_snr_last;
118 s16 bcn_snr_avg;
119 s16 data_snr_last;
120 s16 data_snr_avg;
121 s16 bcn_nf_last;
122 s16 bcn_nf_avg;
123 s16 data_nf_last;
124 s16 data_nf_avg;
125};
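/*
 * A minimal sketch of composing the selector from the masks defined above
 * (how the filled structure is handed to the driver is not shown and is
 * left out of this sketch):
 *
 *	struct mwifiex_ds_get_signal signal;
 *
 *	memset(&signal, 0, sizeof(signal));
 *	signal.selector = BCN_RSSI_AVG_MASK | BCN_NF_AVG_MASK;
 *
 * or ALL_RSSI_INFO_MASK to request every field at once.
 */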
126
127#define MWIFIEX_MAX_VER_STR_LEN 128
128
129struct mwifiex_ver_ext {
130 u32 version_str_sel;
131 char version_str[MWIFIEX_MAX_VER_STR_LEN];
132};
133
134struct mwifiex_bss_info {
135 u32 bss_mode;
136 struct mwifiex_802_11_ssid ssid;
137 u32 scan_table_idx;
138 u32 bss_chan;
139 u32 region_code;
140 u32 media_connected;
141 u32 max_power_level;
142 u32 min_power_level;
143 u32 adhoc_state;
144 signed int bcn_nf_last;
145 u32 wep_status;
146 u32 is_hs_configured;
147 u32 is_deep_sleep;
148 u8 bssid[ETH_ALEN];
149};
150
151#define MAX_NUM_TID 8
152
153#define MAX_RX_WINSIZE 64
154
155struct mwifiex_ds_rx_reorder_tbl {
156 u16 tid;
157 u8 ta[ETH_ALEN];
158 u32 start_win;
159 u32 win_size;
160 u32 buffer[MAX_RX_WINSIZE];
161};
162
163struct mwifiex_ds_tx_ba_stream_tbl {
164 u16 tid;
165 u8 ra[ETH_ALEN];
166};
167
168#define DBG_CMD_NUM 5
169
170struct mwifiex_debug_info {
171 u32 int_counter;
172 u32 packets_out[MAX_NUM_TID];
173 u32 max_tx_buf_size;
174 u32 tx_buf_size;
175 u32 curr_tx_buf_size;
176 u32 tx_tbl_num;
177 struct mwifiex_ds_tx_ba_stream_tbl
178 tx_tbl[MWIFIEX_MAX_TX_BASTREAM_SUPPORTED];
179 u32 rx_tbl_num;
180 struct mwifiex_ds_rx_reorder_tbl rx_tbl
181 [MWIFIEX_MAX_RX_BASTREAM_SUPPORTED];
182 u16 ps_mode;
183 u32 ps_state;
184 u8 is_deep_sleep;
185 u8 pm_wakeup_card_req;
186 u32 pm_wakeup_fw_try;
187 u8 is_hs_configured;
188 u8 hs_activated;
189 u32 num_cmd_host_to_card_failure;
190 u32 num_cmd_sleep_cfm_host_to_card_failure;
191 u32 num_tx_host_to_card_failure;
192 u32 num_event_deauth;
193 u32 num_event_disassoc;
194 u32 num_event_link_lost;
195 u32 num_cmd_deauth;
196 u32 num_cmd_assoc_success;
197 u32 num_cmd_assoc_failure;
198 u32 num_tx_timeout;
199 u32 num_cmd_timeout;
200 u16 timeout_cmd_id;
201 u16 timeout_cmd_act;
202 u16 last_cmd_id[DBG_CMD_NUM];
203 u16 last_cmd_act[DBG_CMD_NUM];
204 u16 last_cmd_index;
205 u16 last_cmd_resp_id[DBG_CMD_NUM];
206 u16 last_cmd_resp_index;
207 u16 last_event[DBG_CMD_NUM];
208 u16 last_event_index;
209 u8 data_sent;
210 u8 cmd_sent;
211 u8 cmd_resp_received;
212 u8 event_received;
213};
214
215#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000
216#define WAPI_RXPN_LEN 16
217
218struct mwifiex_ds_encrypt_key {
219 u32 key_disable;
220 u32 key_index;
221 u32 key_len;
222 u8 key_material[WLAN_MAX_KEY_LEN];
223 u8 mac_addr[ETH_ALEN];
224 u32 is_wapi_key;
225 u8 wapi_rxpn[WAPI_RXPN_LEN];
226};
227
228struct mwifiex_rate_cfg {
229 u32 action;
230 u32 is_rate_auto;
231 u32 rate;
232};
233
234struct mwifiex_power_cfg {
235 u32 is_power_auto;
236 u32 power_level;
237};
238
239struct mwifiex_ds_hs_cfg {
240 u32 is_invoke_hostcmd;
241 /* Bit0: non-unicast data
242 * Bit1: unicast data
243 * Bit2: mac events
244 * Bit3: magic packet
245 */
246 u32 conditions;
247 u32 gpio;
248 u32 gap;
249};
250
251#define DEEP_SLEEP_ON 1
252#define DEEP_SLEEP_IDLE_TIME 100
253#define PS_MODE_AUTO 1
254
255struct mwifiex_ds_auto_ds {
256 u16 auto_ds;
257 u16 idle_time;
258};
259
260struct mwifiex_ds_pm_cfg {
261 union {
262 u32 ps_mode;
263 struct mwifiex_ds_hs_cfg hs_cfg;
264 struct mwifiex_ds_auto_ds auto_deep_sleep;
265 u32 sleep_period;
266 } param;
267};
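/*
 * A minimal sketch of requesting auto deep sleep through this union, using
 * the DEEP_SLEEP_* defaults declared above; how the structure reaches the
 * driver is outside the scope of this sketch:
 *
 *	struct mwifiex_ds_pm_cfg pm_cfg;
 *
 *	memset(&pm_cfg, 0, sizeof(pm_cfg));
 *	pm_cfg.param.auto_deep_sleep.auto_ds = DEEP_SLEEP_ON;
 *	pm_cfg.param.auto_deep_sleep.idle_time = DEEP_SLEEP_IDLE_TIME;
 */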
268
269struct mwifiex_ds_11n_tx_cfg {
270 u16 tx_htcap;
271 u16 tx_htinfo;
272};
273
274struct mwifiex_ds_11n_amsdu_aggr_ctrl {
275 u16 enable;
276 u16 curr_buf_size;
277};
278
279#define MWIFIEX_NUM_OF_CMD_BUFFER 20
280#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
281
282enum {
283 MWIFIEX_IE_TYPE_GEN_IE = 0,
284 MWIFIEX_IE_TYPE_ARP_FILTER,
285};
286
287enum {
288 MWIFIEX_REG_MAC = 1,
289 MWIFIEX_REG_BBP,
290 MWIFIEX_REG_RF,
291 MWIFIEX_REG_PMIC,
292 MWIFIEX_REG_CAU,
293};
294
295struct mwifiex_ds_reg_rw {
296 __le32 type;
297 __le32 offset;
298 __le32 value;
299};
300
301#define MAX_EEPROM_DATA 256
302
303struct mwifiex_ds_read_eeprom {
304 __le16 offset;
305 __le16 byte_count;
306 u8 value[MAX_EEPROM_DATA];
307};
308
309struct mwifiex_ds_misc_gen_ie {
310 u32 type;
311 u32 len;
312 u8 ie_data[IW_CUSTOM_MAX];
313};
314
315struct mwifiex_ds_misc_cmd {
316 u32 len;
317 u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER];
318};
319
320#define MWIFIEX_MAX_VSIE_LEN (256)
321#define MWIFIEX_MAX_VSIE_NUM (8)
322#define MWIFIEX_VSIE_MASK_SCAN 0x01
323#define MWIFIEX_VSIE_MASK_ASSOC 0x02
324#define MWIFIEX_VSIE_MASK_ADHOC 0x04
325
326enum {
327 MWIFIEX_FUNC_INIT = 1,
328 MWIFIEX_FUNC_SHUTDOWN,
329};
330
331#endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
new file mode 100644
index 000000000000..5eab3dc29b1c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -0,0 +1,1423 @@
1/*
2 * Marvell Wireless LAN device driver: association and ad-hoc start/join
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28#define CAPINFO_MASK (~(BIT(15) | BIT(14) | BIT(12) | BIT(11) | BIT(9)))
29
30/*
31 * Append a generic IE as a pass through TLV to a TLV buffer.
32 *
33 * This function is called from the network join command preparation routine.
34 *
35 * If the IE buffer has been setup by the application, this routine appends
36 * the buffer as a pass through TLV type to the request.
37 */
38static int
39mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer)
40{
41 int ret_len = 0;
42 struct mwifiex_ie_types_header ie_header;
43
44 /* Null Checks */
45 if (!buffer)
46 return 0;
47 if (!(*buffer))
48 return 0;
49
50 /*
51 * If there is a generic ie buffer setup, append it to the return
52 * parameter buffer pointer.
53 */
54 if (priv->gen_ie_buf_len) {
55 dev_dbg(priv->adapter->dev, "info: %s: append generic %d to %p\n",
56 __func__, priv->gen_ie_buf_len, *buffer);
57
58 /* Wrap the generic IE buffer with a pass through TLV type */
59 ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
60 ie_header.len = cpu_to_le16(priv->gen_ie_buf_len);
61 memcpy(*buffer, &ie_header, sizeof(ie_header));
62
63 /* Increment the return size and the return buffer pointer
64 param */
65 *buffer += sizeof(ie_header);
66 ret_len += sizeof(ie_header);
67
68 /* Copy the generic IE buffer to the output buffer, advance
69 pointer */
70 memcpy(*buffer, priv->gen_ie_buf, priv->gen_ie_buf_len);
71
72 /* Increment the return size and the return buffer pointer
73 param */
74 *buffer += priv->gen_ie_buf_len;
75 ret_len += priv->gen_ie_buf_len;
76
77 /* Reset the generic IE buffer */
78 priv->gen_ie_buf_len = 0;
79 }
80
81 /* return the length appended to the buffer */
82 return ret_len;
83}
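/*
 * A minimal caller sketch for the helper above (local names assumed): the
 * helper advances the caller's write cursor itself, and the returned length
 * is what the caller folds into the running command size.
 *
 *	u32 cmd_append_size = 0;
 *	u8 *pos;                  (points into the command's TLV area)
 *
 *	cmd_append_size += mwifiex_cmd_append_generic_ie(priv, &pos);
 */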
84
85/*
86 * Append TSF tracking info from the scan table for the target AP.
87 *
88 * This function is called from the network join command preparation routine.
89 *
90 * The TSF TLV sent to the firmware contains two TSF values:
91 * - The TSF of the target AP from its previous beacon/probe response
92 * - The TSF timestamp of our local MAC at the time we observed the
93 * beacon/probe response.
94 *
95 * The firmware uses the timestamp values to set an initial TSF value
96 * in the MAC for the new association after a reassociation attempt.
97 */
98static int
99mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
100 struct mwifiex_bssdescriptor *bss_desc)
101{
102 struct mwifiex_ie_types_tsf_timestamp tsf_tlv;
103 __le64 tsf_val;
104
105 /* Null Checks */
106 if (buffer == NULL)
107 return 0;
108 if (*buffer == NULL)
109 return 0;
110
111 memset(&tsf_tlv, 0x00, sizeof(struct mwifiex_ie_types_tsf_timestamp));
112
113 tsf_tlv.header.type = cpu_to_le16(TLV_TYPE_TSFTIMESTAMP);
114 tsf_tlv.header.len = cpu_to_le16(2 * sizeof(tsf_val));
115
116 memcpy(*buffer, &tsf_tlv, sizeof(tsf_tlv.header));
117 *buffer += sizeof(tsf_tlv.header);
118
119 /* TSF at the time when beacon/probe_response was received */
120 tsf_val = cpu_to_le64(bss_desc->network_tsf);
121 memcpy(*buffer, &tsf_val, sizeof(tsf_val));
122 *buffer += sizeof(tsf_val);
123
124 memcpy(&tsf_val, bss_desc->time_stamp, sizeof(tsf_val));
125
126 dev_dbg(priv->adapter->dev, "info: %s: TSF offset calc: %016llx - "
127 "%016llx\n", __func__, tsf_val, bss_desc->network_tsf);
128
129 memcpy(*buffer, &tsf_val, sizeof(tsf_val));
130 *buffer += sizeof(tsf_val);
131
132 return sizeof(tsf_tlv.header) + (2 * sizeof(tsf_val));
133}
134
135/*
136 * This function finds out the common rates between rate1 and rate2.
137 *
138 * It will fill common rates in rate1 as output if found.
139 *
140 * NOTE: Setting the MSB of the basic rates needs to be taken
141 * care of, either before or after calling this function.
142 */
143static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
144 u32 rate1_size, u8 *rate2, u32 rate2_size)
145{
146 int ret;
147 u8 *ptr = rate1, *tmp;
148 u32 i, j;
149
150 tmp = kmalloc(rate1_size, GFP_KERNEL);
151 if (!tmp) {
152 dev_err(priv->adapter->dev, "failed to alloc tmp buf\n");
153 return -ENOMEM;
154 }
155
156 memcpy(tmp, rate1, rate1_size);
157 memset(rate1, 0, rate1_size);
158
159 for (i = 0; i < rate2_size && rate2[i]; i++) {
160 for (j = 0; j < rate1_size && tmp[j]; j++) {
161 /* Check common rate, excluding the bit for
162 basic rate */
163 if ((rate2[i] & 0x7F) == (tmp[j] & 0x7F)) {
164 *rate1++ = tmp[j];
165 break;
166 }
167 }
168 }
169
170 dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n",
171 priv->data_rate);
172
173 if (!priv->is_data_rate_auto) {
174 while (*ptr) {
175 if ((*ptr & 0x7f) == priv->data_rate) {
176 ret = 0;
177 goto done;
178 }
179 ptr++;
180 }
181 dev_err(priv->adapter->dev, "previously set fixed data rate %#x"
182 " is not compatible with the network\n",
183 priv->data_rate);
184
185 ret = -1;
186 goto done;
187 }
188
189 ret = 0;
190done:
191 kfree(tmp);
192 return ret;
193}
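/*
 * Worked example for the helper above: with rate1 = { 0x82, 0x84, 0x0b }
 * (basic 1 Mbps, basic 2 Mbps, 5.5 Mbps) and rate2 = { 0x02, 0x0b }, the
 * 0x7F mask strips the basic-rate bit before comparing, so rate1 is
 * rewritten to { 0x82, 0x0b }: the rates present in both lists, keeping
 * rate1's original basic-rate flags.
 */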
194
195/*
196 * This function creates the intersection of the rates supported by a
197 * target BSS and our adapter settings for use in an assoc/join command.
198 */
199static int
200mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
201 struct mwifiex_bssdescriptor *bss_desc,
202 u8 *out_rates, u32 *out_rates_size)
203{
204 u8 card_rates[MWIFIEX_SUPPORTED_RATES];
205 u32 card_rates_size;
206
207 /* Copy AP supported rates */
208 memcpy(out_rates, bss_desc->supported_rates, MWIFIEX_SUPPORTED_RATES);
209 /* Get the STA supported rates */
210 card_rates_size = mwifiex_get_active_data_rates(priv, card_rates);
211 /* Get the common rates between AP and STA supported rates */
212 if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES,
213 card_rates, card_rates_size)) {
214 *out_rates_size = 0;
215 dev_err(priv->adapter->dev, "%s: cannot get common rates\n",
216 __func__);
217 return -1;
218 }
219
220 *out_rates_size =
221 min_t(size_t, strlen(out_rates), MWIFIEX_SUPPORTED_RATES);
222
223 return 0;
224}
225
226/*
227 * This function updates the scan entry TSF timestamps to reflect
228 * a new association.
229 */
230static void
231mwifiex_update_tsf_timestamps(struct mwifiex_private *priv,
232 struct mwifiex_bssdescriptor *new_bss_desc)
233{
234 struct mwifiex_adapter *adapter = priv->adapter;
235 u32 table_idx;
236 long long new_tsf_base;
237 signed long long tsf_delta;
238
239 memcpy(&new_tsf_base, new_bss_desc->time_stamp, sizeof(new_tsf_base));
240
241 tsf_delta = new_tsf_base - new_bss_desc->network_tsf;
242
243 dev_dbg(adapter->dev, "info: TSF: update TSF timestamps, "
244 "0x%016llx -> 0x%016llx\n",
245 new_bss_desc->network_tsf, new_tsf_base);
246
247 for (table_idx = 0; table_idx < adapter->num_in_scan_table;
248 table_idx++)
249 adapter->scan_table[table_idx].network_tsf += tsf_delta;
250}
251
252/*
253 * This function appends a WAPI IE.
254 *
255 * This function is called from the network join command preparation routine.
256 *
257 * If the IE buffer has been setup by the application, this routine appends
258 * the buffer as a WAPI TLV type to the request.
259 */
260static int
261mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer)
262{
263 int retLen = 0;
264 struct mwifiex_ie_types_header ie_header;
265
266 /* Null Checks */
267 if (buffer == NULL)
268 return 0;
269 if (*buffer == NULL)
270 return 0;
271
272 /*
273 * If there is a wapi ie buffer setup, append it to the return
274 * parameter buffer pointer.
275 */
276 if (priv->wapi_ie_len) {
277 dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n",
278 priv->wapi_ie_len, *buffer);
279
280 /* Wrap the generic IE buffer with a pass through TLV type */
281 ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
282 ie_header.len = cpu_to_le16(priv->wapi_ie_len);
283 memcpy(*buffer, &ie_header, sizeof(ie_header));
284
285 /* Increment the return size and the return buffer pointer
286 param */
287 *buffer += sizeof(ie_header);
288 retLen += sizeof(ie_header);
289
290 /* Copy the wapi IE buffer to the output buffer, advance
291 pointer */
292 memcpy(*buffer, priv->wapi_ie, priv->wapi_ie_len);
293
294 /* Increment the return size and the return buffer pointer
295 param */
296 *buffer += priv->wapi_ie_len;
297 retLen += priv->wapi_ie_len;
298
299 }
300 /* return the length appended to the buffer */
301 return retLen;
302}
303
304/*
305 * This function appends rsn ie tlv for wpa/wpa2 security modes.
306 * It is called from the network join command preparation routine.
307 */
308static int mwifiex_append_rsn_ie_wpa_wpa2(struct mwifiex_private *priv,
309 u8 **buffer)
310{
311 struct mwifiex_ie_types_rsn_param_set *rsn_ie_tlv;
312 int rsn_ie_len;
313
314 if (!buffer || !(*buffer))
315 return 0;
316
317 rsn_ie_tlv = (struct mwifiex_ie_types_rsn_param_set *) (*buffer);
318 rsn_ie_tlv->header.type = cpu_to_le16((u16) priv->wpa_ie[0]);
319 rsn_ie_tlv->header.type = cpu_to_le16(
320 le16_to_cpu(rsn_ie_tlv->header.type) & 0x00FF);
321 rsn_ie_tlv->header.len = cpu_to_le16((u16) priv->wpa_ie[1]);
322 rsn_ie_tlv->header.len = cpu_to_le16(le16_to_cpu(rsn_ie_tlv->header.len)
323 & 0x00FF);
324 if (le16_to_cpu(rsn_ie_tlv->header.len) <= (sizeof(priv->wpa_ie) - 2))
325 memcpy(rsn_ie_tlv->rsn_ie, &priv->wpa_ie[2],
326 le16_to_cpu(rsn_ie_tlv->header.len));
327 else
328 return -1;
329
330 rsn_ie_len = sizeof(rsn_ie_tlv->header) +
331 le16_to_cpu(rsn_ie_tlv->header.len);
332 *buffer += rsn_ie_len;
333
334 return rsn_ie_len;
335}
336
337/*
338 * This function prepares command for association.
339 *
340 * This sets the following parameters -
341 * - Peer MAC address
342 * - Listen interval
343 * - Beacon interval
344 * - Capability information
345 *
346 * ...and the following TLVs, as required -
347 * - SSID TLV
348 * - PHY TLV
349 * - SS TLV
350 * - Rates TLV
351 * - Authentication TLV
352 * - Channel TLV
353 * - WPA/WPA2 IE
354 * - 11n TLV
355 * - Vendor specific TLV
356 * - WMM TLV
357 * - WAPI IE
358 * - Generic IE
359 * - TSF TLV
360 *
361 * Preparation also includes -
362 * - Setting command ID and proper size
363 * - Ensuring correct endian-ness
364 */
365int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
366 struct host_cmd_ds_command *cmd,
367 void *data_buf)
368{
369 struct host_cmd_ds_802_11_associate *assoc = &cmd->params.associate;
370 struct mwifiex_bssdescriptor *bss_desc;
371 struct mwifiex_ie_types_ssid_param_set *ssid_tlv;
372 struct mwifiex_ie_types_phy_param_set *phy_tlv;
373 struct mwifiex_ie_types_ss_param_set *ss_tlv;
374 struct mwifiex_ie_types_rates_param_set *rates_tlv;
375 struct mwifiex_ie_types_auth_type *auth_tlv;
376 struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
377 u8 rates[MWIFIEX_SUPPORTED_RATES];
378 u32 rates_size;
379 u16 tmp_cap;
380 u8 *pos;
381 int rsn_ie_len = 0;
382
383 bss_desc = (struct mwifiex_bssdescriptor *) data_buf;
384 pos = (u8 *) assoc;
385
386 mwifiex_cfg_tx_buf(priv, bss_desc);
387
388 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_ASSOCIATE);
389
390 /* Save so we know which BSS Desc to use in the response handler */
391 priv->attempted_bss_desc = bss_desc;
392
393 memcpy(assoc->peer_sta_addr,
394 bss_desc->mac_address, sizeof(assoc->peer_sta_addr));
395 pos += sizeof(assoc->peer_sta_addr);
396
397 /* Set the listen interval */
398 assoc->listen_interval = cpu_to_le16(priv->listen_interval);
399 /* Set the beacon period */
400 assoc->beacon_period = cpu_to_le16(bss_desc->beacon_period);
401
402 pos += sizeof(assoc->cap_info_bitmap);
403 pos += sizeof(assoc->listen_interval);
404 pos += sizeof(assoc->beacon_period);
405 pos += sizeof(assoc->dtim_period);
406
407 ssid_tlv = (struct mwifiex_ie_types_ssid_param_set *) pos;
408 ssid_tlv->header.type = cpu_to_le16(WLAN_EID_SSID);
409 ssid_tlv->header.len = cpu_to_le16((u16) bss_desc->ssid.ssid_len);
410 memcpy(ssid_tlv->ssid, bss_desc->ssid.ssid,
411 le16_to_cpu(ssid_tlv->header.len));
412 pos += sizeof(ssid_tlv->header) + le16_to_cpu(ssid_tlv->header.len);
413
414 phy_tlv = (struct mwifiex_ie_types_phy_param_set *) pos;
415 phy_tlv->header.type = cpu_to_le16(WLAN_EID_DS_PARAMS);
416 phy_tlv->header.len = cpu_to_le16(sizeof(phy_tlv->fh_ds.ds_param_set));
417 memcpy(&phy_tlv->fh_ds.ds_param_set,
418 &bss_desc->phy_param_set.ds_param_set.current_chan,
419 sizeof(phy_tlv->fh_ds.ds_param_set));
420 pos += sizeof(phy_tlv->header) + le16_to_cpu(phy_tlv->header.len);
421
422 ss_tlv = (struct mwifiex_ie_types_ss_param_set *) pos;
423 ss_tlv->header.type = cpu_to_le16(WLAN_EID_CF_PARAMS);
424 ss_tlv->header.len = cpu_to_le16(sizeof(ss_tlv->cf_ibss.cf_param_set));
425 pos += sizeof(ss_tlv->header) + le16_to_cpu(ss_tlv->header.len);
426
427 /* Get the common rates supported between the driver and the BSS Desc */
428 if (mwifiex_setup_rates_from_bssdesc
429 (priv, bss_desc, rates, &rates_size))
430 return -1;
431
432 /* Save the data rates into Current BSS state structure */
433 priv->curr_bss_params.num_of_rates = rates_size;
434 memcpy(&priv->curr_bss_params.data_rates, rates, rates_size);
435
436 /* Setup the Rates TLV in the association command */
437 rates_tlv = (struct mwifiex_ie_types_rates_param_set *) pos;
438 rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
439 rates_tlv->header.len = cpu_to_le16((u16) rates_size);
440 memcpy(rates_tlv->rates, rates, rates_size);
441 pos += sizeof(rates_tlv->header) + rates_size;
442 dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
443 rates_size);
444
445 /* Add the Authentication type to be used for Auth frames */
446 auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
447 auth_tlv->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
448 auth_tlv->header.len = cpu_to_le16(sizeof(auth_tlv->auth_type));
449 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
450 auth_tlv->auth_type = cpu_to_le16(
451 (u16) priv->sec_info.authentication_mode);
452 else
453 auth_tlv->auth_type = cpu_to_le16(NL80211_AUTHTYPE_OPEN_SYSTEM);
454
455 pos += sizeof(auth_tlv->header) + le16_to_cpu(auth_tlv->header.len);
456
457 if (IS_SUPPORT_MULTI_BANDS(priv->adapter)
458 && !(ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
459 && (!bss_desc->disable_11n)
460 && (priv->adapter->config_bands & BAND_GN
461 || priv->adapter->config_bands & BAND_AN)
462 && (bss_desc->bcn_ht_cap)
463 )
464 ) {
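		/*
		 * Plain-language sketch of the condition above: the adapter
		 * supports multiple bands and this will not be an 11n
		 * association (11n is unsupported or disabled for the
		 * firmware, the configured bands, or this BSS).
		 */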
465 /* Append a channel TLV for the channel the attempted AP was
466 found on */
467 chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
468 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
469 chan_tlv->header.len =
470 cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
471
472 memset(chan_tlv->chan_scan_param, 0x00,
473 sizeof(struct mwifiex_chan_scan_param_set));
474 chan_tlv->chan_scan_param[0].chan_number =
475 (bss_desc->phy_param_set.ds_param_set.current_chan);
476 dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
477 chan_tlv->chan_scan_param[0].chan_number);
478
479 chan_tlv->chan_scan_param[0].radio_type =
480 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
481
482 dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
483 chan_tlv->chan_scan_param[0].radio_type);
484 pos += sizeof(chan_tlv->header) +
485 sizeof(struct mwifiex_chan_scan_param_set);
486 }
487
488 if (!priv->wps.session_enable) {
489 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
490 rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
491
492 if (rsn_ie_len == -1)
493 return -1;
494 }
495
496 if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
497 && (!bss_desc->disable_11n)
498 && (priv->adapter->config_bands & BAND_GN
499 || priv->adapter->config_bands & BAND_AN))
500 mwifiex_cmd_append_11n_tlv(priv, bss_desc, &pos);
501
502 /* Append vendor specific IE TLV */
503 mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_ASSOC, &pos);
504
505 mwifiex_wmm_process_association_req(priv, &pos, &bss_desc->wmm_ie,
506 bss_desc->bcn_ht_cap);
507 if (priv->sec_info.wapi_enabled && priv->wapi_ie_len)
508 mwifiex_cmd_append_wapi_ie(priv, &pos);
509
510
511 mwifiex_cmd_append_generic_ie(priv, &pos);
512
513 mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc);
514
515 cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN);
516
517 /* Set the Capability info at last */
518 tmp_cap = bss_desc->cap_info_bitmap;
519
520 if (priv->adapter->config_bands == BAND_B)
521 tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
522
523 tmp_cap &= CAPINFO_MASK;
524 dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
525 tmp_cap, CAPINFO_MASK);
526 assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
527
528 return 0;
529}
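/*
 * Resulting command buffer layout (sketch, following the pointer
 * arithmetic above; optional TLVs appear only when applicable):
 *
 * .------------------------------------------------------------.
 * | peer_sta_addr | cap_info | listen_interval | beacon_period  |
 * | dtim_period                                                 |
 * .------------------------------------------------------------.
 * | SSID TLV | PHY TLV | SS TLV | Rates TLV | Auth TLV          |
 * .------------------------------------------------------------.
 * | Optional TLVs: Channel, WPA/WPA2 (RSN) IE, 11n, vendor      |
 * | specific, WMM, WAPI IE, generic IE, TSF                     |
 * .------------------------------------------------------------.
 *
 * cmd->size is the number of bytes written (pos - assoc) plus the
 * generic command header length S_DS_GEN.
 */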
530
531/*
532 * Association firmware command response handler
533 *
534 * The response buffer for the association command has the following
535 * memory layout.
536 *
 537 * For cases where an association response was not received (indicated
 538 * by the CapInfo and AId fields):
539 *
540 * .------------------------------------------------------------.
541 * | Header(4 * sizeof(t_u16)): Standard command response hdr |
542 * .------------------------------------------------------------.
543 * | cap_info/Error Return(t_u16): |
544 * | 0xFFFF(-1): Internal error |
545 * | 0xFFFE(-2): Authentication unhandled message |
546 * | 0xFFFD(-3): Authentication refused |
547 * | 0xFFFC(-4): Timeout waiting for AP response |
548 * .------------------------------------------------------------.
549 * | status_code(t_u16): |
550 * | If cap_info is -1: |
551 * | An internal firmware failure prevented the |
552 * | command from being processed. The status_code |
553 * | will be set to 1. |
554 * | |
555 * | If cap_info is -2: |
556 * | An authentication frame was received but was |
557 * | not handled by the firmware. IEEE Status |
558 * | code for the failure is returned. |
559 * | |
560 * | If cap_info is -3: |
561 * | An authentication frame was received and the |
562 * | status_code is the IEEE Status reported in the |
563 * | response. |
564 * | |
565 * | If cap_info is -4: |
566 * | (1) Association response timeout |
567 * | (2) Authentication response timeout |
568 * .------------------------------------------------------------.
569 * | a_id(t_u16): 0xFFFF |
570 * .------------------------------------------------------------.
571 *
572 *
573 * For cases where an association response was received, the IEEE
574 * standard association response frame is returned:
575 *
576 * .------------------------------------------------------------.
577 * | Header(4 * sizeof(t_u16)): Standard command response hdr |
578 * .------------------------------------------------------------.
579 * | cap_info(t_u16): IEEE Capability |
580 * .------------------------------------------------------------.
581 * | status_code(t_u16): IEEE Status Code |
582 * .------------------------------------------------------------.
583 * | a_id(t_u16): IEEE Association ID |
584 * .------------------------------------------------------------.
585 * | IEEE IEs(variable): Any received IEs comprising the |
586 * | remaining portion of a received |
587 * | association response frame. |
588 * .------------------------------------------------------------.
589 *
 590 * For simple handling, the status_code field can be used to determine
 591 * association success (0) or failure (non-zero).
592 */
593int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
594 struct host_cmd_ds_command *resp)
595{
596 struct mwifiex_adapter *adapter = priv->adapter;
597 int ret = 0;
598 struct ieee_types_assoc_rsp *assoc_rsp;
599 struct mwifiex_bssdescriptor *bss_desc;
600 u8 enable_data = true;
601
602 assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
603
604 priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
605 sizeof(priv->assoc_rsp_buf));
606
607 memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
608
609 if (le16_to_cpu(assoc_rsp->status_code)) {
610 priv->adapter->dbg.num_cmd_assoc_failure++;
611 dev_err(priv->adapter->dev, "ASSOC_RESP: association failed, "
612 "status code = %d, error = 0x%x, a_id = 0x%x\n",
613 le16_to_cpu(assoc_rsp->status_code),
614 le16_to_cpu(assoc_rsp->cap_info_bitmap),
615 le16_to_cpu(assoc_rsp->a_id));
616
617 ret = -1;
618 goto done;
619 }
620
621 /* Send a Media Connected event, according to the Spec */
622 priv->media_connected = true;
623
624 priv->adapter->ps_state = PS_STATE_AWAKE;
625 priv->adapter->pps_uapsd_mode = false;
626 priv->adapter->tx_lock_flag = false;
627
628 /* Set the attempted BSSID Index to current */
629 bss_desc = priv->attempted_bss_desc;
630
631 dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n",
632 bss_desc->ssid.ssid);
633
634 /* Make a copy of current BSSID descriptor */
635 memcpy(&priv->curr_bss_params.bss_descriptor,
636 bss_desc, sizeof(struct mwifiex_bssdescriptor));
637
638 /* Update curr_bss_params */
639 priv->curr_bss_params.bss_descriptor.channel
640 = bss_desc->phy_param_set.ds_param_set.current_chan;
641
642 priv->curr_bss_params.band = (u8) bss_desc->bss_band;
643
644 /*
645 * Adjust the timestamps in the scan table to be relative to the newly
646 * associated AP's TSF
647 */
648 mwifiex_update_tsf_timestamps(priv, bss_desc);
649
650 if (bss_desc->wmm_ie.vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC)
651 priv->curr_bss_params.wmm_enabled = true;
652 else
653 priv->curr_bss_params.wmm_enabled = false;
654
655 if ((priv->wmm_required || bss_desc->bcn_ht_cap)
656 && priv->curr_bss_params.wmm_enabled)
657 priv->wmm_enabled = true;
658 else
659 priv->wmm_enabled = false;
660
661 priv->curr_bss_params.wmm_uapsd_enabled = false;
662
663 if (priv->wmm_enabled)
664 priv->curr_bss_params.wmm_uapsd_enabled
665 = ((bss_desc->wmm_ie.qos_info_bitmap &
666 IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
667
668 dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
669 priv->curr_pkt_filter);
670 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
671 priv->wpa_is_gtk_set = false;
672
673 if (priv->wmm_enabled) {
674 /* Don't re-enable carrier until we get the WMM_GET_STATUS
675 event */
676 enable_data = false;
677 } else {
678 /* Since WMM is not enabled, setup the queues with the
679 defaults */
680 mwifiex_wmm_setup_queue_priorities(priv, NULL);
681 mwifiex_wmm_setup_ac_downgrade(priv);
682 }
683
684 if (enable_data)
685 dev_dbg(priv->adapter->dev,
686 "info: post association, re-enabling data flow\n");
687
688 /* Reset SNR/NF/RSSI values */
689 priv->data_rssi_last = 0;
690 priv->data_nf_last = 0;
691 priv->data_rssi_avg = 0;
692 priv->data_nf_avg = 0;
693 priv->bcn_rssi_last = 0;
694 priv->bcn_nf_last = 0;
695 priv->bcn_rssi_avg = 0;
696 priv->bcn_nf_avg = 0;
697 priv->rxpd_rate = 0;
698 priv->rxpd_htinfo = 0;
699
700 mwifiex_save_curr_bcn(priv);
701
702 priv->adapter->dbg.num_cmd_assoc_success++;
703
704 dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n");
705
706 /* Add the ra_list here for infra mode as there will be only 1 ra
707 always */
708 mwifiex_ralist_add(priv,
709 priv->curr_bss_params.bss_descriptor.mac_address);
710
711 if (!netif_carrier_ok(priv->netdev))
712 netif_carrier_on(priv->netdev);
713 if (netif_queue_stopped(priv->netdev))
714 netif_wake_queue(priv->netdev);
715
716 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
717 priv->scan_block = true;
718
719done:
720 /* Need to indicate IOCTL complete */
721 if (adapter->curr_cmd->wait_q_enabled) {
722 if (ret)
723 adapter->cmd_wait_q.status = -1;
724 else
725 adapter->cmd_wait_q.status = 0;
726 }
727
728 return ret;
729}
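/*
 * Sketch only: decoding the cap_info error sentinels documented above
 * for the case where no association response frame was received. The
 * helper name is hypothetical and is not used by the driver.
 */
static const char *example_assoc_cap_info_str(u16 cap_info)
{
	switch (cap_info) {
	case 0xFFFF:	/* -1 */
		return "internal firmware error";
	case 0xFFFE:	/* -2 */
		return "authentication unhandled message";
	case 0xFFFD:	/* -3 */
		return "authentication refused";
	case 0xFFFC:	/* -4 */
		return "timeout waiting for AP response";
	default:
		return "IEEE capability (association response received)";
	}
}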
730
731/*
732 * This function prepares command for ad-hoc start.
733 *
734 * Driver will fill up SSID, BSS mode, IBSS parameters, physical
735 * parameters, probe delay, and capability information. Firmware
736 * will fill up beacon period, basic rates and operational rates.
737 *
738 * In addition, the following TLVs are added -
739 * - Channel TLV
740 * - Vendor specific IE
741 * - WPA/WPA2 IE
742 * - HT Capabilities IE
743 * - HT Information IE
744 *
745 * Preparation also includes -
746 * - Setting command ID and proper size
747 * - Ensuring correct endian-ness
748 */
749int
750mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
751 struct host_cmd_ds_command *cmd, void *data_buf)
752{
753 int rsn_ie_len = 0;
754 struct mwifiex_adapter *adapter = priv->adapter;
755 struct host_cmd_ds_802_11_ad_hoc_start *adhoc_start =
756 &cmd->params.adhoc_start;
757 struct mwifiex_bssdescriptor *bss_desc;
758 u32 cmd_append_size = 0;
759 u32 i;
760 u16 tmp_cap;
761 uint16_t ht_cap_info;
762 struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
763
764 struct mwifiex_ie_types_htcap *ht_cap;
765 struct mwifiex_ie_types_htinfo *ht_info;
766 u8 *pos = (u8 *) adhoc_start +
767 sizeof(struct host_cmd_ds_802_11_ad_hoc_start);
768
769 if (!adapter)
770 return -1;
771
772 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_START);
773
774 bss_desc = &priv->curr_bss_params.bss_descriptor;
775 priv->attempted_bss_desc = bss_desc;
776
777 /*
778 * Fill in the parameters for 2 data structures:
779 * 1. struct host_cmd_ds_802_11_ad_hoc_start command
780 * 2. bss_desc
781 * Driver will fill up SSID, bss_mode,IBSS param, Physical Param,
782 * probe delay, and Cap info.
783 * Firmware will fill up beacon period, Basic rates
784 * and operational rates.
785 */
786
787 memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
788
789 memcpy(adhoc_start->ssid,
790 ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
791 ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);
792
793 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
794 adhoc_start->ssid);
795
796 memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
797 memcpy(bss_desc->ssid.ssid,
798 ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
799 ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);
800
801 bss_desc->ssid.ssid_len =
802 ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len;
803
804 /* Set the BSS mode */
805 adhoc_start->bss_mode = HostCmd_BSS_MODE_IBSS;
806 bss_desc->bss_mode = NL80211_IFTYPE_ADHOC;
807 adhoc_start->beacon_period = cpu_to_le16(priv->beacon_period);
808 bss_desc->beacon_period = priv->beacon_period;
809
810 /* Set Physical param set */
811/* Parameter IE Id */
812#define DS_PARA_IE_ID 3
813/* Parameter IE length */
814#define DS_PARA_IE_LEN 1
815
816 adhoc_start->phy_param_set.ds_param_set.element_id = DS_PARA_IE_ID;
817 adhoc_start->phy_param_set.ds_param_set.len = DS_PARA_IE_LEN;
818
819 if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
820 (priv, adapter->adhoc_start_band, (u16)
821 priv->adhoc_channel)) {
822 struct mwifiex_chan_freq_power *cfp;
823 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
824 adapter->adhoc_start_band, FIRST_VALID_CHANNEL);
825 if (cfp)
826 priv->adhoc_channel = (u8) cfp->channel;
827 }
828
829 if (!priv->adhoc_channel) {
830 dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
831 return -1;
832 }
833
834 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
835 priv->adhoc_channel);
836
837 priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
838 priv->curr_bss_params.band = adapter->adhoc_start_band;
839
840 bss_desc->channel = priv->adhoc_channel;
841 adhoc_start->phy_param_set.ds_param_set.current_chan =
842 priv->adhoc_channel;
843
844 memcpy(&bss_desc->phy_param_set, &adhoc_start->phy_param_set,
845 sizeof(union ieee_types_phy_param_set));
846
847 /* Set IBSS param set */
848/* IBSS parameter IE Id */
849#define IBSS_PARA_IE_ID 6
850/* IBSS parameter IE length */
851#define IBSS_PARA_IE_LEN 2
852
853 adhoc_start->ss_param_set.ibss_param_set.element_id = IBSS_PARA_IE_ID;
854 adhoc_start->ss_param_set.ibss_param_set.len = IBSS_PARA_IE_LEN;
855 adhoc_start->ss_param_set.ibss_param_set.atim_window
856 = cpu_to_le16(priv->atim_window);
857 memcpy(&bss_desc->ss_param_set, &adhoc_start->ss_param_set,
858 sizeof(union ieee_types_ss_param_set));
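	/*
	 * Sketch of the two parameter IEs built above, as they end up on
	 * the wire: DS Parameter Set (id 3, len 1, payload = current
	 * channel) and IBSS Parameter Set (id 6, len 2, payload = ATIM
	 * window, little endian).
	 */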
859
860 /* Set Capability info */
861 bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_IBSS;
862 tmp_cap = le16_to_cpu(adhoc_start->cap_info_bitmap);
863 tmp_cap &= ~WLAN_CAPABILITY_ESS;
864 tmp_cap |= WLAN_CAPABILITY_IBSS;
865
866 /* Set up privacy in bss_desc */
867 if (priv->sec_info.encryption_mode) {
868 /* Ad-Hoc capability privacy on */
869 dev_dbg(adapter->dev,
870 "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
871 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
872 tmp_cap |= WLAN_CAPABILITY_PRIVACY;
873 } else {
874 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set,"
875 " setting privacy to ACCEPT ALL\n");
876 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
877 }
878
879 memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
880 mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
881 if ((adapter->adhoc_start_band & BAND_G) &&
882 (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
883 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
884 HostCmd_ACT_GEN_SET, 0,
885 &priv->curr_pkt_filter)) {
886 dev_err(adapter->dev,
887 "ADHOC_S_CMD: G Protection config failed\n");
888 return -1;
889 }
890 }
891 /* Find the last non zero */
892 for (i = 0; i < sizeof(adhoc_start->DataRate) &&
893 adhoc_start->DataRate[i];
894 i++)
895 ;
896
897 priv->curr_bss_params.num_of_rates = i;
898
899 /* Copy the ad-hoc creating rates into Current BSS rate structure */
900 memcpy(&priv->curr_bss_params.data_rates,
901 &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
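	/*
	 * Worked example with a hypothetical rate set: for
	 * DataRate = { 0x82, 0x84, 0x8b, 0x96, 0x00, ... } the loop above
	 * stops at the first zero entry, so num_of_rates is 4 and exactly
	 * those four bytes are copied into curr_bss_params.data_rates.
	 */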
902
903 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
904 adhoc_start->DataRate[0], adhoc_start->DataRate[1],
905 adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
906
907 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
908
909 if (IS_SUPPORT_MULTI_BANDS(adapter)) {
910 /* Append a channel TLV */
911 chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
912 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
913 chan_tlv->header.len =
914 cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
915
916 memset(chan_tlv->chan_scan_param, 0x00,
917 sizeof(struct mwifiex_chan_scan_param_set));
918 chan_tlv->chan_scan_param[0].chan_number =
919 (u8) priv->curr_bss_params.bss_descriptor.channel;
920
921 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
922 chan_tlv->chan_scan_param[0].chan_number);
923
924 chan_tlv->chan_scan_param[0].radio_type
925 = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
926 if (adapter->adhoc_start_band & BAND_GN
927 || adapter->adhoc_start_band & BAND_AN) {
928 if (adapter->chan_offset == SEC_CHANNEL_ABOVE)
929 chan_tlv->chan_scan_param[0].radio_type |=
930 SECOND_CHANNEL_ABOVE;
931 else if (adapter->chan_offset == SEC_CHANNEL_BELOW)
932 chan_tlv->chan_scan_param[0].radio_type |=
933 SECOND_CHANNEL_BELOW;
934 }
935 dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
936 chan_tlv->chan_scan_param[0].radio_type);
937 pos += sizeof(chan_tlv->header) +
938 sizeof(struct mwifiex_chan_scan_param_set);
939 cmd_append_size +=
940 sizeof(chan_tlv->header) +
941 sizeof(struct mwifiex_chan_scan_param_set);
942 }
943
944 /* Append vendor specific IE TLV */
945 cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
946 MWIFIEX_VSIE_MASK_ADHOC, &pos);
947
948 if (priv->sec_info.wpa_enabled) {
949 rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
950 if (rsn_ie_len == -1)
951 return -1;
952 cmd_append_size += rsn_ie_len;
953 }
954
955 if (adapter->adhoc_11n_enabled) {
956 {
957 ht_cap = (struct mwifiex_ie_types_htcap *) pos;
958 memset(ht_cap, 0,
959 sizeof(struct mwifiex_ie_types_htcap));
960 ht_cap->header.type =
961 cpu_to_le16(WLAN_EID_HT_CAPABILITY);
962 ht_cap->header.len =
963 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
964 ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
965
966 ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
967 if (adapter->chan_offset) {
968 ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
969 ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
970 ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
971 SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
972 }
973
974 ht_cap->ht_cap.ampdu_params_info
975 = IEEE80211_HT_MAX_AMPDU_64K;
976 ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
977 pos += sizeof(struct mwifiex_ie_types_htcap);
978 cmd_append_size +=
979 sizeof(struct mwifiex_ie_types_htcap);
980 }
981 {
982 ht_info = (struct mwifiex_ie_types_htinfo *) pos;
983 memset(ht_info, 0,
984 sizeof(struct mwifiex_ie_types_htinfo));
985 ht_info->header.type =
986 cpu_to_le16(WLAN_EID_HT_INFORMATION);
987 ht_info->header.len =
988 cpu_to_le16(sizeof(struct ieee80211_ht_info));
989 ht_info->ht_info.control_chan =
990 (u8) priv->curr_bss_params.bss_descriptor.
991 channel;
992 if (adapter->chan_offset) {
993 ht_info->ht_info.ht_param =
994 adapter->chan_offset;
995 ht_info->ht_info.ht_param |=
996 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
997 }
998 ht_info->ht_info.operation_mode =
999 cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
1000 ht_info->ht_info.basic_set[0] = 0xff;
1001 pos += sizeof(struct mwifiex_ie_types_htinfo);
1002 cmd_append_size +=
1003 sizeof(struct mwifiex_ie_types_htinfo);
1004 }
1005 }
1006
1007 cmd->size = cpu_to_le16((u16)
1008 (sizeof(struct host_cmd_ds_802_11_ad_hoc_start)
1009 + S_DS_GEN + cmd_append_size));
1010
1011 if (adapter->adhoc_start_band == BAND_B)
1012 tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
1013 else
1014 tmp_cap |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
1015
1016 adhoc_start->cap_info_bitmap = cpu_to_le16(tmp_cap);
1017
1018 return 0;
1019}
1020
1021/*
1022 * This function prepares command for ad-hoc join.
1023 *
1024 * Most of the parameters are set up by copying them from the target BSS
1025 * descriptor obtained in the scan response.
1026 *
1027 * In addition, the following TLVs are added -
1028 * - Channel TLV
1029 * - Vendor specific IE
1030 * - WPA/WPA2 IE
1031 * - 11n IE
1032 *
1033 * Preparation also includes -
1034 * - Setting command ID and proper size
1035 * - Ensuring correct endian-ness
1036 */
1037int
1038mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
1039 struct host_cmd_ds_command *cmd, void *data_buf)
1040{
1041 int rsn_ie_len = 0;
1042 struct host_cmd_ds_802_11_ad_hoc_join *adhoc_join =
1043 &cmd->params.adhoc_join;
1044 struct mwifiex_bssdescriptor *bss_desc =
1045 (struct mwifiex_bssdescriptor *) data_buf;
1046 struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
1047 u32 cmd_append_size = 0;
1048 u16 tmp_cap;
1049 u32 i, rates_size = 0;
1050 u16 curr_pkt_filter;
1051 u8 *pos =
1052 (u8 *) adhoc_join +
1053 sizeof(struct host_cmd_ds_802_11_ad_hoc_join);
1054
1055/* Use G protection */
1056#define USE_G_PROTECTION 0x02
1057 if (bss_desc->erp_flags & USE_G_PROTECTION) {
1058 curr_pkt_filter =
1059 priv->
1060 curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
1061
1062 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
1063 HostCmd_ACT_GEN_SET, 0,
1064 &curr_pkt_filter)) {
1065 dev_err(priv->adapter->dev,
1066 "ADHOC_J_CMD: G Protection config failed\n");
1067 return -1;
1068 }
1069 }
1070
1071 priv->attempted_bss_desc = bss_desc;
1072
1073 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_JOIN);
1074
1075 adhoc_join->bss_descriptor.bss_mode = HostCmd_BSS_MODE_IBSS;
1076
1077 adhoc_join->bss_descriptor.beacon_period
1078 = cpu_to_le16(bss_desc->beacon_period);
1079
1080 memcpy(&adhoc_join->bss_descriptor.bssid,
1081 &bss_desc->mac_address, ETH_ALEN);
1082
1083 memcpy(&adhoc_join->bss_descriptor.ssid,
1084 &bss_desc->ssid.ssid, bss_desc->ssid.ssid_len);
1085
1086 memcpy(&adhoc_join->bss_descriptor.phy_param_set,
1087 &bss_desc->phy_param_set,
1088 sizeof(union ieee_types_phy_param_set));
1089
1090 memcpy(&adhoc_join->bss_descriptor.ss_param_set,
1091 &bss_desc->ss_param_set, sizeof(union ieee_types_ss_param_set));
1092
1093 tmp_cap = bss_desc->cap_info_bitmap;
1094
1095 tmp_cap &= CAPINFO_MASK;
1096
1097 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: tmp_cap=%4X"
1098 " CAPINFO_MASK=%4lX\n", tmp_cap, CAPINFO_MASK);
1099
1100 /* Information on BSSID descriptor passed to FW */
1101 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID = %pM, SSID = %s\n",
1102 adhoc_join->bss_descriptor.bssid,
1103 adhoc_join->bss_descriptor.ssid);
1104
1105 for (i = 0; bss_desc->supported_rates[i] &&
1106 i < MWIFIEX_SUPPORTED_RATES;
1107 i++)
1108 ;
1109 rates_size = i;
1110
1111 /* Copy Data Rates from the Rates recorded in scan response */
1112 memset(adhoc_join->bss_descriptor.data_rates, 0,
1113 sizeof(adhoc_join->bss_descriptor.data_rates));
1114 memcpy(adhoc_join->bss_descriptor.data_rates,
1115 bss_desc->supported_rates, rates_size);
1116
1117 /* Copy the adhoc join rates into Current BSS state structure */
1118 priv->curr_bss_params.num_of_rates = rates_size;
1119 memcpy(&priv->curr_bss_params.data_rates, bss_desc->supported_rates,
1120 rates_size);
1121
1122 /* Copy the channel information */
1123 priv->curr_bss_params.bss_descriptor.channel = bss_desc->channel;
1124 priv->curr_bss_params.band = (u8) bss_desc->bss_band;
1125
1126 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED
1127 || priv->sec_info.wpa_enabled)
1128 tmp_cap |= WLAN_CAPABILITY_PRIVACY;
1129
1130 if (IS_SUPPORT_MULTI_BANDS(priv->adapter)) {
1131 /* Append a channel TLV */
1132 chan_tlv = (struct mwifiex_ie_types_chan_list_param_set *) pos;
1133 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
1134 chan_tlv->header.len =
1135 cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
1136
1137 memset(chan_tlv->chan_scan_param, 0x00,
1138 sizeof(struct mwifiex_chan_scan_param_set));
1139 chan_tlv->chan_scan_param[0].chan_number =
1140 (bss_desc->phy_param_set.ds_param_set.current_chan);
1141 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan = %d\n",
1142 chan_tlv->chan_scan_param[0].chan_number);
1143
1144 chan_tlv->chan_scan_param[0].radio_type =
1145 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
1146
1147 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band = %d\n",
1148 chan_tlv->chan_scan_param[0].radio_type);
1149 pos += sizeof(chan_tlv->header) +
1150 sizeof(struct mwifiex_chan_scan_param_set);
1151 cmd_append_size += sizeof(chan_tlv->header) +
1152 sizeof(struct mwifiex_chan_scan_param_set);
1153 }
1154
1155 if (priv->sec_info.wpa_enabled)
1156 rsn_ie_len = mwifiex_append_rsn_ie_wpa_wpa2(priv, &pos);
1157 if (rsn_ie_len == -1)
1158 return -1;
1159 cmd_append_size += rsn_ie_len;
1160
1161 if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
1162 cmd_append_size += mwifiex_cmd_append_11n_tlv(priv,
1163 bss_desc, &pos);
1164
1165 /* Append vendor specific IE TLV */
1166 cmd_append_size += mwifiex_cmd_append_vsie_tlv(priv,
1167 MWIFIEX_VSIE_MASK_ADHOC, &pos);
1168
1169 cmd->size = cpu_to_le16((u16)
1170 (sizeof(struct host_cmd_ds_802_11_ad_hoc_join)
1171 + S_DS_GEN + cmd_append_size));
1172
1173 adhoc_join->bss_descriptor.cap_info_bitmap = cpu_to_le16(tmp_cap);
1174
1175 return 0;
1176}
1177
1178/*
1179 * This function handles the command response of ad-hoc start and
1180 * ad-hoc join.
1181 *
1182 * In case of a successful ad-hoc start/join, the function generates
1183 * a device-connected event to notify the applications and saves the
1184 * beacon buffer.
1185 */
1186int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
1187 struct host_cmd_ds_command *resp)
1188{
1189 int ret = 0;
1190 struct mwifiex_adapter *adapter = priv->adapter;
1191 struct host_cmd_ds_802_11_ad_hoc_result *adhoc_result;
1192 struct mwifiex_bssdescriptor *bss_desc;
1193
1194 adhoc_result = &resp->params.adhoc_result;
1195
1196 bss_desc = priv->attempted_bss_desc;
1197
1198 /* Join result code 0 --> SUCCESS */
1199 if (le16_to_cpu(resp->result)) {
1200 dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
1201 if (priv->media_connected)
1202 mwifiex_reset_connect_state(priv);
1203
1204 memset(&priv->curr_bss_params.bss_descriptor,
1205 0x00, sizeof(struct mwifiex_bssdescriptor));
1206
1207 ret = -1;
1208 goto done;
1209 }
1210
1211 /* Send a Media Connected event, according to the Spec */
1212 priv->media_connected = true;
1213
1214 if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) {
1215 dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n",
1216 bss_desc->ssid.ssid);
1217
1218 /* Update the created network descriptor with the new BSSID */
1219 memcpy(bss_desc->mac_address,
1220 adhoc_result->bssid, ETH_ALEN);
1221
1222 priv->adhoc_state = ADHOC_STARTED;
1223 } else {
1224 /*
1225		 * The join command should have succeeded by now.
1226		 * If the BSSID has changed, compare by SSID instead of BSSID.
1227 */
1228 dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n",
1229 bss_desc->ssid.ssid);
1230
1231 /*
1232 * Make a copy of current BSSID descriptor, only needed for
1233 * join since the current descriptor is already being used
1234 * for adhoc start
1235 */
1236 memcpy(&priv->curr_bss_params.bss_descriptor,
1237 bss_desc, sizeof(struct mwifiex_bssdescriptor));
1238
1239 priv->adhoc_state = ADHOC_JOINED;
1240 }
1241
1242 dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n",
1243 priv->adhoc_channel);
1244 dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n",
1245 priv->curr_bss_params.bss_descriptor.mac_address);
1246
1247 if (!netif_carrier_ok(priv->netdev))
1248 netif_carrier_on(priv->netdev);
1249 if (netif_queue_stopped(priv->netdev))
1250 netif_wake_queue(priv->netdev);
1251
1252 mwifiex_save_curr_bcn(priv);
1253
1254done:
1255 /* Need to indicate IOCTL complete */
1256 if (adapter->curr_cmd->wait_q_enabled) {
1257 if (ret)
1258 adapter->cmd_wait_q.status = -1;
1259 else
1260 adapter->cmd_wait_q.status = 0;
1261
1262 }
1263
1264 return ret;
1265}
1266
1267/*
1268 * This function associates to a specific BSS discovered in a scan.
1269 *
1270 * It clears any past association response stored for application
1271 * retrieval and calls the command preparation routine to send the
1272 * command to firmware.
1273 */
1274int mwifiex_associate(struct mwifiex_private *priv,
1275 struct mwifiex_bssdescriptor *bss_desc)
1276{
1277 u8 current_bssid[ETH_ALEN];
1278
1279 /* Return error if the adapter or table entry is not marked as infra */
1280 if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
1281 (bss_desc->bss_mode != NL80211_IFTYPE_STATION))
1282 return -1;
1283
1284 memcpy(&current_bssid,
1285 &priv->curr_bss_params.bss_descriptor.mac_address,
1286 sizeof(current_bssid));
1287
1288 /* Clear any past association response stored for application
1289 retrieval */
1290 priv->assoc_rsp_size = 0;
1291
1292 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_ASSOCIATE,
1293 HostCmd_ACT_GEN_SET, 0, bss_desc);
1294}
1295
1296/*
1297 * This function starts an ad-hoc network.
1298 *
1299 * It calls the command preparation routine to send the command to firmware.
1300 */
1301int
1302mwifiex_adhoc_start(struct mwifiex_private *priv,
1303 struct mwifiex_802_11_ssid *adhoc_ssid)
1304{
1305 dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
1306 priv->adhoc_channel);
1307 dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
1308 priv->curr_bss_params.bss_descriptor.channel);
1309 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
1310 priv->curr_bss_params.band);
1311
1312 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START,
1313 HostCmd_ACT_GEN_SET, 0, adhoc_ssid);
1314}
1315
1316/*
1317 * This function joins an ad-hoc network found in a previous scan.
1318 *
1319 * It calls the command preparation routine to send the command to firmware,
1320 * if not already connected to the requested SSID.
1321 */
1322int mwifiex_adhoc_join(struct mwifiex_private *priv,
1323 struct mwifiex_bssdescriptor *bss_desc)
1324{
1325 dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n",
1326 priv->curr_bss_params.bss_descriptor.ssid.ssid);
1327 dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n",
1328 priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
1329 dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n",
1330 bss_desc->ssid.ssid);
1331 dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n",
1332 bss_desc->ssid.ssid_len);
1333
1334 /* Check if the requested SSID is already joined */
1335 if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
1336 !mwifiex_ssid_cmp(&bss_desc->ssid,
1337 &priv->curr_bss_params.bss_descriptor.ssid) &&
1338 (priv->curr_bss_params.bss_descriptor.bss_mode ==
1339 NL80211_IFTYPE_ADHOC)) {
1340 dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID"
1341 " is the same as current; not attempting to re-join\n");
1342 return -1;
1343 }
1344
1345 dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
1346 priv->curr_bss_params.bss_descriptor.channel);
1347 dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
1348 priv->curr_bss_params.band);
1349
1350 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
1351 HostCmd_ACT_GEN_SET, 0, bss_desc);
1352}
1353
1354/*
1355 * This function deauthenticates/disconnects from infra network by sending
1356 * deauthentication request.
1357 */
1358static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
1359{
1360 u8 mac_address[ETH_ALEN];
1361 int ret;
1362 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
1363
1364 if (mac) {
1365 if (!memcmp(mac, zero_mac, sizeof(zero_mac)))
1366 memcpy((u8 *) &mac_address,
1367 (u8 *) &priv->curr_bss_params.bss_descriptor.
1368 mac_address, ETH_ALEN);
1369 else
1370 memcpy((u8 *) &mac_address, (u8 *) mac, ETH_ALEN);
1371 } else {
1372 memcpy((u8 *) &mac_address, (u8 *) &priv->curr_bss_params.
1373 bss_descriptor.mac_address, ETH_ALEN);
1374 }
1375
1376 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
1377 HostCmd_ACT_GEN_SET, 0, &mac_address);
1378
1379 return ret;
1380}
1381
1382/*
1383 * This function deauthenticates/disconnects from a BSS.
1384 *
1385 * In case of infra mode, it sends a deauthentication request, and
1386 * in case of ad-hoc mode, a stop network request is sent to the firmware.
1387 */
1388int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
1389{
1390 int ret = 0;
1391
1392 if (priv->media_connected) {
1393 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
1394 ret = mwifiex_deauthenticate_infra(priv, mac);
1395 } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
1396 ret = mwifiex_send_cmd_sync(priv,
1397 HostCmd_CMD_802_11_AD_HOC_STOP,
1398 HostCmd_ACT_GEN_SET, 0, NULL);
1399 }
1400 }
1401
1402 return ret;
1403}
1404EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
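/*
 * Usage sketch for the exported helper above: passing NULL (or an
 * all-zero MAC) deauthenticates from the currently associated BSS,
 * while any other address is used verbatim in the request.
 *
 *	mwifiex_deauthenticate(priv, NULL);
 */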
1405
1406/*
1407 * This function converts band to radio type used in channel TLV.
1408 */
1409u8
1410mwifiex_band_to_radio_type(u8 band)
1411{
1412 switch (band) {
1413 case BAND_A:
1414 case BAND_AN:
1415 case BAND_A | BAND_AN:
1416 return HostCmd_SCAN_RADIO_TYPE_A;
1417 case BAND_B:
1418 case BAND_G:
1419 case BAND_B | BAND_G:
1420 default:
1421 return HostCmd_SCAN_RADIO_TYPE_BG;
1422 }
1423}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
new file mode 100644
index 000000000000..f0582259c935
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -0,0 +1,1055 @@
1/*
2 * Marvell Wireless LAN device driver: major functions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "main.h"
21#include "wmm.h"
22#include "cfg80211.h"
23#include "11n.h"
24
25#define VERSION "1.0"
26
27const char driver_version[] = "mwifiex " VERSION " (%s) ";
28
29struct mwifiex_adapter *g_adapter;
30EXPORT_SYMBOL_GPL(g_adapter);
31
32static struct mwifiex_bss_attr mwifiex_bss_sta[] = {
33 {MWIFIEX_BSS_TYPE_STA, MWIFIEX_DATA_FRAME_TYPE_ETH_II, true, 0, 0},
34};
35
36static int drv_mode = DRV_MODE_STA;
37
38static char fw_name[32] = DEFAULT_FW_NAME;
39
40/* Supported drv_mode table */
41static struct mwifiex_drv_mode mwifiex_drv_mode_tbl[] = {
42 {
43 .drv_mode = DRV_MODE_STA,
44 .intf_num = ARRAY_SIZE(mwifiex_bss_sta),
45 .bss_attr = mwifiex_bss_sta,
46 },
47};
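/*
 * Sketch: supporting a further mode would mean adding a bss_attr array
 * and a matching table entry. Everything below is hypothetical and only
 * mirrors the initializer layout of mwifiex_bss_sta above.
 *
 *	static struct mwifiex_bss_attr mwifiex_bss_sta_uap[] = {
 *		{MWIFIEX_BSS_TYPE_STA, MWIFIEX_DATA_FRAME_TYPE_ETH_II, true, 0, 0},
 *		{MWIFIEX_BSS_TYPE_UAP, MWIFIEX_DATA_FRAME_TYPE_ETH_II, true, 0, 0},
 *	};
 */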
48
49/*
50 * This function registers the device and performs all the necessary
51 * initializations.
52 *
53 * The following initialization operations are performed -
54 * - Allocate adapter structure
55 * - Save interface specific operations table in adapter
56 * - Call interface specific initialization routine
57 * - Allocate private structures
58 * - Set default adapter structure parameters
59 * - Initialize locks
60 *
 61 * In case of any errors during initialization, this function also ensures
62 * proper cleanup before exiting.
63 */
64static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
65 struct mwifiex_drv_mode *drv_mode_ptr)
66{
67 struct mwifiex_adapter *adapter;
68 int i;
69
70 adapter = kzalloc(sizeof(struct mwifiex_adapter), GFP_KERNEL);
71 if (!adapter)
72 return -ENOMEM;
73
74 g_adapter = adapter;
75 adapter->card = card;
76
77 /* Save interface specific operations in adapter */
78 memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
79
 80	/* card-specific initialization has been deferred until now */
81 if (adapter->if_ops.init_if(adapter))
82 goto error;
83
84 adapter->priv_num = 0;
85 for (i = 0; i < drv_mode_ptr->intf_num; i++) {
86 adapter->priv[i] = NULL;
87
88 if (!drv_mode_ptr->bss_attr[i].active)
89 continue;
90
91 /* Allocate memory for private structure */
92 adapter->priv[i] = kzalloc(sizeof(struct mwifiex_private),
93 GFP_KERNEL);
94 if (!adapter->priv[i]) {
95 dev_err(adapter->dev, "%s: failed to alloc priv[%d]\n",
96 __func__, i);
97 goto error;
98 }
99
100 adapter->priv_num++;
101 adapter->priv[i]->adapter = adapter;
102 /* Save bss_type, frame_type & bss_priority */
103 adapter->priv[i]->bss_type = drv_mode_ptr->bss_attr[i].bss_type;
104 adapter->priv[i]->frame_type =
105 drv_mode_ptr->bss_attr[i].frame_type;
106 adapter->priv[i]->bss_priority =
107 drv_mode_ptr->bss_attr[i].bss_priority;
108
109 if (drv_mode_ptr->bss_attr[i].bss_type == MWIFIEX_BSS_TYPE_STA)
110 adapter->priv[i]->bss_role = MWIFIEX_BSS_ROLE_STA;
111 else if (drv_mode_ptr->bss_attr[i].bss_type ==
112 MWIFIEX_BSS_TYPE_UAP)
113 adapter->priv[i]->bss_role = MWIFIEX_BSS_ROLE_UAP;
114
115 /* Save bss_index & bss_num */
116 adapter->priv[i]->bss_index = i;
117 adapter->priv[i]->bss_num = drv_mode_ptr->bss_attr[i].bss_num;
118 }
119 adapter->drv_mode = drv_mode_ptr;
120
121 if (mwifiex_init_lock_list(adapter))
122 goto error;
123
124 init_timer(&adapter->cmd_timer);
125 adapter->cmd_timer.function = mwifiex_cmd_timeout_func;
126 adapter->cmd_timer.data = (unsigned long) adapter;
127
128 return 0;
129
130error:
131 dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n");
132
133 mwifiex_free_lock_list(adapter);
134 for (i = 0; i < drv_mode_ptr->intf_num; i++)
135 kfree(adapter->priv[i]);
136 kfree(adapter);
137
138 return -1;
139}
140
141/*
142 * This function unregisters the device and performs all the necessary
143 * cleanups.
144 *
145 * The following cleanup operations are performed -
146 * - Free the timers
147 * - Free beacon buffers
148 * - Free private structures
149 * - Free adapter structure
150 */
151static int mwifiex_unregister(struct mwifiex_adapter *adapter)
152{
153 s32 i;
154
155 del_timer(&adapter->cmd_timer);
156
157 /* Free private structures */
158 for (i = 0; i < adapter->priv_num; i++) {
159 if (adapter->priv[i]) {
160 mwifiex_free_curr_bcn(adapter->priv[i]);
161 kfree(adapter->priv[i]);
162 }
163 }
164
165 kfree(adapter);
166 return 0;
167}
168
169/*
170 * The main process.
171 *
172 * This function is the main procedure of the driver and handles various driver
173 * operations. It runs in a loop and provides the core functionalities.
174 *
175 * The main responsibilities of this function are -
176 * - Ensure concurrency control
177 * - Handle pending interrupts and call interrupt handlers
178 * - Wake up the card if required
179 * - Handle command responses and call response handlers
180 * - Handle events and call event handlers
181 * - Execute pending commands
182 * - Transmit pending data packets
183 */
184int mwifiex_main_process(struct mwifiex_adapter *adapter)
185{
186 int ret = 0;
187 unsigned long flags;
188
189 spin_lock_irqsave(&adapter->main_proc_lock, flags);
190
191 /* Check if already processing */
192 if (adapter->mwifiex_processing) {
193 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
194 goto exit_main_proc;
195 } else {
196 adapter->mwifiex_processing = true;
197 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
198 }
199process_start:
200 do {
201 if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
202 (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
203 break;
204
205 /* Handle pending interrupt if any */
206 if (adapter->int_status) {
207 if (adapter->hs_activated)
208 mwifiex_process_hs_config(adapter);
209 adapter->if_ops.process_int_status(adapter);
210 }
211
212 /* Need to wake up the card ? */
213 if ((adapter->ps_state == PS_STATE_SLEEP) &&
214 (adapter->pm_wakeup_card_req &&
215 !adapter->pm_wakeup_fw_try) &&
216 (is_command_pending(adapter)
217 || !mwifiex_wmm_lists_empty(adapter))) {
218 adapter->pm_wakeup_fw_try = true;
219 adapter->if_ops.wakeup(adapter);
220 continue;
221 }
222 if (IS_CARD_RX_RCVD(adapter)) {
223 adapter->pm_wakeup_fw_try = false;
224 if (adapter->ps_state == PS_STATE_SLEEP)
225 adapter->ps_state = PS_STATE_AWAKE;
226 } else {
227 /* We have tried to wakeup the card already */
228 if (adapter->pm_wakeup_fw_try)
229 break;
230 if (adapter->ps_state != PS_STATE_AWAKE ||
231 adapter->tx_lock_flag)
232 break;
233
234 if (adapter->scan_processing || adapter->data_sent
235 || mwifiex_wmm_lists_empty(adapter)) {
236 if (adapter->cmd_sent || adapter->curr_cmd
237 || (!is_command_pending(adapter)))
238 break;
239 }
240 }
241
242 /* Check for Cmd Resp */
243 if (adapter->cmd_resp_received) {
244 adapter->cmd_resp_received = false;
245 mwifiex_process_cmdresp(adapter);
246
247 /* call mwifiex back when init_fw is done */
248 if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) {
249 adapter->hw_status = MWIFIEX_HW_STATUS_READY;
250 mwifiex_init_fw_complete(adapter);
251 }
252 }
253
254 /* Check for event */
255 if (adapter->event_received) {
256 adapter->event_received = false;
257 mwifiex_process_event(adapter);
258 }
259
260 /* Check if we need to confirm Sleep Request
261 received previously */
262 if (adapter->ps_state == PS_STATE_PRE_SLEEP) {
263 if (!adapter->cmd_sent && !adapter->curr_cmd)
264 mwifiex_check_ps_cond(adapter);
265 }
266
267		/* The ps_state may have been changed during processing of
268 * Sleep Request event.
269 */
270 if ((adapter->ps_state == PS_STATE_SLEEP)
271 || (adapter->ps_state == PS_STATE_PRE_SLEEP)
272 || (adapter->ps_state == PS_STATE_SLEEP_CFM)
273 || adapter->tx_lock_flag)
274 continue;
275
276 if (!adapter->cmd_sent && !adapter->curr_cmd) {
277 if (mwifiex_exec_next_cmd(adapter) == -1) {
278 ret = -1;
279 break;
280 }
281 }
282
283 if (!adapter->scan_processing && !adapter->data_sent &&
284 !mwifiex_wmm_lists_empty(adapter)) {
285 mwifiex_wmm_process_tx(adapter);
286 if (adapter->hs_activated) {
287 adapter->is_hs_configured = false;
288 mwifiex_hs_activated_event
289 (mwifiex_get_priv
290 (adapter, MWIFIEX_BSS_ROLE_ANY),
291 false);
292 }
293 }
294
295 if (adapter->delay_null_pkt && !adapter->cmd_sent &&
296 !adapter->curr_cmd && !is_command_pending(adapter)
297 && mwifiex_wmm_lists_empty(adapter)) {
298 if (!mwifiex_send_null_packet
299 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
300 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
301 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET)) {
302 adapter->delay_null_pkt = false;
303 adapter->ps_state = PS_STATE_SLEEP;
304 }
305 break;
306 }
307 } while (true);
308
309 if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
310 goto process_start;
311
312 spin_lock_irqsave(&adapter->main_proc_lock, flags);
313 adapter->mwifiex_processing = false;
314 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
315
316exit_main_proc:
317 if (adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING)
318 mwifiex_shutdown_drv(adapter);
319 return ret;
320}
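/*
 * Sketch of how this loop is normally driven: producers such as the TX
 * path queue adapter->main_work, and the work handler further below
 * simply calls mwifiex_main_process(), e.g.
 *
 *	queue_work(adapter->workqueue, &adapter->main_work);
 *
 * as done in mwifiex_hard_start_xmit().
 */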
321
322/*
323 * This function initializes the software.
324 *
325 * The main work includes allocating and initializing the adapter structure
326 * and initializing the private structures.
327 */
328static int
329mwifiex_init_sw(void *card, struct mwifiex_if_ops *if_ops)
330{
331 int i;
332 struct mwifiex_drv_mode *drv_mode_ptr;
333
334 /* find mwifiex_drv_mode entry from mwifiex_drv_mode_tbl */
335 drv_mode_ptr = NULL;
336 for (i = 0; i < ARRAY_SIZE(mwifiex_drv_mode_tbl); i++) {
337 if (mwifiex_drv_mode_tbl[i].drv_mode == drv_mode) {
338 drv_mode_ptr = &mwifiex_drv_mode_tbl[i];
339 break;
340 }
341 }
342
343 if (!drv_mode_ptr) {
344 pr_err("invalid drv_mode=%d\n", drv_mode);
345 return -1;
346 }
347
348 if (mwifiex_register(card, if_ops, drv_mode_ptr))
349 return -1;
350
351 return 0;
352}
353
354/*
355 * This function frees the adapter structure.
356 *
357 * Additionally, this closes the netlink socket, frees the timers
358 * and private structures.
359 */
360static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
361{
362 if (!adapter) {
363 pr_err("%s: adapter is NULL\n", __func__);
364 return;
365 }
366
367 mwifiex_unregister(adapter);
368 pr_debug("info: %s: free adapter\n", __func__);
369}
370
371/*
372 * This function initializes the hardware and firmware.
373 *
374 * The main initialization steps followed are -
375 * - Download the correct firmware to card
376 * - Allocate and initialize the adapter structure
377 * - Initialize the private structures
378 * - Issue the init commands to firmware
379 */
380static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
381{
382 int ret, err;
383 struct mwifiex_fw_image fw;
384
385 memset(&fw, 0, sizeof(struct mwifiex_fw_image));
386
387 switch (adapter->revision_id) {
388 case SD8787_W0:
389 case SD8787_W1:
390 strcpy(fw_name, SD8787_W1_FW_NAME);
391 break;
392 case SD8787_A0:
393 case SD8787_A1:
394 strcpy(fw_name, SD8787_AX_FW_NAME);
395 break;
396 default:
397 break;
398 }
399
400 err = request_firmware(&adapter->firmware, fw_name, adapter->dev);
401 if (err < 0) {
402 dev_err(adapter->dev, "request_firmware() returned"
403 " error code %#x\n", err);
404 ret = -1;
405 goto done;
406 }
407 fw.fw_buf = (u8 *) adapter->firmware->data;
408 fw.fw_len = adapter->firmware->size;
409
410 ret = mwifiex_dnld_fw(adapter, &fw);
411 if (ret == -1)
412 goto done;
413
414 dev_notice(adapter->dev, "WLAN FW is active\n");
415
416 adapter->init_wait_q_woken = false;
417 ret = mwifiex_init_fw(adapter);
418 if (ret == -1) {
419 goto done;
420 } else if (!ret) {
421 adapter->hw_status = MWIFIEX_HW_STATUS_READY;
422 goto done;
423 }
424 /* Wait for mwifiex_init to complete */
425 wait_event_interruptible(adapter->init_wait_q,
426 adapter->init_wait_q_woken);
427 if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) {
428 ret = -1;
429 goto done;
430 }
431 ret = 0;
432
433done:
434 if (adapter->firmware)
435 release_firmware(adapter->firmware);
436 if (ret)
437 ret = -1;
438 return ret;
439}
440
441/*
 442 * This function fills per-packet metadata for a Tx buffer.
 443 *
 444 * It derives the packet priority (TID) from the precedence bits of the
 445 * IP TOS field of the given SKB and records the time the packet was
 446 * queued in the SKB timestamp.
447 */
448static void
449mwifiex_fill_buffer(struct sk_buff *skb)
450{
451 struct ethhdr *eth;
452 struct iphdr *iph;
453 struct timeval tv;
454 u8 tid = 0;
455
456 eth = (struct ethhdr *) skb->data;
457 switch (eth->h_proto) {
458 case __constant_htons(ETH_P_IP):
459 iph = ip_hdr(skb);
460 tid = IPTOS_PREC(iph->tos);
461 pr_debug("data: packet type ETH_P_IP: %04x, tid=%#x prio=%#x\n",
462 eth->h_proto, tid, skb->priority);
463 break;
464 case __constant_htons(ETH_P_ARP):
465 pr_debug("data: ARP packet: %04x\n", eth->h_proto);
466 default:
467 break;
468 }
469/* Bit shift to extract the IP precedence (TID) bits from the TOS field */
470#define IPTOS_OFFSET 5
471 tid = (tid >> IPTOS_OFFSET);
472 skb->priority = tid;
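	/*
	 * Worked example: for tos = 0xB8 (IP precedence 5),
	 * IPTOS_PREC(0xB8) = 0xB8 & 0xE0 = 0xA0 and 0xA0 >> 5 = 5, so the
	 * packet is queued with skb->priority (TID) 5.
	 */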
473 /* Record the current time the packet was queued; used to
474 determine the amount of time the packet was queued in
475 the driver before it was sent to the firmware.
476 The delay is then sent along with the packet to the
477 firmware for aggregate delay calculation for stats and
478 MSDU lifetime expiry.
479 */
480 do_gettimeofday(&tv);
481 skb->tstamp = timeval_to_ktime(tv);
482}
483
484/*
485 * CFG802.11 network device handler for open.
486 *
487 * Starts the data queue.
488 */
489static int
490mwifiex_open(struct net_device *dev)
491{
492 netif_start_queue(dev);
493 return 0;
494}
495
496/*
497 * CFG802.11 network device handler for close.
498 */
499static int
500mwifiex_close(struct net_device *dev)
501{
502 return 0;
503}
504
505/*
506 * CFG802.11 network device handler for data transmission.
507 */
508static int
509mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
510{
511 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
512 struct sk_buff *new_skb;
513 struct mwifiex_txinfo *tx_info;
514
515 dev_dbg(priv->adapter->dev, "data: %lu BSS(%d): Data <= kernel\n",
516 jiffies, priv->bss_index);
517
518 if (priv->adapter->surprise_removed) {
519 kfree_skb(skb);
520 priv->stats.tx_dropped++;
521 return 0;
522 }
523 if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
524 dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
525 kfree_skb(skb);
526 priv->stats.tx_dropped++;
527 return 0;
528 }
529 if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
530 dev_dbg(priv->adapter->dev,
531 "data: Tx: insufficient skb headroom %d\n",
532 skb_headroom(skb));
533 /* Insufficient skb headroom - allocate a new skb */
534 new_skb =
535 skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
536 if (unlikely(!new_skb)) {
 537			dev_err(priv->adapter->dev, "Tx: cannot allocate new_skb\n");
538 kfree_skb(skb);
539 priv->stats.tx_dropped++;
540 return 0;
541 }
542 kfree_skb(skb);
543 skb = new_skb;
 544		dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
545 skb_headroom(skb));
546 }
547
548 tx_info = MWIFIEX_SKB_TXCB(skb);
549 tx_info->bss_index = priv->bss_index;
550 mwifiex_fill_buffer(skb);
551
552 mwifiex_wmm_add_buf_txqueue(priv->adapter, skb);
553 atomic_inc(&priv->adapter->tx_pending);
554
555 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
556 netif_stop_queue(priv->netdev);
557 dev->trans_start = jiffies;
558 }
559
560 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
561
562 return 0;
563}
564
565/*
566 * CFG802.11 network device handler for setting MAC address.
567 */
568static int
569mwifiex_set_mac_address(struct net_device *dev, void *addr)
570{
571 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
572 struct sockaddr *hw_addr = (struct sockaddr *) addr;
573 int ret;
574
575 memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
576
577 /* Send request to firmware */
578 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
579 HostCmd_ACT_GEN_SET, 0, NULL);
580
581 if (!ret)
582 memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
583 else
584 dev_err(priv->adapter->dev, "set mac address failed: ret=%d"
585 "\n", ret);
586
587 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
588
589 return ret;
590}
591
592/*
593 * CFG802.11 network device handler for setting multicast list.
594 */
595static void mwifiex_set_multicast_list(struct net_device *dev)
596{
597 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
598 struct mwifiex_multicast_list mcast_list;
599
600 if (dev->flags & IFF_PROMISC) {
601 mcast_list.mode = MWIFIEX_PROMISC_MODE;
602 } else if (dev->flags & IFF_ALLMULTI ||
603 netdev_mc_count(dev) > MWIFIEX_MAX_MULTICAST_LIST_SIZE) {
604 mcast_list.mode = MWIFIEX_ALL_MULTI_MODE;
605 } else {
606 mcast_list.mode = MWIFIEX_MULTICAST_MODE;
607 if (netdev_mc_count(dev))
608 mcast_list.num_multicast_addr =
609 mwifiex_copy_mcast_addr(&mcast_list, dev);
610 }
611 mwifiex_request_set_multicast_list(priv, &mcast_list);
612}
613
614/*
615 * CFG802.11 network device handler for transmission timeout.
616 */
617static void
618mwifiex_tx_timeout(struct net_device *dev)
619{
620 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
621
622 dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
623 jiffies, priv->bss_index);
624 dev->trans_start = jiffies;
625 priv->num_tx_timeout++;
626}
627
628/*
629 * CFG802.11 network device handler for statistics retrieval.
630 */
631static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
632{
633 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
634
635 return &priv->stats;
636}
637
638/* Network device handlers */
639static const struct net_device_ops mwifiex_netdev_ops = {
640 .ndo_open = mwifiex_open,
641 .ndo_stop = mwifiex_close,
642 .ndo_start_xmit = mwifiex_hard_start_xmit,
643 .ndo_set_mac_address = mwifiex_set_mac_address,
644 .ndo_tx_timeout = mwifiex_tx_timeout,
645 .ndo_get_stats = mwifiex_get_stats,
646 .ndo_set_multicast_list = mwifiex_set_multicast_list,
647};
648
649/*
650 * This function initializes the private structure parameters.
651 *
652 * The following wait queues are initialized -
653 * - IOCTL wait queue
654 * - Command wait queue
655 * - Statistics wait queue
656 *
657 * ...and the following default parameters are set -
658 * - Current key index : Set to 0
659 * - Rate index : Set to auto
660 * - Media connected : Set to disconnected
661 * - Adhoc link sensed : Set to false
662 * - Nick name : Set to null
663 * - Number of Tx timeout : Set to 0
664 * - Device address : Set to current address
665 *
666 * In addition, the CFG80211 work queue is also created.
667 */
668static void
669mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev)
670{
671 dev->netdev_ops = &mwifiex_netdev_ops;
672 /* Initialize private structure */
673 priv->current_key_index = 0;
674 priv->media_connected = false;
675 memset(&priv->nick_name, 0, sizeof(priv->nick_name));
676 priv->num_tx_timeout = 0;
677 priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
678 INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
679 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
680}
681
682/*
683 * This function adds a new logical interface.
684 *
685 * It allocates, initializes and registers the interface by performing
 686 * the following operations -
687 * - Allocate a new net device structure
688 * - Assign device name
689 * - Register the new device with CFG80211 subsystem
690 * - Initialize semaphore and private structure
691 * - Register the new device with kernel
692 * - Create the complete debug FS structure if configured
693 */
694static struct mwifiex_private *mwifiex_add_interface(
695 struct mwifiex_adapter *adapter,
696 u8 bss_index, u8 bss_type)
697{
698 struct net_device *dev;
699 struct mwifiex_private *priv;
700 void *mdev_priv;
701
702 dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), "mlan%d",
703 ether_setup, 1);
704 if (!dev) {
705 dev_err(adapter->dev, "no memory available for netdevice\n");
706 goto error;
707 }
708
709 if (mwifiex_register_cfg80211(dev, adapter->priv[bss_index]->curr_addr,
710 adapter->priv[bss_index]) != 0) {
711 dev_err(adapter->dev, "cannot register netdevice with cfg80211\n");
712 goto error;
713 }
714 /* Save the priv pointer in netdev */
715 priv = adapter->priv[bss_index];
716 mdev_priv = netdev_priv(dev);
717 *((unsigned long *) mdev_priv) = (unsigned long) priv;
718
719 priv->netdev = dev;
720
721 sema_init(&priv->async_sem, 1);
722 priv->scan_pending_on_block = false;
723
724 mwifiex_init_priv_params(priv, dev);
725
726 SET_NETDEV_DEV(dev, adapter->dev);
727
728 /* Register network device */
729 if (register_netdev(dev)) {
730 dev_err(adapter->dev, "cannot register virtual network device\n");
731 goto error;
732 }
733
734 dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
735#ifdef CONFIG_DEBUG_FS
736 mwifiex_dev_debugfs_init(priv);
737#endif
738 return priv;
739error:
740 if (dev)
741 free_netdev(dev);
742 return NULL;
743}
744
745/*
746 * This function removes a logical interface.
747 *
748 * It deregisters, resets and frees the interface by performing
749 * the following operations -
750 * - Disconnect the device if connected, send wireless event to
751 * notify applications.
752 * - Remove the debug FS structure if configured
753 * - Unregister the device from kernel
754 * - Free the net device structure
755 * - Cancel all works and destroy work queue
756 * - Unregister and free the wireless device from CFG80211 subsystem
757 */
758static void
759mwifiex_remove_interface(struct mwifiex_adapter *adapter, u8 bss_index)
760{
761 struct net_device *dev;
762 struct mwifiex_private *priv = adapter->priv[bss_index];
763
764 if (!priv)
765 return;
766 dev = priv->netdev;
767
768 if (priv->media_connected)
769 priv->media_connected = false;
770
771#ifdef CONFIG_DEBUG_FS
772 mwifiex_dev_debugfs_remove(priv);
773#endif
774 /* Last reference is ours */
775 dev_dbg(adapter->dev, "info: %s: refcnt = %d\n",
776 dev->name, netdev_refcnt_read(dev));
777
778 if (dev->reg_state == NETREG_REGISTERED)
779 unregister_netdev(dev);
780
781 /* Clear the priv in adapter */
782 priv->netdev = NULL;
783 if (dev)
784 free_netdev(dev);
785
786 cancel_work_sync(&priv->cfg_workqueue);
787 flush_workqueue(priv->workqueue);
788 destroy_workqueue(priv->workqueue);
789 wiphy_unregister(priv->wdev->wiphy);
790 wiphy_free(priv->wdev->wiphy);
791 kfree(priv->wdev);
792}
793
794/*
795 * This function checks if a command is pending.
796 */
797int is_command_pending(struct mwifiex_adapter *adapter)
798{
799 unsigned long flags;
800 int is_cmd_pend_q_empty;
801
802 spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
803 is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
804 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
805
806 return !is_cmd_pend_q_empty;
807}
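/*
 * A minimal caller sketch, assuming a hypothetical polling loop; the real
 * call sites are elsewhere in the driver.  mwifiex_exec_next_cmd() is a
 * genuine prototype from main.h in this patch.
 *
 *	while (is_command_pending(adapter))
 *		mwifiex_exec_next_cmd(adapter);
 */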
808
809/*
810 * This function returns the correct private structure pointer based
811 * upon the BSS number.
812 */
813struct mwifiex_private *
814mwifiex_bss_index_to_priv(struct mwifiex_adapter *adapter, u8 bss_index)
815{
816 if (!adapter || (bss_index >= adapter->priv_num))
817 return NULL;
818 return adapter->priv[bss_index];
819}
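/*
 * A minimal usage sketch, assuming a hypothetical caller.  Because the
 * helper returns NULL for a missing adapter or an out-of-range index,
 * callers get a bounds-checked lookup instead of indexing
 * adapter->priv[] directly:
 *
 *	struct mwifiex_private *priv;
 *
 *	priv = mwifiex_bss_index_to_priv(adapter, bss_index);
 *	if (!priv)
 *		return -1;
 */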
820
821/*
822 * This is the main work queue function.
823 *
824 * It invokes the main process routine, which in turn handles the complete
825 * driver operations.
826 */
827static void mwifiex_main_work_queue(struct work_struct *work)
828{
829 struct mwifiex_adapter *adapter =
830 container_of(work, struct mwifiex_adapter, main_work);
831
832 if (adapter->surprise_removed)
833 return;
834 mwifiex_main_process(adapter);
835}
836
837/*
838 * This function flushes all pending work in the queue and destroys
839 * the main workqueue.
840 */
841static void
842mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
843{
844 flush_workqueue(adapter->workqueue);
845 destroy_workqueue(adapter->workqueue);
846 adapter->workqueue = NULL;
847}
848
849/*
850 * This function adds the card.
851 *
852 * This function performs the following major steps to set up the device -
853 * - Initialize software. This includes probing the card, registering
854 * the interface operations table, and allocating/initializing the
855 * adapter structure
856 * - Set up the netlink socket
857 * - Create and start the main work queue
858 * - Register the device
859 * - Initialize firmware and hardware
860 * - Add logical interfaces
861 */
862int
863mwifiex_add_card(void *card, struct semaphore *sem,
864 struct mwifiex_if_ops *if_ops)
865{
866 int i;
867 struct mwifiex_adapter *adapter;
868
869 if (down_interruptible(sem))
870 goto exit_sem_err;
871
872 if (mwifiex_init_sw(card, if_ops)) {
873 pr_err("%s: software init failed\n", __func__);
874 goto err_init_sw;
875 }
876
877 adapter = g_adapter;
878
879 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
880 adapter->surprise_removed = false;
881 init_waitqueue_head(&adapter->init_wait_q);
882 adapter->is_suspended = false;
883 adapter->hs_activated = false;
884 init_waitqueue_head(&adapter->hs_activate_wait_q);
885 adapter->cmd_wait_q_required = false;
886 init_waitqueue_head(&adapter->cmd_wait_q.wait);
887 adapter->cmd_wait_q.condition = false;
888 adapter->cmd_wait_q.status = 0;
889
890 adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
891 if (!adapter->workqueue)
892 goto err_kmalloc;
893
894 INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
895
896 /* Register the device. Fill in the private data structure with relevant
897 information from the card and request the required IRQ. */
898 if (adapter->if_ops.register_dev(adapter)) {
899 pr_err("%s: failed to register mwifiex device\n", __func__);
900 goto err_registerdev;
901 }
902
903 if (mwifiex_init_hw_fw(adapter)) {
904 pr_err("%s: firmware init failed\n", __func__);
905 goto err_init_fw;
906 }
907
908 /* Add interfaces */
909 for (i = 0; i < adapter->drv_mode->intf_num; i++) {
910 if (!mwifiex_add_interface(adapter, i,
911 adapter->drv_mode->bss_attr[i].bss_type)) {
912 goto err_add_intf;
913 }
914 }
915
916 up(sem);
917
918 return 0;
919
920err_add_intf:
921 for (i = 0; i < adapter->priv_num; i++)
922 mwifiex_remove_interface(adapter, i);
923err_init_fw:
924 pr_debug("info: %s: unregister device\n", __func__);
925 adapter->if_ops.unregister_dev(adapter);
926err_registerdev:
927 adapter->surprise_removed = true;
928 mwifiex_terminate_workqueue(adapter);
929err_kmalloc:
930 if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
931 (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
932 pr_debug("info: %s: shutdown mwifiex\n", __func__);
933 adapter->init_wait_q_woken = false;
934
935 if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
936 wait_event_interruptible(adapter->init_wait_q,
937 adapter->init_wait_q_woken);
938 }
939
940 mwifiex_free_adapter(adapter);
941
942err_init_sw:
943 up(sem);
944
945exit_sem_err:
946 return -1;
947}
948EXPORT_SYMBOL_GPL(mwifiex_add_card);
949
950/*
951 * This function removes the card.
952 *
953 * This function performs the following major steps to remove the device -
954 * - Stop data traffic
955 * - Shutdown firmware
956 * - Remove the logical interfaces
957 * - Terminate the work queue
958 * - Unregister the device
959 * - Free the adapter structure
960 */
961int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
962{
963 struct mwifiex_private *priv = NULL;
964 int i;
965
966 if (down_interruptible(sem))
967 goto exit_sem_err;
968
969 if (!adapter)
970 goto exit_remove;
971
972 adapter->surprise_removed = true;
973
974 /* Stop data */
975 for (i = 0; i < adapter->priv_num; i++) {
976 priv = adapter->priv[i];
977 if (priv) {
978 if (!netif_queue_stopped(priv->netdev))
979 netif_stop_queue(priv->netdev);
980 if (netif_carrier_ok(priv->netdev))
981 netif_carrier_off(priv->netdev);
982 }
983 }
984
985 dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n");
986 adapter->init_wait_q_woken = false;
987
988 if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
989 wait_event_interruptible(adapter->init_wait_q,
990 adapter->init_wait_q_woken);
991 dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n");
992 if (atomic_read(&adapter->rx_pending) ||
993 atomic_read(&adapter->tx_pending) ||
994 atomic_read(&adapter->cmd_pending)) {
995 dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, "
996 "cmd_pending=%d\n",
997 atomic_read(&adapter->rx_pending),
998 atomic_read(&adapter->tx_pending),
999 atomic_read(&adapter->cmd_pending));
1000 }
1001
1002 /* Remove interface */
1003 for (i = 0; i < adapter->priv_num; i++)
1004 mwifiex_remove_interface(adapter, i);
1005
1006 mwifiex_terminate_workqueue(adapter);
1007
1008 /* Unregister device */
1009 dev_dbg(adapter->dev, "info: unregister device\n");
1010 adapter->if_ops.unregister_dev(adapter);
1011 /* Free adapter structure */
1012 dev_dbg(adapter->dev, "info: free adapter\n");
1013 mwifiex_free_adapter(adapter);
1014
1015exit_remove:
1016 up(sem);
1017exit_sem_err:
1018 return 0;
1019}
1020EXPORT_SYMBOL_GPL(mwifiex_remove_card);
1021
1022/*
1023 * This function initializes the module.
1024 *
1025 * The debug FS is also initialized if configured.
1026 */
1027static int
1028mwifiex_init_module(void)
1029{
1030#ifdef CONFIG_DEBUG_FS
1031 mwifiex_debugfs_init();
1032#endif
1033 return 0;
1034}
1035
1036/*
1037 * This function cleans up the module.
1038 *
1039 * The debug FS is removed if available.
1040 */
1041static void
1042mwifiex_cleanup_module(void)
1043{
1044#ifdef CONFIG_DEBUG_FS
1045 mwifiex_debugfs_remove();
1046#endif
1047}
1048
1049module_init(mwifiex_init_module);
1050module_exit(mwifiex_cleanup_module);
1051
1052MODULE_AUTHOR("Marvell International Ltd.");
1053MODULE_DESCRIPTION("Marvell WiFi-Ex Driver version " VERSION);
1054MODULE_VERSION(VERSION);
1055MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
new file mode 100644
index 000000000000..672701dc2721
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -0,0 +1,1004 @@
1/*
2 * Marvell Wireless LAN device driver: major data structures and prototypes
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_MAIN_H_
21#define _MWIFIEX_MAIN_H_
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/sched.h>
26#include <linux/semaphore.h>
27#include <linux/ip.h>
28#include <linux/skbuff.h>
29#include <linux/if_arp.h>
30#include <linux/etherdevice.h>
31#include <net/sock.h>
32#include <net/lib80211.h>
33#include <linux/firmware.h>
34#include <linux/ctype.h>
35
36#include "decl.h"
37#include "ioctl.h"
38#include "util.h"
39#include "fw.h"
40
41extern const char driver_version[];
42extern struct mwifiex_adapter *g_adapter;
43
44enum {
45 MWIFIEX_ASYNC_CMD,
46 MWIFIEX_SYNC_CMD
47};
48
49#define DRV_MODE_STA 0x1
50
51#define SD8787_W0 0x30
52#define SD8787_W1 0x31
53#define SD8787_A0 0x40
54#define SD8787_A1 0x41
55
56#define DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
57#define SD8787_W1_FW_NAME "mrvl/sd8787_uapsta_w1.bin"
58#define SD8787_AX_FW_NAME "mrvl/sd8787_uapsta.bin"
59
60struct mwifiex_drv_mode {
61 u16 drv_mode;
62 u16 intf_num;
63 struct mwifiex_bss_attr *bss_attr;
64};
65
66
67#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ)
68
69#define MWIFIEX_TIMER_10S 10000
70#define MWIFIEX_TIMER_1S 1000
71
72#define MAX_TX_PENDING 60
73
74#define MWIFIEX_UPLD_SIZE (2312)
75
76#define MAX_EVENT_SIZE 1024
77
78#define ARP_FILTER_MAX_BUF_SIZE 68
79
80#define MWIFIEX_KEY_BUFFER_SIZE 16
81#define MWIFIEX_DEFAULT_LISTEN_INTERVAL 10
82#define MWIFIEX_MAX_REGION_CODE 7
83
84#define DEFAULT_BCN_AVG_FACTOR 8
85#define DEFAULT_DATA_AVG_FACTOR 8
86
87#define FIRST_VALID_CHANNEL 0xff
88#define DEFAULT_AD_HOC_CHANNEL 6
89#define DEFAULT_AD_HOC_CHANNEL_A 36
90
91#define DEFAULT_BCN_MISS_TIMEOUT 5
92
93#define MAX_SCAN_BEACON_BUFFER 8000
94
95#define SCAN_BEACON_ENTRY_PAD 6
96
97#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 200
98#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 200
99#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 110
100
101#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
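/*
 * Worked example for SCAN_RSSI(), assuming the byte holds a negative dBm
 * value in two's complement: -80 dBm is stored as 0xB0, and
 * SCAN_RSSI(0xB0) = 0x100 - 0xB0 = 0x50 = 80.  Larger SCAN_RSSI() values
 * therefore mean stronger signals, which is how the best-network search
 * in scan.c compares entries.
 */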
102
103#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
104
105#define RSN_GTK_OUI_OFFSET 2
106
107#define MWIFIEX_OUI_NOT_PRESENT 0
108#define MWIFIEX_OUI_PRESENT 1
109
110#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \
111 adapter->event_received || \
112 adapter->data_received)
113
114#define MWIFIEX_TYPE_CMD 1
115#define MWIFIEX_TYPE_DATA 0
116#define MWIFIEX_TYPE_EVENT 3
117
118#define DBG_CMD_NUM 5
119
120#define MAX_BITMAP_RATES_SIZE 10
121
122#define MAX_CHANNEL_BAND_BG 14
123
124#define MAX_FREQUENCY_BAND_BG 2484
125
126struct mwifiex_dbg {
127 u32 num_cmd_host_to_card_failure;
128 u32 num_cmd_sleep_cfm_host_to_card_failure;
129 u32 num_tx_host_to_card_failure;
130 u32 num_event_deauth;
131 u32 num_event_disassoc;
132 u32 num_event_link_lost;
133 u32 num_cmd_deauth;
134 u32 num_cmd_assoc_success;
135 u32 num_cmd_assoc_failure;
136 u32 num_tx_timeout;
137 u32 num_cmd_timeout;
138 u16 timeout_cmd_id;
139 u16 timeout_cmd_act;
140 u16 last_cmd_id[DBG_CMD_NUM];
141 u16 last_cmd_act[DBG_CMD_NUM];
142 u16 last_cmd_index;
143 u16 last_cmd_resp_id[DBG_CMD_NUM];
144 u16 last_cmd_resp_index;
145 u16 last_event[DBG_CMD_NUM];
146 u16 last_event_index;
147};
148
149enum MWIFIEX_HARDWARE_STATUS {
150 MWIFIEX_HW_STATUS_READY,
151 MWIFIEX_HW_STATUS_INITIALIZING,
152 MWIFIEX_HW_STATUS_FW_READY,
153 MWIFIEX_HW_STATUS_INIT_DONE,
154 MWIFIEX_HW_STATUS_RESET,
155 MWIFIEX_HW_STATUS_CLOSING,
156 MWIFIEX_HW_STATUS_NOT_READY
157};
158
159enum MWIFIEX_802_11_POWER_MODE {
160 MWIFIEX_802_11_POWER_MODE_CAM,
161 MWIFIEX_802_11_POWER_MODE_PSP
162};
163
164struct mwifiex_tx_param {
165 u32 next_pkt_len;
166};
167
168enum MWIFIEX_PS_STATE {
169 PS_STATE_AWAKE,
170 PS_STATE_PRE_SLEEP,
171 PS_STATE_SLEEP_CFM,
172 PS_STATE_SLEEP
173};
174
175struct mwifiex_add_ba_param {
176 u32 tx_win_size;
177 u32 rx_win_size;
178 u32 timeout;
179};
180
181struct mwifiex_tx_aggr {
182 u8 ampdu_user;
183 u8 ampdu_ap;
184 u8 amsdu;
185};
186
187struct mwifiex_ra_list_tbl {
188 struct list_head list;
189 struct sk_buff_head skb_head;
190 u8 ra[ETH_ALEN];
191 u32 total_pkts_size;
192 u32 is_11n_enabled;
193};
194
195struct mwifiex_tid_tbl {
196 struct list_head ra_list;
197 /* spin lock for tid table */
198 spinlock_t tid_tbl_lock;
199 struct mwifiex_ra_list_tbl *ra_list_curr;
200};
201
202#define WMM_HIGHEST_PRIORITY 7
203#define HIGH_PRIO_TID 7
204#define LOW_PRIO_TID 0
205
206struct mwifiex_wmm_desc {
207 struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
208 u32 packets_out[MAX_NUM_TID];
209 /* spin lock to protect ra_list */
210 spinlock_t ra_list_spinlock;
211 struct mwifiex_wmm_ac_status ac_status[IEEE80211_MAX_QUEUES];
212 enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_MAX_QUEUES];
213 u32 drv_pkt_delay_max;
214 u8 queue_priority[IEEE80211_MAX_QUEUES];
215 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */
216
217};
218
219struct mwifiex_802_11_security {
220 u8 wpa_enabled;
221 u8 wpa2_enabled;
222 u8 wapi_enabled;
223 u8 wapi_key_on;
224 enum MWIFIEX_802_11_WEP_STATUS wep_status;
225 u32 authentication_mode;
226 u32 encryption_mode;
227};
228
229struct ieee_types_header {
230 u8 element_id;
231 u8 len;
232} __packed;
233
234struct ieee_obss_scan_param {
235 u16 obss_scan_passive_dwell;
236 u16 obss_scan_active_dwell;
237 u16 bss_chan_width_trigger_scan_int;
238 u16 obss_scan_passive_total;
239 u16 obss_scan_active_total;
240 u16 bss_width_chan_trans_delay;
241 u16 obss_scan_active_threshold;
242} __packed;
243
244struct ieee_types_obss_scan_param {
245 struct ieee_types_header ieee_hdr;
246 struct ieee_obss_scan_param obss_scan;
247} __packed;
248
249#define MWIFIEX_SUPPORTED_RATES 14
250
251#define MWIFIEX_SUPPORTED_RATES_EXT 32
252
253#define IEEE_MAX_IE_SIZE 256
254
255struct ieee_types_vendor_specific {
256 struct ieee_types_vendor_header vend_hdr;
257 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
258} __packed;
259
260struct ieee_types_generic {
261 struct ieee_types_header ieee_hdr;
262 u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
263} __packed;
264
265struct mwifiex_bssdescriptor {
266 u8 mac_address[ETH_ALEN];
267 struct mwifiex_802_11_ssid ssid;
268 u32 privacy;
269 s32 rssi;
270 u32 channel;
271 u32 freq;
272 u16 beacon_period;
273 u8 erp_flags;
274 u32 bss_mode;
275 u8 supported_rates[MWIFIEX_SUPPORTED_RATES];
276 u8 data_rates[MWIFIEX_SUPPORTED_RATES];
277 /* Network band.
278 * BAND_B(0x01): 'b' band
279 * BAND_G(0x02): 'g' band
280 * BAND_A(0X04): 'a' band
281 */
282 u16 bss_band;
283 u64 network_tsf;
284 u8 time_stamp[8];
285 union ieee_types_phy_param_set phy_param_set;
286 union ieee_types_ss_param_set ss_param_set;
287 u16 cap_info_bitmap;
288 struct ieee_types_wmm_parameter wmm_ie;
289 u8 disable_11n;
290 struct ieee80211_ht_cap *bcn_ht_cap;
291 u16 ht_cap_offset;
292 struct ieee80211_ht_info *bcn_ht_info;
293 u16 ht_info_offset;
294 u8 *bcn_bss_co_2040;
295 u16 bss_co_2040_offset;
296 u8 *bcn_ext_cap;
297 u16 ext_cap_offset;
298 struct ieee_types_obss_scan_param *bcn_obss_scan;
299 u16 overlap_bss_offset;
300 struct ieee_types_vendor_specific *bcn_wpa_ie;
301 u16 wpa_offset;
302 struct ieee_types_generic *bcn_rsn_ie;
303 u16 rsn_offset;
304 struct ieee_types_generic *bcn_wapi_ie;
305 u16 wapi_offset;
306 u8 *beacon_buf;
307 u32 beacon_buf_size;
308 u32 beacon_buf_size_max;
309
310};
311
312struct mwifiex_current_bss_params {
313 struct mwifiex_bssdescriptor bss_descriptor;
314 u8 wmm_enabled;
315 u8 wmm_uapsd_enabled;
316 u8 band;
317 u32 num_of_rates;
318 u8 data_rates[MWIFIEX_SUPPORTED_RATES];
319};
320
321struct mwifiex_sleep_params {
322 u16 sp_error;
323 u16 sp_offset;
324 u16 sp_stable_time;
325 u8 sp_cal_control;
326 u8 sp_ext_sleep_clk;
327 u16 sp_reserved;
328};
329
330struct mwifiex_sleep_period {
331 u16 period;
332 u16 reserved;
333};
334
335struct mwifiex_wep_key {
336 u32 length;
337 u32 key_index;
338 u32 key_length;
339 u8 key_material[MWIFIEX_KEY_BUFFER_SIZE];
340};
341
342#define MAX_REGION_CHANNEL_NUM 2
343
344struct mwifiex_chan_freq_power {
345 u16 channel;
346 u32 freq;
347 u16 max_tx_power;
348 u8 unsupported;
349};
350
351enum state_11d_t {
352 DISABLE_11D = 0,
353 ENABLE_11D = 1,
354};
355
356#define MWIFIEX_MAX_TRIPLET_802_11D 83
357
358struct mwifiex_802_11d_domain_reg {
359 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
360 u8 no_of_triplet;
361 struct ieee80211_country_ie_triplet
362 triplet[MWIFIEX_MAX_TRIPLET_802_11D];
363};
364
365struct mwifiex_vendor_spec_cfg_ie {
366 u16 mask;
367 u16 flag;
368 u8 ie[MWIFIEX_MAX_VSIE_LEN];
369};
370
371struct wps {
372 u8 session_enable;
373};
374
375struct mwifiex_adapter;
376struct mwifiex_private;
377
378struct mwifiex_private {
379 struct mwifiex_adapter *adapter;
380 u8 bss_index;
381 u8 bss_type;
382 u8 bss_role;
383 u8 bss_priority;
384 u8 bss_num;
385 u8 frame_type;
386 u8 curr_addr[ETH_ALEN];
387 u8 media_connected;
388 u32 num_tx_timeout;
389 struct net_device *netdev;
390 struct net_device_stats stats;
391 u16 curr_pkt_filter;
392 u32 bss_mode;
393 u32 pkt_tx_ctrl;
394 u16 tx_power_level;
395 u8 max_tx_power_level;
396 u8 min_tx_power_level;
397 u8 tx_rate;
398 u8 tx_htinfo;
399 u8 rxpd_htinfo;
400 u8 rxpd_rate;
401 u16 rate_bitmap;
402 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
403 u32 data_rate;
404 u8 is_data_rate_auto;
405 u16 bcn_avg_factor;
406 u16 data_avg_factor;
407 s16 data_rssi_last;
408 s16 data_nf_last;
409 s16 data_rssi_avg;
410 s16 data_nf_avg;
411 s16 bcn_rssi_last;
412 s16 bcn_nf_last;
413 s16 bcn_rssi_avg;
414 s16 bcn_nf_avg;
415 struct mwifiex_bssdescriptor *attempted_bss_desc;
416 struct mwifiex_802_11_ssid prev_ssid;
417 u8 prev_bssid[ETH_ALEN];
418 struct mwifiex_current_bss_params curr_bss_params;
419 u16 beacon_period;
420 u16 listen_interval;
421 u16 atim_window;
422 u8 adhoc_channel;
423 u8 adhoc_is_link_sensed;
424 u8 adhoc_state;
425 struct mwifiex_802_11_security sec_info;
426 struct mwifiex_wep_key wep_key[NUM_WEP_KEYS];
427 u16 wep_key_curr_index;
428 u8 wpa_ie[256];
429 u8 wpa_ie_len;
430 u8 wpa_is_gtk_set;
431 struct host_cmd_ds_802_11_key_material aes_key;
432 u8 wapi_ie[256];
433 u8 wapi_ie_len;
434 u8 wmm_required;
435 u8 wmm_enabled;
436 u8 wmm_qosinfo;
437 struct mwifiex_wmm_desc wmm;
438 struct list_head tx_ba_stream_tbl_ptr;
439 /* spin lock for tx_ba_stream_tbl_ptr queue */
440 spinlock_t tx_ba_stream_tbl_lock;
441 struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
442 struct mwifiex_add_ba_param add_ba_param;
443 u16 rx_seq[MAX_NUM_TID];
444 struct list_head rx_reorder_tbl_ptr;
445 /* spin lock for rx_reorder_tbl_ptr queue */
446 spinlock_t rx_reorder_tbl_lock;
447 /* spin lock for Rx packets */
448 spinlock_t rx_pkt_lock;
449
450#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
451 u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
452 u32 assoc_rsp_size;
453
454#define MWIFIEX_GENIE_BUF_SIZE 256
455 u8 gen_ie_buf[MWIFIEX_GENIE_BUF_SIZE];
456 u8 gen_ie_buf_len;
457
458 struct mwifiex_vendor_spec_cfg_ie vs_ie[MWIFIEX_MAX_VSIE_NUM];
459
460#define MWIFIEX_ASSOC_TLV_BUF_SIZE 256
461 u8 assoc_tlv_buf[MWIFIEX_ASSOC_TLV_BUF_SIZE];
462 u8 assoc_tlv_buf_len;
463
464 u8 *curr_bcn_buf;
465 u32 curr_bcn_size;
466 /* spin lock for beacon buffer */
467 spinlock_t curr_bcn_buf_lock;
468 struct wireless_dev *wdev;
469 struct mwifiex_chan_freq_power cfp;
470 char version_str[128];
471#ifdef CONFIG_DEBUG_FS
472 struct dentry *dfs_dev_dir;
473#endif
474 u8 nick_name[16];
475 struct iw_statistics w_stats;
476 u16 current_key_index;
477 struct semaphore async_sem;
478 u8 scan_pending_on_block;
479 u8 report_scan_result;
480 struct cfg80211_scan_request *scan_request;
481 int scan_result_status;
482 int assoc_request;
483 u16 assoc_result;
484 int ibss_join_request;
485 u16 ibss_join_result;
486 bool disconnect;
487 u8 cfg_bssid[6];
488 struct workqueue_struct *workqueue;
489 struct work_struct cfg_workqueue;
490 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
491 struct wps wps;
492 u8 scan_block;
493};
494
495enum mwifiex_ba_status {
496 BA_STREAM_NOT_SETUP = 0,
497 BA_STREAM_SETUP_INPROGRESS,
498 BA_STREAM_SETUP_COMPLETE
499};
500
501struct mwifiex_tx_ba_stream_tbl {
502 struct list_head list;
503 int tid;
504 u8 ra[ETH_ALEN];
505 enum mwifiex_ba_status ba_status;
506};
507
508struct mwifiex_rx_reorder_tbl;
509
510struct reorder_tmr_cnxt {
511 struct timer_list timer;
512 struct mwifiex_rx_reorder_tbl *ptr;
513 struct mwifiex_private *priv;
514};
515
516struct mwifiex_rx_reorder_tbl {
517 struct list_head list;
518 int tid;
519 u8 ta[ETH_ALEN];
520 int start_win;
521 int win_size;
522 void **rx_reorder_ptr;
523 struct reorder_tmr_cnxt timer_context;
524};
525
526struct mwifiex_bss_prio_node {
527 struct list_head list;
528 struct mwifiex_private *priv;
529};
530
531struct mwifiex_bss_prio_tbl {
532 struct list_head bss_prio_head;
533 /* spin lock for bss priority */
534 spinlock_t bss_prio_lock;
535 struct mwifiex_bss_prio_node *bss_prio_cur;
536};
537
538struct cmd_ctrl_node {
539 struct list_head list;
540 struct mwifiex_private *priv;
541 u32 cmd_oid;
542 u32 cmd_flag;
543 struct sk_buff *cmd_skb;
544 struct sk_buff *resp_skb;
545 void *data_buf;
546 u32 wait_q_enabled;
547 struct sk_buff *skb;
548};
549
550struct mwifiex_if_ops {
551 int (*init_if) (struct mwifiex_adapter *);
552 void (*cleanup_if) (struct mwifiex_adapter *);
553 int (*check_fw_status) (struct mwifiex_adapter *, u32, int *);
554 int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
555 int (*register_dev) (struct mwifiex_adapter *);
556 void (*unregister_dev) (struct mwifiex_adapter *);
557 int (*enable_int) (struct mwifiex_adapter *);
558 int (*process_int_status) (struct mwifiex_adapter *);
559 int (*host_to_card) (struct mwifiex_adapter *, u8,
560 u8 *payload, u32 pkt_len,
561 struct mwifiex_tx_param *);
562 int (*wakeup) (struct mwifiex_adapter *);
563 int (*wakeup_complete) (struct mwifiex_adapter *);
564
565 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
566 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
567};
568
569struct mwifiex_adapter {
570 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
571 u8 priv_num;
572 struct mwifiex_drv_mode *drv_mode;
573 const struct firmware *firmware;
574 struct device *dev;
575 bool surprise_removed;
576 u32 fw_release_number;
577 u32 revision_id;
578 u16 init_wait_q_woken;
579 wait_queue_head_t init_wait_q;
580 void *card;
581 struct mwifiex_if_ops if_ops;
582 atomic_t rx_pending;
583 atomic_t tx_pending;
584 atomic_t cmd_pending;
585 struct workqueue_struct *workqueue;
586 struct work_struct main_work;
587 struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM];
588 /* spin lock for init/shutdown */
589 spinlock_t mwifiex_lock;
590 /* spin lock for main process */
591 spinlock_t main_proc_lock;
592 u32 mwifiex_processing;
593 u16 max_tx_buf_size;
594 u16 tx_buf_size;
595 u16 curr_tx_buf_size;
596 u32 ioport;
597 enum MWIFIEX_HARDWARE_STATUS hw_status;
598 u16 number_of_antenna;
599 u32 fw_cap_info;
600 /* spin lock for interrupt handling */
601 spinlock_t int_lock;
602 u8 int_status;
603 u32 event_cause;
604 struct sk_buff *event_skb;
605 u8 upld_buf[MWIFIEX_UPLD_SIZE];
606 u8 data_sent;
607 u8 cmd_sent;
608 u8 cmd_resp_received;
609 u8 event_received;
610 u8 data_received;
611 u16 seq_num;
612 struct cmd_ctrl_node *cmd_pool;
613 struct cmd_ctrl_node *curr_cmd;
614 /* spin lock for command */
615 spinlock_t mwifiex_cmd_lock;
616 u32 num_cmd_timeout;
617 u16 last_init_cmd;
618 struct timer_list cmd_timer;
619 struct list_head cmd_free_q;
620 /* spin lock for cmd_free_q */
621 spinlock_t cmd_free_q_lock;
622 struct list_head cmd_pending_q;
623 /* spin lock for cmd_pending_q */
624 spinlock_t cmd_pending_q_lock;
625 struct list_head scan_pending_q;
626 /* spin lock for scan_pending_q */
627 spinlock_t scan_pending_q_lock;
628 u32 scan_processing;
629 u16 region_code;
630 struct mwifiex_802_11d_domain_reg domain_reg;
631 struct mwifiex_bssdescriptor *scan_table;
632 u32 num_in_scan_table;
633 u16 scan_probes;
634 u32 scan_mode;
635 u16 specific_scan_time;
636 u16 active_scan_time;
637 u16 passive_scan_time;
638 u8 bcn_buf[MAX_SCAN_BEACON_BUFFER];
639 u8 *bcn_buf_end;
640 u8 fw_bands;
641 u8 adhoc_start_band;
642 u8 config_bands;
643 struct mwifiex_chan_scan_param_set *scan_channels;
644 u8 tx_lock_flag;
645 struct mwifiex_sleep_params sleep_params;
646 struct mwifiex_sleep_period sleep_period;
647 u16 ps_mode;
648 u32 ps_state;
649 u8 need_to_wakeup;
650 u16 multiple_dtim;
651 u16 local_listen_interval;
652 u16 null_pkt_interval;
653 struct sk_buff *sleep_cfm;
654 u16 bcn_miss_time_out;
655 u16 adhoc_awake_period;
656 u8 is_deep_sleep;
657 u8 delay_null_pkt;
658 u16 delay_to_ps;
659 u16 enhanced_ps_mode;
660 u8 pm_wakeup_card_req;
661 u16 gen_null_pkt;
662 u16 pps_uapsd_mode;
663 u32 pm_wakeup_fw_try;
664 u8 is_hs_configured;
665 struct mwifiex_hs_config_param hs_cfg;
666 u8 hs_activated;
667 u16 hs_activate_wait_q_woken;
668 wait_queue_head_t hs_activate_wait_q;
669 bool is_suspended;
670 u8 event_body[MAX_EVENT_SIZE];
671 u32 hw_dot_11n_dev_cap;
672 u8 hw_dev_mcs_support;
673 u8 adhoc_11n_enabled;
674 u8 chan_offset;
675 struct mwifiex_dbg dbg;
676 u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
677 u32 arp_filter_size;
678 u16 cmd_wait_q_required;
679 struct mwifiex_wait_queue cmd_wait_q;
680};
681
682int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
683void mwifiex_free_lock_list(struct mwifiex_adapter *adapter);
684
685int mwifiex_init_fw(struct mwifiex_adapter *adapter);
686
687int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
688
689int mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
690
691int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
692
693int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
694
695int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
696
697int mwifiex_process_event(struct mwifiex_adapter *adapter);
698
699int mwifiex_complete_cmd(struct mwifiex_adapter *adapter);
700
701int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
702 u16 cmd_action, u32 cmd_oid, void *data_buf);
703
704int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
705 u16 cmd_action, u32 cmd_oid, void *data_buf);
706
707void mwifiex_cmd_timeout_func(unsigned long function_context);
708
709int mwifiex_get_debug_info(struct mwifiex_private *,
710 struct mwifiex_debug_info *);
711
712int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter);
713int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
714void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
715void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
716
717void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
718 struct cmd_ctrl_node *cmd_node);
719
720void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
721 struct cmd_ctrl_node *cmd_node,
722 u32 addtail);
723
724int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter);
725int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter);
726int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
727 struct sk_buff *skb);
728int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
729 struct mwifiex_tx_param *tx_param);
730int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags);
731int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
732 struct sk_buff *skb, int status);
733int mwifiex_recv_packet_complete(struct mwifiex_adapter *,
734 struct sk_buff *skb, int status);
735void mwifiex_clean_txrx(struct mwifiex_private *priv);
736u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv);
737void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter);
738void mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *, u8 *,
739 u32);
740int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
741 struct host_cmd_ds_command *cmd,
742 u16 cmd_action, uint16_t ps_bitmap,
743 void *data_buf);
744int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
745 struct host_cmd_ds_command *resp,
746 void *data_buf);
747void mwifiex_process_hs_config(struct mwifiex_adapter *adapter);
748void mwifiex_hs_activated_event(struct mwifiex_private *priv,
749 u8 activated);
750int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
751 struct host_cmd_ds_command *resp);
752int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
753 struct sk_buff *skb);
754int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
755 u16 cmd_action, u32 cmd_oid,
756 void *data_buf, void *cmd_buf);
757int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
758 void *cmd_buf);
759int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
760 struct sk_buff *skb);
761int mwifiex_process_sta_event(struct mwifiex_private *);
762void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
763int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
764int mwifiex_scan_networks(struct mwifiex_private *priv,
765 const struct mwifiex_user_scan_cfg *user_scan_in);
766int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
767 void *data_buf);
768void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
769 struct cmd_ctrl_node *cmd_node);
770int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
771 struct host_cmd_ds_command *resp);
772s32 mwifiex_find_ssid_in_list(struct mwifiex_private *priv,
773 struct mwifiex_802_11_ssid *ssid, u8 *bssid,
774 u32 mode);
775s32 mwifiex_find_bssid_in_list(struct mwifiex_private *priv, u8 *bssid,
776 u32 mode);
777int mwifiex_find_best_network(struct mwifiex_private *priv,
778 struct mwifiex_ssid_bssid *req_ssid_bssid);
779s32 mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
780 struct mwifiex_802_11_ssid *ssid2);
781int mwifiex_associate(struct mwifiex_private *priv,
782 struct mwifiex_bssdescriptor *bss_desc);
783int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
784 struct host_cmd_ds_command
785 *cmd, void *data_buf);
786int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
787 struct host_cmd_ds_command *resp);
788void mwifiex_reset_connect_state(struct mwifiex_private *priv);
789void mwifiex_2040_coex_event(struct mwifiex_private *priv);
790u8 mwifiex_band_to_radio_type(u8 band);
791int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
792int mwifiex_adhoc_start(struct mwifiex_private *priv,
793 struct mwifiex_802_11_ssid *adhoc_ssid);
794int mwifiex_adhoc_join(struct mwifiex_private *priv,
795 struct mwifiex_bssdescriptor *bss_desc);
796int mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
797 struct host_cmd_ds_command *cmd,
798 void *data_buf);
799int mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
800 struct host_cmd_ds_command *cmd,
801 void *data_buf);
802int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
803 struct host_cmd_ds_command *resp);
804int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd);
805struct mwifiex_chan_freq_power *
806 mwifiex_get_cfp_by_band_and_channel_from_cfg80211(
807 struct mwifiex_private *priv,
808 u8 band, u16 channel);
809struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
810 struct mwifiex_private *priv,
811 u8 band, u32 freq);
812u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info);
813u32 mwifiex_find_freq_from_band_chan(u8, u8);
814int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
815 u8 **buffer);
816u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
817 u8 *rates);
818u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
819u8 mwifiex_data_rate_to_index(u32 rate);
820u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
821int mwifiex_get_rate_index(u16 *rateBitmap, int size);
822extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
823void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
824void mwifiex_free_curr_bcn(struct mwifiex_private *priv);
825int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
826 struct host_cmd_ds_command *cmd);
827int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
828 struct host_cmd_ds_command *resp);
829int is_command_pending(struct mwifiex_adapter *adapter);
830
831/*
832 * This function checks if the queuing is RA based or not.
833 */
834static inline u8
835mwifiex_queuing_ra_based(struct mwifiex_private *priv)
836{
837 /*
838 * Currently we assume if we are in Infra, then DA=RA. This might not be
839 * true in the future.
840 */
841 if ((priv->bss_mode == NL80211_IFTYPE_STATION) &&
842 (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA))
843 return false;
844
845 return true;
846}
847
848/*
849 * This function copies rates.
850 */
851static inline u32
852mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
853{
854 int i;
855
856 for (i = 0; i < len && src[i]; i++, pos++) {
857 if (pos >= MWIFIEX_SUPPORTED_RATES)
858 break;
859 dest[pos] = src[i];
860 }
861
862 return pos;
863}
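/*
 * A minimal usage sketch, with hypothetical buffer names.  The returned
 * position lets callers chain several rate arrays into one destination,
 * e.g. basic rates followed by extended rates; copying stops at the first
 * zero rate byte or after MWIFIEX_SUPPORTED_RATES entries:
 *
 *	u32 pos = 0;
 *
 *	pos = mwifiex_copy_rates(dest, pos, basic_rates, basic_len);
 *	pos = mwifiex_copy_rates(dest, pos, ext_rates, ext_len);
 */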
864
865/*
866 * This function returns the correct private structure pointer based
867 * upon the BSS type and BSS number.
868 */
869static inline struct mwifiex_private *
870mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
871 u8 bss_num, u8 bss_type)
872{
873 int i;
874
875 for (i = 0; i < adapter->priv_num; i++) {
876 if (adapter->priv[i]) {
877 if ((adapter->priv[i]->bss_num == bss_num)
878 && (adapter->priv[i]->bss_type == bss_type))
879 break;
880 }
881 }
882 return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
883}
884
885/*
886 * This function returns the first available private structure pointer
887 * based upon the BSS role.
888 */
889static inline struct mwifiex_private *
890mwifiex_get_priv(struct mwifiex_adapter *adapter,
891 enum mwifiex_bss_role bss_role)
892{
893 int i;
894
895 for (i = 0; i < adapter->priv_num; i++) {
896 if (adapter->priv[i]) {
897 if (bss_role == MWIFIEX_BSS_ROLE_ANY ||
898 GET_BSS_ROLE(adapter->priv[i]) == bss_role)
899 break;
900 }
901 }
902
903 return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
904}
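/*
 * A minimal usage sketch, assuming a hypothetical caller that only needs
 * some station interface.  MWIFIEX_BSS_ROLE_STA and MWIFIEX_BSS_ROLE_ANY
 * are the role values used with GET_BSS_ROLE() above; the NULL check is
 * required since no matching interface may exist:
 *
 *	struct mwifiex_private *priv;
 *
 *	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
 *	if (!priv)
 *		return;
 */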
905
906/*
907 * This function returns the driver private structure of a network device.
908 */
909static inline struct mwifiex_private *
910mwifiex_netdev_get_priv(struct net_device *dev)
911{
912 return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
913}
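/*
 * This is the read side of the pointer stored by mwifiex_add_interface()
 * in main.c of this patch, which stashes the mwifiex_private pointer in
 * the netdev private area:
 *
 *	mdev_priv = netdev_priv(dev);
 *	*((unsigned long *) mdev_priv) = (unsigned long) priv;
 *
 * Net device callbacks can then recover the pointer with
 * mwifiex_netdev_get_priv(dev).
 */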
914
915struct mwifiex_private *mwifiex_bss_index_to_priv(struct mwifiex_adapter
916 *adapter, u8 bss_index);
917int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
918 u32 func_init_shutdown);
919int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *);
920int mwifiex_remove_card(struct mwifiex_adapter *, struct semaphore *);
921
922void mwifiex_get_version(struct mwifiex_adapter *adapter, char *version,
923 int maxlen);
924int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
925 struct mwifiex_multicast_list *mcast_list);
926int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
927 struct net_device *dev);
928int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
929int mwifiex_bss_start(struct mwifiex_private *priv,
930 struct mwifiex_ssid_bssid *ssid_bssid);
931int mwifiex_set_hs_params(struct mwifiex_private *priv,
932 u16 action, int cmd_type,
933 struct mwifiex_ds_hs_cfg *hscfg);
934int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
935int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
936int mwifiex_get_signal_info(struct mwifiex_private *priv,
937 struct mwifiex_ds_get_signal *signal);
938int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
939 struct mwifiex_rate_cfg *rate);
940int mwifiex_find_best_bss(struct mwifiex_private *priv,
941 struct mwifiex_ssid_bssid *ssid_bssid);
942int mwifiex_request_scan(struct mwifiex_private *priv,
943 struct mwifiex_802_11_ssid *req_ssid);
944int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
945 struct mwifiex_user_scan_cfg *scan_req);
946int mwifiex_change_adhoc_chan(struct mwifiex_private *priv, int channel);
947int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
948
949int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel);
950
951int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
952 int key_len, u8 key_index, int disable);
953
954int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
955
956int mwifiex_get_ver_ext(struct mwifiex_private *priv);
957
958int mwifiex_get_stats_info(struct mwifiex_private *priv,
959 struct mwifiex_ds_get_stats *log);
960
961int mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type,
962 u32 reg_offset, u32 reg_value);
963
964int mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type,
965 u32 reg_offset, u32 *value);
966
967int mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
968 u8 *value);
969
970int mwifiex_set_11n_httx_cfg(struct mwifiex_private *priv, int data);
971
972int mwifiex_get_11n_httx_cfg(struct mwifiex_private *priv, int *data);
973
974int mwifiex_set_tx_rate_cfg(struct mwifiex_private *priv, int tx_rate_index);
975
976int mwifiex_get_tx_rate_cfg(struct mwifiex_private *priv, int *tx_rate_index);
977
978int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode);
979
980int mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter,
981 char *version, int max_len);
982
983int mwifiex_set_tx_power(struct mwifiex_private *priv,
984 struct mwifiex_power_cfg *power_cfg);
985
986int mwifiex_main_process(struct mwifiex_adapter *);
987
988int mwifiex_bss_set_channel(struct mwifiex_private *,
989 struct mwifiex_chan_freq_power *cfp);
990int mwifiex_bss_ioctl_find_bss(struct mwifiex_private *,
991 struct mwifiex_ssid_bssid *);
992int mwifiex_set_radio_band_cfg(struct mwifiex_private *,
993 struct mwifiex_ds_band_cfg *);
994int mwifiex_get_bss_info(struct mwifiex_private *,
995 struct mwifiex_bss_info *);
996
997#ifdef CONFIG_DEBUG_FS
998void mwifiex_debugfs_init(void);
999void mwifiex_debugfs_remove(void);
1000
1001void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
1002void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
1003#endif
1004#endif /* !_MWIFIEX_MAIN_H_ */
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
new file mode 100644
index 000000000000..5c22860fb40a
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -0,0 +1,3025 @@
1/*
2 * Marvell Wireless LAN device driver: scan ioctl and command handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "11n.h"
26#include "cfg80211.h"
27
28/* The maximum number of channels the firmware can scan per command */
29#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
30
31#define MWIFIEX_CHANNELS_PER_SCAN_CMD 4
32
33/* Memory needed to store a max sized Channel List TLV for a firmware scan */
34#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
35 + (MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN \
36 *sizeof(struct mwifiex_chan_scan_param_set)))
37
38/* Memory needed to store supported rate */
39#define RATE_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_rates_param_set) \
40 + HOSTCMD_SUPPORTED_RATES)
41
42/* Memory needed to store a max number/size WildCard SSID TLV for a firmware
43 scan */
44#define WILDCARD_SSID_TLV_MAX_SIZE \
45 (MWIFIEX_MAX_SSID_LIST_LENGTH * \
46 (sizeof(struct mwifiex_ie_types_wildcard_ssid_params) \
47 + IEEE80211_MAX_SSID_LEN))
48
49/* Maximum memory needed for a mwifiex_scan_cmd_config with all TLVs at max */
50#define MAX_SCAN_CFG_ALLOC (sizeof(struct mwifiex_scan_cmd_config) \
51 + sizeof(struct mwifiex_ie_types_num_probes) \
52 + sizeof(struct mwifiex_ie_types_htcap) \
53 + CHAN_TLV_MAX_SIZE \
54 + RATE_TLV_MAX_SIZE \
55 + WILDCARD_SSID_TLV_MAX_SIZE)
56
57
58union mwifiex_scan_cmd_config_tlv {
59 /* Scan configuration (variable length) */
60 struct mwifiex_scan_cmd_config config;
61 /* Max allocated block */
62 u8 config_alloc_buf[MAX_SCAN_CFG_ALLOC];
63};
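/*
 * Sizing note with a hypothetical allocation sketch: the union guarantees
 * that one object is always large enough for the scan command plus every
 * optional TLV at maximum size, while code still works through the typed
 * ->config member:
 *
 *	union mwifiex_scan_cmd_config_tlv *scan_cfg;
 *
 *	scan_cfg = kzalloc(sizeof(*scan_cfg), GFP_KERNEL);
 *	if (!scan_cfg)
 *		return -ENOMEM;
 *	... fill scan_cfg->config, send the command, then kfree(scan_cfg);
 */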
64
65enum cipher_suite {
66 CIPHER_SUITE_TKIP,
67 CIPHER_SUITE_CCMP,
68 CIPHER_SUITE_MAX
69};
70static u8 mwifiex_wpa_oui[CIPHER_SUITE_MAX][4] = {
71 { 0x00, 0x50, 0xf2, 0x02 }, /* TKIP */
72 { 0x00, 0x50, 0xf2, 0x04 }, /* AES */
73};
74static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = {
75 { 0x00, 0x0f, 0xac, 0x02 }, /* TKIP */
76 { 0x00, 0x0f, 0xac, 0x04 }, /* AES */
77};
78
79/*
80 * This function parses a given IE for a given OUI.
81 *
82 * This is used to parse a WPA/RSN IE to find if it has
83 * a given OUI in the PTK.
84 */
85static u8
86mwifiex_search_oui_in_ie(struct ie_body *iebody, u8 *oui)
87{
88 u8 count;
89
90 count = iebody->ptk_cnt[0];
91
92 /* There could be multiple OUIs for PTK, hence:
93 1) Take the length.
94 2) Check all the OUIs for AES.
95 3) If one of them is AES then return success. */
96 while (count) {
97 if (!memcmp(iebody->ptk_body, oui, sizeof(iebody->ptk_body)))
98 return MWIFIEX_OUI_PRESENT;
99
100 --count;
101 if (count)
102 iebody = (struct ie_body *) ((u8 *) iebody +
103 sizeof(iebody->ptk_body));
104 }
105
106 pr_debug("info: %s: OUI is not found in PTK\n", __func__);
107 return MWIFIEX_OUI_NOT_PRESENT;
108}
109
110/*
111 * This function checks if a given OUI is present in a RSN IE.
112 *
113 * The function first checks if a RSN IE is present or not in the
114 * BSS descriptor. It tries to locate the OUI only if such an IE is
115 * present.
116 */
117static u8
118mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
119{
120 u8 *oui;
121 struct ie_body *iebody;
122 u8 ret = MWIFIEX_OUI_NOT_PRESENT;
123
124 if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).
125 ieee_hdr.element_id == WLAN_EID_RSN))) {
126 iebody = (struct ie_body *)
127 (((u8 *) bss_desc->bcn_rsn_ie->data) +
128 RSN_GTK_OUI_OFFSET);
129 oui = &mwifiex_rsn_oui[cipher][0];
130 ret = mwifiex_search_oui_in_ie(iebody, oui);
131 if (ret)
132 return ret;
133 }
134 return ret;
135}
136
137/*
138 * This function checks if a given OUI is present in a WPA IE.
139 *
140 * The function first checks if a WPA IE is present or not in the
141 * BSS descriptor. It tries to locate the OUI only if such an IE is
142 * present.
143 */
144static u8
145mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
146{
147 u8 *oui;
148 struct ie_body *iebody;
149 u8 ret = MWIFIEX_OUI_NOT_PRESENT;
150
151 if (((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).
152 vend_hdr.element_id == WLAN_EID_WPA))) {
153 iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
154 oui = &mwifiex_wpa_oui[cipher][0];
155 ret = mwifiex_search_oui_in_ie(iebody, oui);
156 if (ret)
157 return ret;
158 }
159 return ret;
160}
161
162/*
163 * This function compares two SSIDs and checks if they match.
164 */
165s32
166mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
167 struct mwifiex_802_11_ssid *ssid2)
168{
169 if (!ssid1 || !ssid2 || (ssid1->ssid_len != ssid2->ssid_len))
170 return -1;
171 return memcmp(ssid1->ssid, ssid2->ssid, ssid1->ssid_len);
172}
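/*
 * A minimal usage sketch; this particular comparison site is hypothetical,
 * but prev_ssid and the BSS descriptor ssid field are real members from
 * main.h.  The helper returns 0 only when both SSIDs exist and match in
 * length and content:
 *
 *	if (!mwifiex_ssid_cmp(&priv->prev_ssid, &bss_desc->ssid))
 *		... the two SSIDs match ...
 */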
173
174/*
175 * Sends IOCTL request to get the best BSS.
176 *
177 * This function allocates the IOCTL request buffer, fills it
178 * with requisite parameters and calls the IOCTL handler.
179 */
180int mwifiex_find_best_bss(struct mwifiex_private *priv,
181 struct mwifiex_ssid_bssid *ssid_bssid)
182{
183 struct mwifiex_ssid_bssid tmp_ssid_bssid;
184 u8 *mac;
185
186 if (!ssid_bssid)
187 return -1;
188
189 memcpy(&tmp_ssid_bssid, ssid_bssid,
190 sizeof(struct mwifiex_ssid_bssid));
191
192 if (!mwifiex_bss_ioctl_find_bss(priv, &tmp_ssid_bssid)) {
193 memcpy(ssid_bssid, &tmp_ssid_bssid,
194 sizeof(struct mwifiex_ssid_bssid));
195 mac = (u8 *) &ssid_bssid->bssid;
196 dev_dbg(priv->adapter->dev, "cmd: found network: ssid=%s,"
197 " %pM\n", ssid_bssid->ssid.ssid, mac);
198 return 0;
199 }
200
201 return -1;
202}
203
204/*
205 * Sends IOCTL request to start a scan with user configurations.
206 *
207 * This function allocates the IOCTL request buffer, fills it
208 * with requisite parameters and calls the IOCTL handler.
209 *
210 * Upon completion, it also generates a wireless event to notify
211 * applications.
212 */
213int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
214 struct mwifiex_user_scan_cfg *scan_req)
215{
216 int status;
217
218 priv->adapter->cmd_wait_q.condition = false;
219
220 status = mwifiex_scan_networks(priv, scan_req);
221 if (!status)
222 status = mwifiex_wait_queue_complete(priv->adapter);
223
224 return status;
225}
226
227/*
228 * This function checks if WAPI is enabled in the driver and the scanned
229 * network is compatible with it.
230 */
231static bool
232mwifiex_is_network_compatible_for_wapi(struct mwifiex_private *priv,
233 struct mwifiex_bssdescriptor *bss_desc)
234{
235 if (priv->sec_info.wapi_enabled &&
236 (bss_desc->bcn_wapi_ie &&
237 ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id ==
238 WLAN_EID_BSS_AC_ACCESS_DELAY))) {
239 return true;
240 }
241 return false;
242}
243
244/*
245 * This function checks if the driver is configured with no security mode
246 * and the scanned network is compatible with it.
247 */
248static bool
249mwifiex_is_network_compatible_for_no_sec(struct mwifiex_private *priv,
250 struct mwifiex_bssdescriptor *bss_desc)
251{
252 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
253 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
254 && ((!bss_desc->bcn_wpa_ie) ||
255 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
256 WLAN_EID_WPA))
257 && ((!bss_desc->bcn_rsn_ie) ||
258 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
259 WLAN_EID_RSN))
260 && !priv->sec_info.encryption_mode
261 && !bss_desc->privacy) {
262 return true;
263 }
264 return false;
265}
266
267/*
268 * This function checks if static WEP is enabled in the driver and the
269 * scanned network is compatible with it.
270 */
271static bool
272mwifiex_is_network_compatible_for_static_wep(struct mwifiex_private *priv,
273 struct mwifiex_bssdescriptor *bss_desc)
274{
275 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED
276 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
277 && bss_desc->privacy) {
278 return true;
279 }
280 return false;
281}
282
283/*
284 * This function checks if WPA is enabled in the driver and the scanned
285 * network is compatible with it.
286 */
287static bool
288mwifiex_is_network_compatible_for_wpa(struct mwifiex_private *priv,
289 struct mwifiex_bssdescriptor *bss_desc,
290 int index)
291{
292 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
293 && priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
294 && ((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
295 element_id == WLAN_EID_WPA))
296 /*
297 * Privacy bit may NOT be set in some APs like
298 * LinkSys WRT54G && bss_desc->privacy
299 */
300 ) {
301 dev_dbg(priv->adapter->dev, "info: %s: WPA: index=%d"
302 " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
303 "EncMode=%#x privacy=%#x\n", __func__, index,
304 (bss_desc->bcn_wpa_ie) ?
305 (*(bss_desc->bcn_wpa_ie)).
306 vend_hdr.element_id : 0,
307 (bss_desc->bcn_rsn_ie) ?
308 (*(bss_desc->bcn_rsn_ie)).
309 ieee_hdr.element_id : 0,
310 (priv->sec_info.wep_status ==
311 MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
312 (priv->sec_info.wpa_enabled) ? "e" : "d",
313 (priv->sec_info.wpa2_enabled) ? "e" : "d",
314 priv->sec_info.encryption_mode,
315 bss_desc->privacy);
316 return true;
317 }
318 return false;
319}
320
321/*
322 * This function checks if WPA2 is enabled in the driver and the scanned
323 * network is compatible with it.
324 */
325static bool
326mwifiex_is_network_compatible_for_wpa2(struct mwifiex_private *priv,
327 struct mwifiex_bssdescriptor *bss_desc,
328 int index)
329{
330 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
331 && !priv->sec_info.wpa_enabled && priv->sec_info.wpa2_enabled
332 && ((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
333 element_id == WLAN_EID_RSN))
334 /*
335 * Privacy bit may NOT be set in some APs like
336 * LinkSys WRT54G && bss_desc->privacy
337 */
338 ) {
339 dev_dbg(priv->adapter->dev, "info: %s: WPA2: index=%d"
340 " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
341 "EncMode=%#x privacy=%#x\n", __func__, index,
342 (bss_desc->bcn_wpa_ie) ?
343 (*(bss_desc->bcn_wpa_ie)).
344 vend_hdr.element_id : 0,
345 (bss_desc->bcn_rsn_ie) ?
346 (*(bss_desc->bcn_rsn_ie)).
347 ieee_hdr.element_id : 0,
348 (priv->sec_info.wep_status ==
349 MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
350 (priv->sec_info.wpa_enabled) ? "e" : "d",
351 (priv->sec_info.wpa2_enabled) ? "e" : "d",
352 priv->sec_info.encryption_mode,
353 bss_desc->privacy);
354 return true;
355 }
356 return false;
357}
358
359/*
360 * This function checks if ad-hoc AES is enabled in the driver and the
361 * scanned network is compatible with it.
362 */
363static bool
364mwifiex_is_network_compatible_for_adhoc_aes(struct mwifiex_private *priv,
365 struct mwifiex_bssdescriptor *bss_desc)
366{
367 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
368 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
369 && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
370 element_id != WLAN_EID_WPA))
371 && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
372 element_id != WLAN_EID_RSN))
373 && !priv->sec_info.encryption_mode
374 && bss_desc->privacy) {
375 return true;
376 }
377 return false;
378}
379
380/*
381 * This function checks if dynamic WEP is enabled in the driver and the
382 * scanned network is compatible with it.
383 */
384static bool
385mwifiex_is_network_compatible_for_dynamic_wep(struct mwifiex_private *priv,
386 struct mwifiex_bssdescriptor *bss_desc,
387 int index)
388{
389 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_DISABLED
390 && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled
391 && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.
392 element_id != WLAN_EID_WPA))
393 && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.
394 element_id != WLAN_EID_RSN))
395 && priv->sec_info.encryption_mode
396 && bss_desc->privacy) {
397 dev_dbg(priv->adapter->dev, "info: %s: dynamic "
398 "WEP: index=%d wpa_ie=%#x wpa2_ie=%#x "
399 "EncMode=%#x privacy=%#x\n",
400 __func__, index,
401 (bss_desc->bcn_wpa_ie) ?
402 (*(bss_desc->bcn_wpa_ie)).
403 vend_hdr.element_id : 0,
404 (bss_desc->bcn_rsn_ie) ?
405 (*(bss_desc->bcn_rsn_ie)).
406 ieee_hdr.element_id : 0,
407 priv->sec_info.encryption_mode,
408 bss_desc->privacy);
409 return true;
410 }
411 return false;
412}
413
414/*
415 * This function checks if a scanned network is compatible with the driver
416 * settings.
417 *
418 * WEP WPA WPA2 ad-hoc encrypt Network
419 * enabled enabled enabled AES mode Privacy WPA WPA2 Compatible
420 * 0 0 0 0 NONE 0 0 0 yes No security
421 * 0 1 0 0 x 1x 1 x yes WPA (disable
422 * HT if no AES)
423 * 0 0 1 0 x 1x x 1 yes WPA2 (disable
424 * HT if no AES)
425 * 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES
426 * 1 0 0 0 NONE 1 0 0 yes Static WEP
427 * (disable HT)
428 * 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP
429 *
430 * Compatibility is not checked while roaming, except for the mode.
431 */
432static s32
433mwifiex_is_network_compatible(struct mwifiex_private *priv, u32 index, u32 mode)
434{
435 struct mwifiex_adapter *adapter = priv->adapter;
436 struct mwifiex_bssdescriptor *bss_desc;
437
438 bss_desc = &adapter->scan_table[index];
439 bss_desc->disable_11n = false;
440
441 /* Don't check for compatibility if roaming */
442 if (priv->media_connected && (priv->bss_mode == NL80211_IFTYPE_STATION)
443 && (bss_desc->bss_mode == NL80211_IFTYPE_STATION))
444 return index;
445
446 if (priv->wps.session_enable) {
447 dev_dbg(adapter->dev,
448 "info: return success directly in WPS period\n");
449 return index;
450 }
451
452 if (mwifiex_is_network_compatible_for_wapi(priv, bss_desc)) {
453 dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
454 return index;
455 }
456
457 if (bss_desc->bss_mode == mode) {
458 if (mwifiex_is_network_compatible_for_no_sec(priv, bss_desc)) {
459 /* No security */
460 return index;
461 } else if (mwifiex_is_network_compatible_for_static_wep(priv,
462 bss_desc)) {
463 /* Static WEP enabled */
464 dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
465 bss_desc->disable_11n = true;
466 return index;
467 } else if (mwifiex_is_network_compatible_for_wpa(priv, bss_desc,
468 index)) {
469 /* WPA enabled */
470 if (((priv->adapter->config_bands & BAND_GN
471 || priv->adapter->config_bands & BAND_AN)
472 && bss_desc->bcn_ht_cap)
473 && !mwifiex_is_wpa_oui_present(bss_desc,
474 CIPHER_SUITE_CCMP)) {
475
476 if (mwifiex_is_wpa_oui_present(bss_desc,
477 CIPHER_SUITE_TKIP)) {
478 dev_dbg(adapter->dev,
479 "info: Disable 11n if AES "
480 "is not supported by AP\n");
481 bss_desc->disable_11n = true;
482 } else {
483 return -1;
484 }
485 }
486 return index;
487 } else if (mwifiex_is_network_compatible_for_wpa2(priv,
488 bss_desc, index)) {
489 /* WPA2 enabled */
490 if (((priv->adapter->config_bands & BAND_GN
491 || priv->adapter->config_bands & BAND_AN)
492 && bss_desc->bcn_ht_cap)
493 && !mwifiex_is_rsn_oui_present(bss_desc,
494 CIPHER_SUITE_CCMP)) {
495
496 if (mwifiex_is_rsn_oui_present(bss_desc,
497 CIPHER_SUITE_TKIP)) {
498 dev_dbg(adapter->dev,
499 "info: Disable 11n if AES "
500 "is not supported by AP\n");
501 bss_desc->disable_11n = true;
502 } else {
503 return -1;
504 }
505 }
506 return index;
507 } else if (mwifiex_is_network_compatible_for_adhoc_aes(priv,
508 bss_desc)) {
509 /* Ad-hoc AES enabled */
510 return index;
511 } else if (mwifiex_is_network_compatible_for_dynamic_wep(priv,
512 bss_desc, index)) {
513 /* Dynamic WEP enabled */
514 return index;
515 }
516
517 /* Security doesn't match */
518 dev_dbg(adapter->dev, "info: %s: failed: index=%d "
519 "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s EncMode"
520 "=%#x privacy=%#x\n",
521 __func__, index,
522 (bss_desc->bcn_wpa_ie) ?
523 (*(bss_desc->bcn_wpa_ie)).vend_hdr.
524 element_id : 0,
525 (bss_desc->bcn_rsn_ie) ?
526 (*(bss_desc->bcn_rsn_ie)).ieee_hdr.
527 element_id : 0,
528 (priv->sec_info.wep_status ==
529 MWIFIEX_802_11_WEP_ENABLED) ? "e" : "d",
530 (priv->sec_info.wpa_enabled) ? "e" : "d",
531 (priv->sec_info.wpa2_enabled) ? "e" : "d",
532 priv->sec_info.encryption_mode, bss_desc->privacy);
533 return -1;
534 }
535
536 /* Mode doesn't match */
537 return -1;
538}
539
540/*
541 * This function finds the best network in the scan list.
542 *
543 * It searches the scan table for the entry with the best RSSI that also
544 * matches the current adapter network preference (mode, security etc.).
545 */
546static s32
547mwifiex_find_best_network_in_list(struct mwifiex_private *priv)
548{
549 struct mwifiex_adapter *adapter = priv->adapter;
550 u32 mode = priv->bss_mode;
551 s32 best_net = -1;
552 s32 best_rssi = 0;
553 u32 i;
554
555 dev_dbg(adapter->dev, "info: num of BSSIDs = %d\n",
556 adapter->num_in_scan_table);
557
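	/* In station/ad-hoc mode only networks that pass the compatibility
	   check are considered; in unspecified mode the entry with the best
	   RSSI wins regardless of compatibility */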
558 for (i = 0; i < adapter->num_in_scan_table; i++) {
559 switch (mode) {
560 case NL80211_IFTYPE_STATION:
561 case NL80211_IFTYPE_ADHOC:
562 if (mwifiex_is_network_compatible(priv, i, mode) >= 0) {
563 if (SCAN_RSSI(adapter->scan_table[i].rssi) >
564 best_rssi) {
565 best_rssi = SCAN_RSSI(adapter->
566 scan_table[i].rssi);
567 best_net = i;
568 }
569 }
570 break;
571 case NL80211_IFTYPE_UNSPECIFIED:
572 default:
573 if (SCAN_RSSI(adapter->scan_table[i].rssi) >
574 best_rssi) {
575 best_rssi = SCAN_RSSI(adapter->scan_table[i].
576 rssi);
577 best_net = i;
578 }
579 break;
580 }
581 }
582
583 return best_net;
584}
585
586/*
587 * This function creates a channel list for the driver to scan, based
588 * on region/band information.
589 *
590 * This routine is used for any scan that is not provided with a
591 * specific channel list to scan.
592 */
593static void
594mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
595 const struct mwifiex_user_scan_cfg
596 *user_scan_in,
597 struct mwifiex_chan_scan_param_set
598 *scan_chan_list,
599 u8 filtered_scan)
600{
601 enum ieee80211_band band;
602 struct ieee80211_supported_band *sband;
603 struct ieee80211_channel *ch;
604 struct mwifiex_adapter *adapter = priv->adapter;
605 int chan_idx = 0, i;
606 u8 scan_type;
607
608 for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
609
610 if (!priv->wdev->wiphy->bands[band])
611 continue;
612
613 sband = priv->wdev->wiphy->bands[band];
614
615 for (i = 0; (i < sband->n_channels) ; i++, chan_idx++) {
616 ch = &sband->channels[i];
617 if (ch->flags & IEEE80211_CHAN_DISABLED)
618 continue;
619 scan_chan_list[chan_idx].radio_type = band;
620 scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN;
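			/* Select the per-channel dwell time: a user-supplied
			   scan_time overrides everything, otherwise use the
			   adapter's passive or active scan time depending on
			   the channel's passive-scan flag */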
621 if (user_scan_in &&
622 user_scan_in->chan_list[0].scan_time)
623 scan_chan_list[chan_idx].max_scan_time =
624 cpu_to_le16((u16) user_scan_in->
625 chan_list[0].scan_time);
626 else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
627 scan_chan_list[chan_idx].max_scan_time =
628 cpu_to_le16(adapter->passive_scan_time);
629 else
630 scan_chan_list[chan_idx].max_scan_time =
631 cpu_to_le16(adapter->active_scan_time);
632 if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
633 scan_chan_list[chan_idx].chan_scan_mode_bitmap
634 |= MWIFIEX_PASSIVE_SCAN;
635 else
636 scan_chan_list[chan_idx].chan_scan_mode_bitmap
637 &= ~MWIFIEX_PASSIVE_SCAN;
638 scan_chan_list[chan_idx].chan_number =
639 (u32) ch->hw_value;
640 if (filtered_scan) {
641 scan_chan_list[chan_idx].max_scan_time =
642 cpu_to_le16(adapter->specific_scan_time);
643 scan_chan_list[chan_idx].chan_scan_mode_bitmap
644 |= MWIFIEX_DISABLE_CHAN_FILT;
645 }
646 }
647
648 }
649}
650
651/*
652 * This function constructs and sends multiple scan config commands to
653 * the firmware.
654 *
655 * Previous routines in the code flow have created a scan command configuration
656 * with any requested TLVs. This function splits the channel TLV into chunks
657 * of at most max_chan_per_scan channels and sends each chunk, along with the
658 * other TLVs, to the firmware.
659 */
660static int
661mwifiex_scan_channel_list(struct mwifiex_private *priv,
662 u32 max_chan_per_scan, u8 filtered_scan,
663 struct mwifiex_scan_cmd_config *scan_cfg_out,
664 struct mwifiex_ie_types_chan_list_param_set
665 *chan_tlv_out,
666 struct mwifiex_chan_scan_param_set *scan_chan_list)
667{
668 int ret = 0;
669 struct mwifiex_chan_scan_param_set *tmp_chan_list;
670 struct mwifiex_chan_scan_param_set *start_chan;
671
672 u32 tlv_idx;
673 u32 total_scan_time;
674 u32 done_early;
675
676 if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
677 dev_dbg(priv->adapter->dev,
678 "info: Scan: Null detect: %p, %p, %p\n",
679 scan_cfg_out, chan_tlv_out, scan_chan_list);
680 return -1;
681 }
682
683 chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
684
685 /* Set the temp channel struct pointer to the start of the desired
686 list */
687 tmp_chan_list = scan_chan_list;
688
689	/* Loop through the desired channel list, sending a new firmware scan
690	   command for each group of up to max_chan_per_scan channels (or for
691	   channels 1, 6 and 11 individually if configured accordingly) */
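	/* For illustration only (values assumed, not taken from the driver
	   headers): with an unfiltered scan of 2.4 GHz channels 1..11 in
	   order and max_chan_per_scan == 4, the loop below would issue five
	   scan commands covering {1}, {2,3,4,5}, {6}, {7,8,9,10} and {11},
	   so channels 1, 6 and 11 are always scanned on their own. */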
692 while (tmp_chan_list->chan_number) {
693
694 tlv_idx = 0;
695 total_scan_time = 0;
696 chan_tlv_out->header.len = 0;
697 start_chan = tmp_chan_list;
698 done_early = false;
699
700 /*
701 * Construct the Channel TLV for the scan command. Continue to
702 * insert channel TLVs until:
703 * - the tlv_idx hits the maximum configured per scan command
704 * - the next channel to insert is 0 (end of desired channel
705 * list)
706 * - done_early is set (controlling individual scanning of
707 * 1,6,11)
708 */
709 while (tlv_idx < max_chan_per_scan
710 && tmp_chan_list->chan_number && !done_early) {
711
712 dev_dbg(priv->adapter->dev,
713 "info: Scan: Chan(%3d), Radio(%d),"
714 " Mode(%d, %d), Dur(%d)\n",
715 tmp_chan_list->chan_number,
716 tmp_chan_list->radio_type,
717 tmp_chan_list->chan_scan_mode_bitmap
718 & MWIFIEX_PASSIVE_SCAN,
719 (tmp_chan_list->chan_scan_mode_bitmap
720 & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
721 le16_to_cpu(tmp_chan_list->max_scan_time));
722
723 /* Copy the current channel TLV to the command being
724 prepared */
725 memcpy(chan_tlv_out->chan_scan_param + tlv_idx,
726 tmp_chan_list,
727 sizeof(chan_tlv_out->chan_scan_param));
728
729 /* Increment the TLV header length by the size
730 appended */
731 chan_tlv_out->header.len =
732 cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) +
733 (sizeof(chan_tlv_out->chan_scan_param)));
734
735			/*
736			 * The tlv buffer length is set to the number of bytes
737			 * between the channel tlv pointer and the start of the
738			 * tlv buffer. This compensates for any TLVs that were
739			 * appended before the channel list.
740			 */
741 scan_cfg_out->tlv_buf_len = (u32) ((u8 *) chan_tlv_out -
742 scan_cfg_out->tlv_buf);
743
744 /* Add the size of the channel tlv header and the data
745 length */
746 scan_cfg_out->tlv_buf_len +=
747 (sizeof(chan_tlv_out->header)
748 + le16_to_cpu(chan_tlv_out->header.len));
749
750 /* Increment the index to the channel tlv we are
751 constructing */
752 tlv_idx++;
753
754 /* Count the total scan time per command */
755 total_scan_time +=
756 le16_to_cpu(tmp_chan_list->max_scan_time);
757
758 done_early = false;
759
760 /* Stop the loop if the *current* channel is in the
761 1,6,11 set and we are not filtering on a BSSID
762 or SSID. */
763 if (!filtered_scan && (tmp_chan_list->chan_number == 1
764 || tmp_chan_list->chan_number == 6
765 || tmp_chan_list->chan_number == 11))
766 done_early = true;
767
768 /* Increment the tmp pointer to the next channel to
769 be scanned */
770 tmp_chan_list++;
771
772			/* Stop the loop if the *next* channel is in the 1,6,11
773			   set. This will cause it to be the only channel
774			   scanned on the next iteration */
775 if (!filtered_scan && (tmp_chan_list->chan_number == 1
776 || tmp_chan_list->chan_number == 6
777 || tmp_chan_list->chan_number == 11))
778 done_early = true;
779 }
780
781 /* The total scan time should be less than scan command timeout
782 value */
783 if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) {
784 dev_err(priv->adapter->dev, "total scan time %dms"
785 " is over limit (%dms), scan skipped\n",
786 total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME);
787 ret = -1;
788 break;
789 }
790
791 priv->adapter->scan_channels = start_chan;
792
793 /* Send the scan command to the firmware with the specified
794 cfg */
795 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
796 HostCmd_ACT_GEN_SET, 0,
797 scan_cfg_out);
798 if (ret)
799 break;
800 }
801
802 if (ret)
803 return -1;
804
805 return 0;
806}
807
808/*
809 * This function constructs a scan command configuration structure to use
810 * in scan commands.
811 *
812 * The application layer or other functions can invoke network scanning
813 * with a scan configuration supplied in a user scan configuration structure.
814 * This structure is used as the basis of one or more scan command
815 * configurations that are sent to the command processing module and
816 * eventually to the firmware.
817 *
818 * This function creates a scan command configuration structure based on the
819 * following user supplied parameters (if present):
820 * - SSID filter
821 * - BSSID filter
822 * - Number of Probes to be sent
823 * - Channel list
824 *
825 * If the SSID or BSSID filter is not present, the filter is disabled/cleared.
826 * If the number of probes is not set, adapter default setting is used.
827 */
828static void
829mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
830 const struct mwifiex_user_scan_cfg *user_scan_in,
831 struct mwifiex_scan_cmd_config *scan_cfg_out,
832 struct mwifiex_ie_types_chan_list_param_set
833 **chan_list_out,
834 struct mwifiex_chan_scan_param_set
835 *scan_chan_list,
836 u8 *max_chan_per_scan, u8 *filtered_scan,
837 u8 *scan_current_only)
838{
839 struct mwifiex_adapter *adapter = priv->adapter;
840 struct mwifiex_ie_types_num_probes *num_probes_tlv;
841 struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
842 struct mwifiex_ie_types_rates_param_set *rates_tlv;
843 const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
844 u8 *tlv_pos;
845 u32 num_probes;
846 u32 ssid_len;
847 u32 chan_idx;
848 u32 scan_type;
849 u16 scan_dur;
850 u8 channel;
851 u8 radio_type;
852 u32 ssid_idx;
853 u8 ssid_filter;
854 u8 rates[MWIFIEX_SUPPORTED_RATES];
855 u32 rates_size;
856 struct mwifiex_ie_types_htcap *ht_cap;
857
858 /* The tlv_buf_len is calculated for each scan command. The TLVs added
859 in this routine will be preserved since the routine that sends the
860	   command will append channel TLVs at *chan_list_out. The difference
861 between the *chan_list_out and the tlv_buf start will be used to
862 calculate the size of anything we add in this routine. */
863 scan_cfg_out->tlv_buf_len = 0;
864
865 /* Running tlv pointer. Assigned to chan_list_out at end of function
866 so later routines know where channels can be added to the command
867 buf */
868 tlv_pos = scan_cfg_out->tlv_buf;
869
870 /* Initialize the scan as un-filtered; the flag is later set to TRUE
871 below if a SSID or BSSID filter is sent in the command */
872 *filtered_scan = false;
873
874 /* Initialize the scan as not being only on the current channel. If
875 the channel list is customized, only contains one channel, and is
876 the active channel, this is set true and data flow is not halted. */
877 *scan_current_only = false;
878
879 if (user_scan_in) {
880
881		/* Default the ssid_filter flag to TRUE; it is set to false under
882		   certain wildcard conditions below and must be qualified by the
883		   existence of an SSID list before the scan is marked as filtered */
884 ssid_filter = true;
885
886 /* Set the BSS type scan filter, use Adapter setting if
887 unset */
888 scan_cfg_out->bss_mode =
889 (user_scan_in->bss_mode ? (u8) user_scan_in->
890 bss_mode : (u8) adapter->scan_mode);
891
892 /* Set the number of probes to send, use Adapter setting
893 if unset */
894 num_probes =
895 (user_scan_in->num_probes ? user_scan_in->
896 num_probes : adapter->scan_probes);
897
898 /*
899 * Set the BSSID filter to the incoming configuration,
900 * if non-zero. If not set, it will remain disabled
901 * (all zeros).
902 */
903 memcpy(scan_cfg_out->specific_bssid,
904 user_scan_in->specific_bssid,
905 sizeof(scan_cfg_out->specific_bssid));
906
907 for (ssid_idx = 0;
908 ((ssid_idx < ARRAY_SIZE(user_scan_in->ssid_list))
909 && (*user_scan_in->ssid_list[ssid_idx].ssid
910 || user_scan_in->ssid_list[ssid_idx].max_len));
911 ssid_idx++) {
912
913 ssid_len = strlen(user_scan_in->ssid_list[ssid_idx].
914 ssid) + 1;
915
916 wildcard_ssid_tlv =
917 (struct mwifiex_ie_types_wildcard_ssid_params *)
918 tlv_pos;
919 wildcard_ssid_tlv->header.type =
920 cpu_to_le16(TLV_TYPE_WILDCARDSSID);
921 wildcard_ssid_tlv->header.len = cpu_to_le16(
922 (u16) (ssid_len + sizeof(wildcard_ssid_tlv->
923 max_ssid_length)));
924 wildcard_ssid_tlv->max_ssid_length =
925 user_scan_in->ssid_list[ssid_idx].max_len;
926
927 memcpy(wildcard_ssid_tlv->ssid,
928 user_scan_in->ssid_list[ssid_idx].ssid,
929 ssid_len);
930
931 tlv_pos += (sizeof(wildcard_ssid_tlv->header)
932 + le16_to_cpu(wildcard_ssid_tlv->header.len));
933
934 dev_dbg(adapter->dev, "info: scan: ssid_list[%d]: %s, %d\n",
935 ssid_idx, wildcard_ssid_tlv->ssid,
936 wildcard_ssid_tlv->max_ssid_length);
937
938			/* An empty wildcard ssid with a non-zero max length
939			   will match many or potentially all SSIDs
940			   (maxlen == 32), therefore do not treat the scan
941			   as filtered. */
942 if (!ssid_len && wildcard_ssid_tlv->max_ssid_length)
943 ssid_filter = false;
944
945 }
946
947 /*
948 * The default number of channels sent in the command is low to
949 * ensure the response buffer from the firmware does not
950 * truncate scan results. That is not an issue with an SSID
951 * or BSSID filter applied to the scan results in the firmware.
952 */
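		/* Mark the scan as filtered only if at least one non-wildcard
		   SSID was supplied or a specific (non-zero) BSSID was set */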
953 if ((ssid_idx && ssid_filter)
954 || memcmp(scan_cfg_out->specific_bssid, &zero_mac,
955 sizeof(zero_mac)))
956 *filtered_scan = true;
957 } else {
958 scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
959 num_probes = adapter->scan_probes;
960 }
961
962 /*
963 * If a specific BSSID or SSID is used, the number of channels in the
964 * scan command will be increased to the absolute maximum.
965 */
966 if (*filtered_scan)
967 *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
968 else
969 *max_chan_per_scan = MWIFIEX_CHANNELS_PER_SCAN_CMD;
970
971 /* If the input config or adapter has the number of Probes set,
972 add tlv */
973 if (num_probes) {
974
975 dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
976 num_probes);
977
978 num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
979 num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
980 num_probes_tlv->header.len =
981 cpu_to_le16(sizeof(num_probes_tlv->num_probes));
982 num_probes_tlv->num_probes = cpu_to_le16((u16) num_probes);
983
984 tlv_pos += sizeof(num_probes_tlv->header) +
985 le16_to_cpu(num_probes_tlv->header.len);
986
987 }
988
989 /* Append rates tlv */
990 memset(rates, 0, sizeof(rates));
991
992 rates_size = mwifiex_get_supported_rates(priv, rates);
993
994 rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
995 rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
996 rates_tlv->header.len = cpu_to_le16((u16) rates_size);
997 memcpy(rates_tlv->rates, rates, rates_size);
998 tlv_pos += sizeof(rates_tlv->header) + rates_size;
999
1000 dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
1001
1002 if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info)
1003 && (priv->adapter->config_bands & BAND_GN
1004 || priv->adapter->config_bands & BAND_AN)) {
1005 ht_cap = (struct mwifiex_ie_types_htcap *) tlv_pos;
1006 memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
1007 ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
1008 ht_cap->header.len =
1009 cpu_to_le16(sizeof(struct ieee80211_ht_cap));
1010 radio_type =
1011 mwifiex_band_to_radio_type(priv->adapter->config_bands);
1012 mwifiex_fill_cap_info(priv, radio_type, ht_cap);
1013 tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
1014 }
1015
1016 /* Append vendor specific IE TLV */
1017 mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_SCAN, &tlv_pos);
1018
1019 /*
1020 * Set the output for the channel TLV to the address in the tlv buffer
1021 * past any TLVs that were added in this function (SSID, num_probes).
1022 * Channel TLVs will be added past this for each scan command,
1023 * preserving the TLVs that were previously added.
1024 */
1025 *chan_list_out =
1026 (struct mwifiex_ie_types_chan_list_param_set *) tlv_pos;
1027
1028 if (user_scan_in && user_scan_in->chan_list[0].chan_number) {
1029
1030 dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
1031
1032 for (chan_idx = 0;
1033 chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX
1034 && user_scan_in->chan_list[chan_idx].chan_number;
1035 chan_idx++) {
1036
1037 channel = user_scan_in->chan_list[chan_idx].chan_number;
1038 (scan_chan_list + chan_idx)->chan_number = channel;
1039
1040 radio_type =
1041 user_scan_in->chan_list[chan_idx].radio_type;
1042 (scan_chan_list + chan_idx)->radio_type = radio_type;
1043
1044 scan_type = user_scan_in->chan_list[chan_idx].scan_type;
1045
1046 if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
1047 (scan_chan_list +
1048 chan_idx)->chan_scan_mode_bitmap
1049 |= MWIFIEX_PASSIVE_SCAN;
1050 else
1051 (scan_chan_list +
1052 chan_idx)->chan_scan_mode_bitmap
1053 &= ~MWIFIEX_PASSIVE_SCAN;
1054
1055 if (user_scan_in->chan_list[chan_idx].scan_time) {
1056 scan_dur = (u16) user_scan_in->
1057 chan_list[chan_idx].scan_time;
1058 } else {
1059 if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
1060 scan_dur = adapter->passive_scan_time;
1061 else if (*filtered_scan)
1062 scan_dur = adapter->specific_scan_time;
1063 else
1064 scan_dur = adapter->active_scan_time;
1065 }
1066
1067 (scan_chan_list + chan_idx)->min_scan_time =
1068 cpu_to_le16(scan_dur);
1069 (scan_chan_list + chan_idx)->max_scan_time =
1070 cpu_to_le16(scan_dur);
1071 }
1072
1073 /* Check if we are only scanning the current channel */
1074 if ((chan_idx == 1)
1075 && (user_scan_in->chan_list[0].chan_number
1076 == priv->curr_bss_params.bss_descriptor.channel)) {
1077 *scan_current_only = true;
1078 dev_dbg(adapter->dev,
1079 "info: Scan: Scanning current channel only\n");
1080 }
1081
1082 } else {
1083 dev_dbg(adapter->dev,
1084 "info: Scan: Creating full region channel list\n");
1085 mwifiex_scan_create_channel_list(priv, user_scan_in,
1086 scan_chan_list,
1087 *filtered_scan);
1088 }
1089}
1090
1091/*
1092 * This function inspects the scan response buffer for pointers to
1093 * expected TLVs.
1094 *
1095 * TLVs can be included at the end of the scan response BSS information.
1096 *
1097 * Data in the buffer is parsed for pointers to TLVs that can potentially
1098 * be passed back in the response.
1099 */
1100static void
1101mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
1102 struct mwifiex_ie_types_data *tlv,
1103 u32 tlv_buf_size, u32 req_tlv_type,
1104 struct mwifiex_ie_types_data **tlv_data)
1105{
1106 struct mwifiex_ie_types_data *current_tlv;
1107 u32 tlv_buf_left;
1108 u32 tlv_type;
1109 u32 tlv_len;
1110
1111 current_tlv = tlv;
1112 tlv_buf_left = tlv_buf_size;
1113 *tlv_data = NULL;
1114
1115 dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
1116 tlv_buf_size);
1117
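	/* Each TLV in the response buffer is a header (little-endian 16-bit
	   type and 16-bit length) immediately followed by 'len' bytes of
	   value */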
1118 while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
1119
1120 tlv_type = le16_to_cpu(current_tlv->header.type);
1121 tlv_len = le16_to_cpu(current_tlv->header.len);
1122
1123 if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
1124 dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
1125 break;
1126 }
1127
1128 if (req_tlv_type == tlv_type) {
1129 switch (tlv_type) {
1130 case TLV_TYPE_TSFTIMESTAMP:
1131 dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
1132 "timestamp TLV, len = %d\n", tlv_len);
1133 *tlv_data = (struct mwifiex_ie_types_data *)
1134 current_tlv;
1135 break;
1136 case TLV_TYPE_CHANNELBANDLIST:
1137 dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
1138 " band list TLV, len = %d\n", tlv_len);
1139 *tlv_data = (struct mwifiex_ie_types_data *)
1140 current_tlv;
1141 break;
1142 default:
1143 dev_err(adapter->dev,
1144 "SCAN_RESP: unhandled TLV = %d\n",
1145 tlv_type);
1146 /* Give up, this seems corrupted */
1147 return;
1148 }
1149 }
1150
1151 if (*tlv_data)
1152 break;
1153
1154
1155 tlv_buf_left -= (sizeof(tlv->header) + tlv_len);
1156 current_tlv =
1157 (struct mwifiex_ie_types_data *) (current_tlv->data +
1158 tlv_len);
1159
1160 } /* while */
1161}
1162
1163/*
1164 * This function interprets a BSS scan response returned from the firmware.
1165 *
1166 * The various fixed fields and IEs are parsed and passed back for a BSS
1167 * probe response or beacon from scan command. Information is recorded as
1168 * needed in the scan table for that entry.
1169 *
1170 * The following IE types are recognized and parsed -
1171 * - SSID
1172 * - Supported rates
1173 * - FH parameters set
1174 * - DS parameters set
1175 * - CF parameters set
1176 * - IBSS parameters set
1177 * - ERP information
1178 * - Extended supported rates
1179 * - Vendor specific (221)
1180 * - RSN IE
1181 * - WAPI IE
1182 * - HT capability
1183 * - HT operation
1184 * - BSS Coexistence 20/40
1185 * - Extended capability
1186 * - Overlapping BSS scan parameters
1187 */
1188static int
1189mwifiex_interpret_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1190 struct mwifiex_bssdescriptor *bss_entry,
1191 u8 **beacon_info, u32 *bytes_left)
1192{
1193 int ret = 0;
1194 u8 element_id;
1195 struct ieee_types_fh_param_set *fh_param_set;
1196 struct ieee_types_ds_param_set *ds_param_set;
1197 struct ieee_types_cf_param_set *cf_param_set;
1198 struct ieee_types_ibss_param_set *ibss_param_set;
1199 __le16 beacon_interval;
1200 __le16 capabilities;
1201 u8 *current_ptr;
1202 u8 *rate;
1203 u8 element_len;
1204 u16 total_ie_len;
1205 u8 bytes_to_copy;
1206 u8 rate_size;
1207 u16 beacon_size;
1208 u8 found_data_rate_ie;
1209 u32 bytes_left_for_current_beacon;
1210 struct ieee_types_vendor_specific *vendor_ie;
1211 const u8 wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };
1212 const u8 wmm_oui[4] = { 0x00, 0x50, 0xf2, 0x02 };
1213
1214 found_data_rate_ie = false;
1215 rate_size = 0;
1216 beacon_size = 0;
1217
1218 if (*bytes_left >= sizeof(beacon_size)) {
1219 /* Extract & convert beacon size from the command buffer */
1220 memcpy(&beacon_size, *beacon_info, sizeof(beacon_size));
1221 *bytes_left -= sizeof(beacon_size);
1222 *beacon_info += sizeof(beacon_size);
1223 }
1224
1225 if (!beacon_size || beacon_size > *bytes_left) {
1226 *beacon_info += *bytes_left;
1227 *bytes_left = 0;
1228 return -1;
1229 }
1230
1231 /* Initialize the current working beacon pointer for this BSS
1232 iteration */
1233 current_ptr = *beacon_info;
1234
1235 /* Advance the return beacon pointer past the current beacon */
1236 *beacon_info += beacon_size;
1237 *bytes_left -= beacon_size;
1238
1239 bytes_left_for_current_beacon = beacon_size;
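	/* Each scan response entry begins with the 2-byte length consumed
	   above, then the 6-byte BSSID, a 1-byte RSSI, and the beacon/probe
	   response body: 8-byte timestamp, 2-byte beacon interval, 2-byte
	   capability field and the variable IEs */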
1240
1241 memcpy(bss_entry->mac_address, current_ptr, ETH_ALEN);
1242 dev_dbg(adapter->dev, "info: InterpretIE: AP MAC Addr: %pM\n",
1243 bss_entry->mac_address);
1244
1245 current_ptr += ETH_ALEN;
1246 bytes_left_for_current_beacon -= ETH_ALEN;
1247
1248 if (bytes_left_for_current_beacon < 12) {
1249 dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
1250 return -1;
1251 }
1252
1253 /*
1254 * Next 4 fields are RSSI, time stamp, beacon interval,
1255 * and capability information
1256 */
1257
1258 /* RSSI is 1 byte long */
1259 bss_entry->rssi = (s32) (*current_ptr);
1260 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", *current_ptr);
1261 current_ptr += 1;
1262 bytes_left_for_current_beacon -= 1;
1263
1264 /*
1265 * The RSSI is not part of the beacon/probe response. After we have
1266 * advanced current_ptr past the RSSI field, save the remaining
1267 * data for use at the application layer
1268 */
1269 bss_entry->beacon_buf = current_ptr;
1270 bss_entry->beacon_buf_size = bytes_left_for_current_beacon;
1271
1272 /* Time stamp is 8 bytes long */
1273 memcpy(bss_entry->time_stamp, current_ptr, 8);
1274 current_ptr += 8;
1275 bytes_left_for_current_beacon -= 8;
1276
1277 /* Beacon interval is 2 bytes long */
1278 memcpy(&beacon_interval, current_ptr, 2);
1279 bss_entry->beacon_period = le16_to_cpu(beacon_interval);
1280 current_ptr += 2;
1281 bytes_left_for_current_beacon -= 2;
1282
1283 /* Capability information is 2 bytes long */
1284 memcpy(&capabilities, current_ptr, 2);
1285 dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
1286 capabilities);
1287 bss_entry->cap_info_bitmap = le16_to_cpu(capabilities);
1288 current_ptr += 2;
1289 bytes_left_for_current_beacon -= 2;
1290
1291	/* The rest of the current buffer contains IEs */
1292 dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
1293 bytes_left_for_current_beacon);
1294
1295 if (bss_entry->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
1296 dev_dbg(adapter->dev, "info: InterpretIE: AP WEP enabled\n");
1297 bss_entry->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
1298 } else {
1299 bss_entry->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
1300 }
1301
1302 if (bss_entry->cap_info_bitmap & WLAN_CAPABILITY_IBSS)
1303 bss_entry->bss_mode = NL80211_IFTYPE_ADHOC;
1304 else
1305 bss_entry->bss_mode = NL80211_IFTYPE_STATION;
1306
1307
1308 /* Process variable IE */
1309 while (bytes_left_for_current_beacon >= 2) {
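		/* Each IE is a one-byte element ID and a one-byte length,
		   followed by element_len bytes of payload */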
1310 element_id = *current_ptr;
1311 element_len = *(current_ptr + 1);
1312 total_ie_len = element_len + sizeof(struct ieee_types_header);
1313
1314 if (bytes_left_for_current_beacon < total_ie_len) {
1315 dev_err(adapter->dev, "err: InterpretIE: in processing"
1316 " IE, bytes left < IE length\n");
1317 bytes_left_for_current_beacon = 0;
1318 ret = -1;
1319 continue;
1320 }
1321 switch (element_id) {
1322 case WLAN_EID_SSID:
1323 bss_entry->ssid.ssid_len = element_len;
1324 memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
1325 element_len);
1326 dev_dbg(adapter->dev, "info: InterpretIE: ssid: %-32s\n",
1327 bss_entry->ssid.ssid);
1328 break;
1329
1330 case WLAN_EID_SUPP_RATES:
1331 memcpy(bss_entry->data_rates, current_ptr + 2,
1332 element_len);
1333 memcpy(bss_entry->supported_rates, current_ptr + 2,
1334 element_len);
1335 rate_size = element_len;
1336 found_data_rate_ie = true;
1337 break;
1338
1339 case WLAN_EID_FH_PARAMS:
1340 fh_param_set =
1341 (struct ieee_types_fh_param_set *) current_ptr;
1342 memcpy(&bss_entry->phy_param_set.fh_param_set,
1343 fh_param_set,
1344 sizeof(struct ieee_types_fh_param_set));
1345 break;
1346
1347 case WLAN_EID_DS_PARAMS:
1348 ds_param_set =
1349 (struct ieee_types_ds_param_set *) current_ptr;
1350
1351 bss_entry->channel = ds_param_set->current_chan;
1352
1353 memcpy(&bss_entry->phy_param_set.ds_param_set,
1354 ds_param_set,
1355 sizeof(struct ieee_types_ds_param_set));
1356 break;
1357
1358 case WLAN_EID_CF_PARAMS:
1359 cf_param_set =
1360 (struct ieee_types_cf_param_set *) current_ptr;
1361 memcpy(&bss_entry->ss_param_set.cf_param_set,
1362 cf_param_set,
1363 sizeof(struct ieee_types_cf_param_set));
1364 break;
1365
1366 case WLAN_EID_IBSS_PARAMS:
1367 ibss_param_set =
1368 (struct ieee_types_ibss_param_set *)
1369 current_ptr;
1370 memcpy(&bss_entry->ss_param_set.ibss_param_set,
1371 ibss_param_set,
1372 sizeof(struct ieee_types_ibss_param_set));
1373 break;
1374
1375 case WLAN_EID_ERP_INFO:
1376 bss_entry->erp_flags = *(current_ptr + 2);
1377 break;
1378
1379 case WLAN_EID_EXT_SUPP_RATES:
1380 /*
1381 * Only process extended supported rate
1382 * if data rate is already found.
1383 * Data rate IE should come before
1384 * extended supported rate IE
1385 */
1386 if (found_data_rate_ie) {
1387 if ((element_len + rate_size) >
1388 MWIFIEX_SUPPORTED_RATES)
1389 bytes_to_copy =
1390 (MWIFIEX_SUPPORTED_RATES -
1391 rate_size);
1392 else
1393 bytes_to_copy = element_len;
1394
1395 rate = (u8 *) bss_entry->data_rates;
1396 rate += rate_size;
1397 memcpy(rate, current_ptr + 2, bytes_to_copy);
1398
1399 rate = (u8 *) bss_entry->supported_rates;
1400 rate += rate_size;
1401 memcpy(rate, current_ptr + 2, bytes_to_copy);
1402 }
1403 break;
1404
1405 case WLAN_EID_VENDOR_SPECIFIC:
1406 vendor_ie = (struct ieee_types_vendor_specific *)
1407 current_ptr;
1408
1409 if (!memcmp
1410 (vendor_ie->vend_hdr.oui, wpa_oui,
1411 sizeof(wpa_oui))) {
1412 bss_entry->bcn_wpa_ie =
1413 (struct ieee_types_vendor_specific *)
1414 current_ptr;
1415 bss_entry->wpa_offset = (u16) (current_ptr -
1416 bss_entry->beacon_buf);
1417 } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
1418 sizeof(wmm_oui))) {
1419 if (total_ie_len ==
1420 sizeof(struct ieee_types_wmm_parameter)
1421 || total_ie_len ==
1422 sizeof(struct ieee_types_wmm_info))
1423 /*
1424 * Only accept and copy the WMM IE if
1425 * it matches the size expected for the
1426 * WMM Info IE or the WMM Parameter IE.
1427 */
1428 memcpy((u8 *) &bss_entry->wmm_ie,
1429 current_ptr, total_ie_len);
1430 }
1431 break;
1432 case WLAN_EID_RSN:
1433 bss_entry->bcn_rsn_ie =
1434 (struct ieee_types_generic *) current_ptr;
1435 bss_entry->rsn_offset = (u16) (current_ptr -
1436 bss_entry->beacon_buf);
1437 break;
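		/* The WAPI information element shares this element ID value,
		   so the pointer captured below is treated as the WAPI IE */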
1438 case WLAN_EID_BSS_AC_ACCESS_DELAY:
1439 bss_entry->bcn_wapi_ie =
1440 (struct ieee_types_generic *) current_ptr;
1441 bss_entry->wapi_offset = (u16) (current_ptr -
1442 bss_entry->beacon_buf);
1443 break;
1444 case WLAN_EID_HT_CAPABILITY:
1445 bss_entry->bcn_ht_cap = (struct ieee80211_ht_cap *)
1446 (current_ptr +
1447 sizeof(struct ieee_types_header));
1448 bss_entry->ht_cap_offset = (u16) (current_ptr +
1449 sizeof(struct ieee_types_header) -
1450 bss_entry->beacon_buf);
1451 break;
1452 case WLAN_EID_HT_INFORMATION:
1453 bss_entry->bcn_ht_info = (struct ieee80211_ht_info *)
1454 (current_ptr +
1455 sizeof(struct ieee_types_header));
1456 bss_entry->ht_info_offset = (u16) (current_ptr +
1457 sizeof(struct ieee_types_header) -
1458 bss_entry->beacon_buf);
1459 break;
1460 case WLAN_EID_BSS_COEX_2040:
1461 bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr +
1462 sizeof(struct ieee_types_header));
1463 bss_entry->bss_co_2040_offset = (u16) (current_ptr +
1464 sizeof(struct ieee_types_header) -
1465 bss_entry->beacon_buf);
1466 break;
1467 case WLAN_EID_EXT_CAPABILITY:
1468 bss_entry->bcn_ext_cap = (u8 *) (current_ptr +
1469 sizeof(struct ieee_types_header));
1470 bss_entry->ext_cap_offset = (u16) (current_ptr +
1471 sizeof(struct ieee_types_header) -
1472 bss_entry->beacon_buf);
1473 break;
1474 case WLAN_EID_OVERLAP_BSS_SCAN_PARAM:
1475 bss_entry->bcn_obss_scan =
1476 (struct ieee_types_obss_scan_param *)
1477 current_ptr;
1478 bss_entry->overlap_bss_offset = (u16) (current_ptr -
1479 bss_entry->beacon_buf);
1480 break;
1481 default:
1482 break;
1483 }
1484
1485 current_ptr += element_len + 2;
1486
1487 /* Need to account for IE ID and IE Len */
1488 bytes_left_for_current_beacon -= (element_len + 2);
1489
1490	} /* while (bytes_left_for_current_beacon >= 2) */
1491 return ret;
1492}
1493
1494/*
1495 * This function adjusts the pointers used in beacon buffers to reflect
1496 * shifts.
1497 *
1498 * The memory allocated for beacon buffers is of a fixed size in which all
1499 * the saved beacons must be stored. New beacons are added in the free
1500 * portion of this memory, space permitting, while duplicate beacon buffers
1501 * are placed at the same start location. However, since a duplicate beacon
1502 * buffer may not match the size of the old one, all the following buffers
1503 * in the memory must be shifted to either make space or to fill the space
1504 * that was freed.
1505 *
1506 * This function is used to update the beacon buffer pointers that are past
1507 * an existing beacon buffer that is updated with a new one of different
1508 * size. The pointers are shifted by a fixed amount, either forward or
1509 * backward.
1510 *
1511 * The following pointers in every affected beacon buffer are changed, if
1512 * present -
1513 * - WPA IE pointer
1514 * - RSN IE pointer
1515 * - WAPI IE pointer
1516 * - HT capability IE pointer
1517 * - HT information IE pointer
1518 * - BSS coexistence 20/40 IE pointer
1519 * - Extended capability IE pointer
1520 * - Overlapping BSS scan parameter IE pointer
1521 */
1522static void
1523mwifiex_adjust_beacon_buffer_ptrs(struct mwifiex_private *priv, u8 advance,
1524 u8 *bcn_store, u32 rem_bcn_size,
1525 u32 num_of_ent)
1526{
1527 struct mwifiex_adapter *adapter = priv->adapter;
1528 u32 adj_idx;
1529 for (adj_idx = 0; adj_idx < num_of_ent; adj_idx++) {
1530 if (adapter->scan_table[adj_idx].beacon_buf > bcn_store) {
1531
1532 if (advance)
1533 adapter->scan_table[adj_idx].beacon_buf +=
1534 rem_bcn_size;
1535 else
1536 adapter->scan_table[adj_idx].beacon_buf -=
1537 rem_bcn_size;
1538
1539 if (adapter->scan_table[adj_idx].bcn_wpa_ie)
1540 adapter->scan_table[adj_idx].bcn_wpa_ie =
1541 (struct ieee_types_vendor_specific *)
1542 (adapter->scan_table[adj_idx].beacon_buf +
1543 adapter->scan_table[adj_idx].wpa_offset);
1544 if (adapter->scan_table[adj_idx].bcn_rsn_ie)
1545 adapter->scan_table[adj_idx].bcn_rsn_ie =
1546 (struct ieee_types_generic *)
1547 (adapter->scan_table[adj_idx].beacon_buf +
1548 adapter->scan_table[adj_idx].rsn_offset);
1549 if (adapter->scan_table[adj_idx].bcn_wapi_ie)
1550 adapter->scan_table[adj_idx].bcn_wapi_ie =
1551 (struct ieee_types_generic *)
1552 (adapter->scan_table[adj_idx].beacon_buf +
1553 adapter->scan_table[adj_idx].wapi_offset);
1554 if (adapter->scan_table[adj_idx].bcn_ht_cap)
1555 adapter->scan_table[adj_idx].bcn_ht_cap =
1556 (struct ieee80211_ht_cap *)
1557 (adapter->scan_table[adj_idx].beacon_buf +
1558 adapter->scan_table[adj_idx].ht_cap_offset);
1559
1560 if (adapter->scan_table[adj_idx].bcn_ht_info)
1561 adapter->scan_table[adj_idx].bcn_ht_info =
1562 (struct ieee80211_ht_info *)
1563 (adapter->scan_table[adj_idx].beacon_buf +
1564 adapter->scan_table[adj_idx].ht_info_offset);
1565 if (adapter->scan_table[adj_idx].bcn_bss_co_2040)
1566 adapter->scan_table[adj_idx].bcn_bss_co_2040 =
1567 (u8 *)
1568 (adapter->scan_table[adj_idx].beacon_buf +
1569 adapter->scan_table[adj_idx].bss_co_2040_offset);
1570 if (adapter->scan_table[adj_idx].bcn_ext_cap)
1571 adapter->scan_table[adj_idx].bcn_ext_cap =
1572 (u8 *)
1573 (adapter->scan_table[adj_idx].beacon_buf +
1574 adapter->scan_table[adj_idx].ext_cap_offset);
1575 if (adapter->scan_table[adj_idx].bcn_obss_scan)
1576 adapter->scan_table[adj_idx].bcn_obss_scan =
1577 (struct ieee_types_obss_scan_param *)
1578 (adapter->scan_table[adj_idx].beacon_buf +
1579 adapter->scan_table[adj_idx].overlap_bss_offset);
1580 }
1581 }
1582}
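/*
 * Sketch of the shared beacon storage handled above (layout inferred from the
 * shifting logic): saved beacons sit back to back in adapter->bcn_buf,
 *
 *   bcn_buf: | entry 0 | entry 1 | entry 2 | ...free... |   <-- bcn_buf_end
 *
 * so replacing one entry with a larger or smaller beacon moves every later
 * entry by a fixed amount, and their beacon_buf pointers plus the IE pointers
 * derived from them must be adjusted by that same amount.
 */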
1583
1584/*
1585 * This function updates the pointers used in beacon buffer for given bss
1586 * descriptor to reflect shifts
1587 *
1588 * Following pointers are updated
1589 * - WPA IE pointer
1590 * - RSN IE pointer
1591 * - WAPI IE pointer
1592 * - HT capability IE pointer
1593 * - HT information IE pointer
1594 * - BSS coexistence 20/40 IE pointer
1595 * - Extended capability IE pointer
1596 * - Overlapping BSS scan parameter IE pointer
1597 */
1598static void
1599mwifiex_update_beacon_buffer_ptrs(struct mwifiex_bssdescriptor *beacon)
1600{
1601 if (beacon->bcn_wpa_ie)
1602 beacon->bcn_wpa_ie = (struct ieee_types_vendor_specific *)
1603 (beacon->beacon_buf + beacon->wpa_offset);
1604 if (beacon->bcn_rsn_ie)
1605 beacon->bcn_rsn_ie = (struct ieee_types_generic *)
1606 (beacon->beacon_buf + beacon->rsn_offset);
1607 if (beacon->bcn_wapi_ie)
1608 beacon->bcn_wapi_ie = (struct ieee_types_generic *)
1609 (beacon->beacon_buf + beacon->wapi_offset);
1610 if (beacon->bcn_ht_cap)
1611 beacon->bcn_ht_cap = (struct ieee80211_ht_cap *)
1612 (beacon->beacon_buf + beacon->ht_cap_offset);
1613 if (beacon->bcn_ht_info)
1614 beacon->bcn_ht_info = (struct ieee80211_ht_info *)
1615 (beacon->beacon_buf + beacon->ht_info_offset);
1616 if (beacon->bcn_bss_co_2040)
1617 beacon->bcn_bss_co_2040 = (u8 *) (beacon->beacon_buf +
1618 beacon->bss_co_2040_offset);
1619 if (beacon->bcn_ext_cap)
1620 beacon->bcn_ext_cap = (u8 *) (beacon->beacon_buf +
1621 beacon->ext_cap_offset);
1622 if (beacon->bcn_obss_scan)
1623 beacon->bcn_obss_scan = (struct ieee_types_obss_scan_param *)
1624 (beacon->beacon_buf + beacon->overlap_bss_offset);
1625}
1626
1627/*
1628 * This function stores a beacon or probe response for a BSS returned
1629 * in the scan.
1630 *
1631 * This stores a new scan response or an update for a previous scan response.
1632 * New entries need to verify that they do not exceed the total amount of
1633 * memory allocated for the table.
1634 *
1635 * Replacement entries need to take into consideration the amount of space
1636 * currently allocated for the beacon/probe response and adjust the entry
1637 * as needed.
1638 *
1639 * A small amount of extra pad (SCAN_BEACON_ENTRY_PAD) is generally reserved
1640 * for an entry in case it is a beacon, since a probe response for the
1641 * network will be larger per the standard. This helps to reduce the
1642 * amount of memory copying to fit a new probe response into an entry
1643 * already occupied by a network's previously stored beacon.
1644 */
1645static void
1646mwifiex_ret_802_11_scan_store_beacon(struct mwifiex_private *priv,
1647 u32 beacon_idx, u32 num_of_ent,
1648 struct mwifiex_bssdescriptor *new_beacon)
1649{
1650 struct mwifiex_adapter *adapter = priv->adapter;
1651 u8 *bcn_store;
1652 u32 new_bcn_size;
1653 u32 old_bcn_size;
1654 u32 bcn_space;
1655
1656 if (adapter->scan_table[beacon_idx].beacon_buf) {
1657
1658 new_bcn_size = new_beacon->beacon_buf_size;
1659 old_bcn_size = adapter->scan_table[beacon_idx].beacon_buf_size;
1660 bcn_space = adapter->scan_table[beacon_idx].beacon_buf_size_max;
1661 bcn_store = adapter->scan_table[beacon_idx].beacon_buf;
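		/* Four cases follow: the new beacon is the same size as the
		   stored one, fits in the space already reserved, is larger
		   but still fits in the overall buffer, or is larger with no
		   room left (in which case the old beacon data is kept) */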
1662
1663 /* Set the max to be the same as current entry unless changed
1664 below */
1665 new_beacon->beacon_buf_size_max = bcn_space;
1666 if (new_bcn_size == old_bcn_size) {
1667 /*
1668 * Beacon is the same size as the previous entry.
1669 * Replace the previous contents with the scan result
1670 */
1671 memcpy(bcn_store, new_beacon->beacon_buf,
1672 new_beacon->beacon_buf_size);
1673
1674 } else if (new_bcn_size <= bcn_space) {
1675 /*
1676 * New beacon size will fit in the amount of space
1677 * we have previously allocated for it
1678 */
1679
1680 /* Copy the new beacon buffer entry over the old one */
1681 memcpy(bcn_store, new_beacon->beacon_buf, new_bcn_size);
1682
1683 /*
1684 * If the old beacon size was less than the maximum
1685			 * we had allotted for the entry, and the new entry
1686 * is even smaller, reset the max size to the old
1687 * beacon entry and compress the storage space
1688 * (leaving a new pad space of (old_bcn_size -
1689 * new_bcn_size).
1690 */
1691 if (old_bcn_size < bcn_space
1692 && new_bcn_size <= old_bcn_size) {
1693 /*
1694				 * Old beacon size is smaller than the allotted
1695				 * storage size. Shrink the allotted storage
1696 * space.
1697 */
1698 dev_dbg(adapter->dev, "info: AppControl:"
1699 " smaller duplicate beacon "
1700 "(%d), old = %d, new = %d, space = %d,"
1701 "left = %d\n",
1702 beacon_idx, old_bcn_size, new_bcn_size,
1703 bcn_space,
1704 (int)(sizeof(adapter->bcn_buf) -
1705 (adapter->bcn_buf_end -
1706 adapter->bcn_buf)));
1707
1708 /*
1709 * memmove (since the memory overlaps) the
1710 * data after the beacon we just stored to the
1711 * end of the current beacon. This cleans up
1712 * any unused space the old larger beacon was
1713 * using in the buffer
1714 */
1715 memmove(bcn_store + old_bcn_size,
1716 bcn_store + bcn_space,
1717 adapter->bcn_buf_end - (bcn_store +
1718 bcn_space));
1719
1720 /*
1721 * Decrement the end pointer by the difference
1722 * between the old larger size and the new
1723 * smaller size since we are using less space
1724 * due to the new beacon being smaller
1725 */
1726 adapter->bcn_buf_end -=
1727 (bcn_space - old_bcn_size);
1728
1729 /* Set the maximum storage size to the old
1730 beacon size */
1731 new_beacon->beacon_buf_size_max = old_bcn_size;
1732
1733 /* Adjust beacon buffer pointers that are past
1734 the current */
1735 mwifiex_adjust_beacon_buffer_ptrs(priv, 0,
1736 bcn_store, (bcn_space - old_bcn_size),
1737 num_of_ent);
1738 }
1739 } else if (adapter->bcn_buf_end + (new_bcn_size - bcn_space)
1740 < (adapter->bcn_buf + sizeof(adapter->bcn_buf))) {
1741 /*
1742 * Beacon is larger than space previously allocated
1743 * (bcn_space) and there is enough space left in the
1744			 * beacon buffer to store the additional data
1745 */
1746 dev_dbg(adapter->dev, "info: AppControl:"
1747 " larger duplicate beacon (%d), "
1748 "old = %d, new = %d, space = %d, left = %d\n",
1749 beacon_idx, old_bcn_size, new_bcn_size,
1750 bcn_space,
1751 (int)(sizeof(adapter->bcn_buf) -
1752 (adapter->bcn_buf_end -
1753 adapter->bcn_buf)));
1754
1755 /*
1756 * memmove (since the memory overlaps) the data
1757 * after the beacon we just stored to the end of
1758 * the current beacon. This moves the data for
1759 * the beacons after this further in memory to
1760 * make space for the new larger beacon we are
1761 * about to copy in.
1762 */
1763 memmove(bcn_store + new_bcn_size,
1764 bcn_store + bcn_space,
1765 adapter->bcn_buf_end - (bcn_store + bcn_space));
1766
1767 /* Copy the new beacon buffer entry over the old one */
1768 memcpy(bcn_store, new_beacon->beacon_buf, new_bcn_size);
1769
1770 /* Move the beacon end pointer by the amount of new
1771 beacon data we are adding */
1772 adapter->bcn_buf_end += (new_bcn_size - bcn_space);
1773
1774 /*
1775			 * This entry is bigger than the allotted max space
1776 * previously reserved. Increase the max space to
1777 * be equal to the new beacon size
1778 */
1779 new_beacon->beacon_buf_size_max = new_bcn_size;
1780
1781 /* Adjust beacon buffer pointers that are past the
1782 current */
1783 mwifiex_adjust_beacon_buffer_ptrs(priv, 1, bcn_store,
1784 (new_bcn_size - bcn_space),
1785 num_of_ent);
1786 } else {
1787 /*
1788 * Beacon is larger than the previously allocated space,
1789 * but there is not enough free space to store the
1790 * additional data.
1791 */
1792 dev_err(adapter->dev, "AppControl: larger duplicate "
1793 " beacon (%d), old = %d new = %d, space = %d,"
1794 " left = %d\n", beacon_idx, old_bcn_size,
1795 new_bcn_size, bcn_space,
1796 (int)(sizeof(adapter->bcn_buf) -
1797 (adapter->bcn_buf_end - adapter->bcn_buf)));
1798
1799 /* Storage failure, keep old beacon intact */
1800 new_beacon->beacon_buf_size = old_bcn_size;
1801 if (new_beacon->bcn_wpa_ie)
1802 new_beacon->wpa_offset =
1803 adapter->scan_table[beacon_idx].
1804 wpa_offset;
1805 if (new_beacon->bcn_rsn_ie)
1806 new_beacon->rsn_offset =
1807 adapter->scan_table[beacon_idx].
1808 rsn_offset;
1809 if (new_beacon->bcn_wapi_ie)
1810 new_beacon->wapi_offset =
1811 adapter->scan_table[beacon_idx].
1812 wapi_offset;
1813 if (new_beacon->bcn_ht_cap)
1814 new_beacon->ht_cap_offset =
1815 adapter->scan_table[beacon_idx].
1816 ht_cap_offset;
1817 if (new_beacon->bcn_ht_info)
1818 new_beacon->ht_info_offset =
1819 adapter->scan_table[beacon_idx].
1820 ht_info_offset;
1821 if (new_beacon->bcn_bss_co_2040)
1822 new_beacon->bss_co_2040_offset =
1823 adapter->scan_table[beacon_idx].
1824 bss_co_2040_offset;
1825 if (new_beacon->bcn_ext_cap)
1826 new_beacon->ext_cap_offset =
1827 adapter->scan_table[beacon_idx].
1828 ext_cap_offset;
1829 if (new_beacon->bcn_obss_scan)
1830 new_beacon->overlap_bss_offset =
1831 adapter->scan_table[beacon_idx].
1832 overlap_bss_offset;
1833 }
1834 /* Point the new entry to its permanent storage space */
1835 new_beacon->beacon_buf = bcn_store;
1836 mwifiex_update_beacon_buffer_ptrs(new_beacon);
1837 } else {
1838 /*
1839		 * No beacon data exists yet for this entry, check to see
1840		 * if we can fit it in the remaining space
1841 */
1842 if (adapter->bcn_buf_end + new_beacon->beacon_buf_size +
1843 SCAN_BEACON_ENTRY_PAD < (adapter->bcn_buf +
1844 sizeof(adapter->bcn_buf))) {
1845
1846 /*
1847 * Copy the beacon buffer data from the local entry to
1848 * the adapter dev struct buffer space used to store
1849 * the raw beacon data for each entry in the scan table
1850 */
1851 memcpy(adapter->bcn_buf_end, new_beacon->beacon_buf,
1852 new_beacon->beacon_buf_size);
1853
1854 /* Update the beacon ptr to point to the table save
1855 area */
1856 new_beacon->beacon_buf = adapter->bcn_buf_end;
1857 new_beacon->beacon_buf_size_max =
1858 (new_beacon->beacon_buf_size +
1859 SCAN_BEACON_ENTRY_PAD);
1860
1861 mwifiex_update_beacon_buffer_ptrs(new_beacon);
1862
1863 /* Increment the end pointer by the size reserved */
1864 adapter->bcn_buf_end += new_beacon->beacon_buf_size_max;
1865
1866 dev_dbg(adapter->dev, "info: AppControl: beacon[%02d]"
1867 " sz=%03d, used = %04d, left = %04d\n",
1868 beacon_idx,
1869 new_beacon->beacon_buf_size,
1870 (int)(adapter->bcn_buf_end - adapter->bcn_buf),
1871 (int)(sizeof(adapter->bcn_buf) -
1872 (adapter->bcn_buf_end -
1873 adapter->bcn_buf)));
1874 } else {
1875 /* No space for new beacon */
1876 dev_dbg(adapter->dev, "info: AppControl: no space for"
1877 " beacon (%d): %pM sz=%03d, left=%03d\n",
1878 beacon_idx, new_beacon->mac_address,
1879 new_beacon->beacon_buf_size,
1880 (int)(sizeof(adapter->bcn_buf) -
1881 (adapter->bcn_buf_end -
1882 adapter->bcn_buf)));
1883
1884 /* Storage failure; clear storage records for this
1885 bcn */
1886 new_beacon->beacon_buf = NULL;
1887 new_beacon->beacon_buf_size = 0;
1888 new_beacon->beacon_buf_size_max = 0;
1889 new_beacon->bcn_wpa_ie = NULL;
1890 new_beacon->wpa_offset = 0;
1891 new_beacon->bcn_rsn_ie = NULL;
1892 new_beacon->rsn_offset = 0;
1893 new_beacon->bcn_wapi_ie = NULL;
1894 new_beacon->wapi_offset = 0;
1895 new_beacon->bcn_ht_cap = NULL;
1896 new_beacon->ht_cap_offset = 0;
1897 new_beacon->bcn_ht_info = NULL;
1898 new_beacon->ht_info_offset = 0;
1899 new_beacon->bcn_bss_co_2040 = NULL;
1900 new_beacon->bss_co_2040_offset = 0;
1901 new_beacon->bcn_ext_cap = NULL;
1902 new_beacon->ext_cap_offset = 0;
1903 new_beacon->bcn_obss_scan = NULL;
1904 new_beacon->overlap_bss_offset = 0;
1905 }
1906 }
1907}
1908
1909/*
1910 * This function restores a beacon buffer of the current BSS descriptor.
1911 */
1912static void mwifiex_restore_curr_bcn(struct mwifiex_private *priv)
1913{
1914 struct mwifiex_adapter *adapter = priv->adapter;
1915 struct mwifiex_bssdescriptor *curr_bss =
1916 &priv->curr_bss_params.bss_descriptor;
1917 unsigned long flags;
1918
1919 if (priv->curr_bcn_buf &&
1920 ((adapter->bcn_buf_end + priv->curr_bcn_size) <
1921 (adapter->bcn_buf + sizeof(adapter->bcn_buf)))) {
1922 spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
1923
1924 /* restore the current beacon buffer */
1925 memcpy(adapter->bcn_buf_end, priv->curr_bcn_buf,
1926 priv->curr_bcn_size);
1927 curr_bss->beacon_buf = adapter->bcn_buf_end;
1928 curr_bss->beacon_buf_size = priv->curr_bcn_size;
1929 adapter->bcn_buf_end += priv->curr_bcn_size;
1930
1931 /* adjust the pointers in the current BSS descriptor */
1932 if (curr_bss->bcn_wpa_ie)
1933 curr_bss->bcn_wpa_ie =
1934 (struct ieee_types_vendor_specific *)
1935 (curr_bss->beacon_buf +
1936 curr_bss->wpa_offset);
1937
1938 if (curr_bss->bcn_rsn_ie)
1939 curr_bss->bcn_rsn_ie = (struct ieee_types_generic *)
1940 (curr_bss->beacon_buf +
1941 curr_bss->rsn_offset);
1942
1943 if (curr_bss->bcn_ht_cap)
1944 curr_bss->bcn_ht_cap = (struct ieee80211_ht_cap *)
1945 (curr_bss->beacon_buf +
1946 curr_bss->ht_cap_offset);
1947
1948 if (curr_bss->bcn_ht_info)
1949 curr_bss->bcn_ht_info = (struct ieee80211_ht_info *)
1950 (curr_bss->beacon_buf +
1951 curr_bss->ht_info_offset);
1952
1953 if (curr_bss->bcn_bss_co_2040)
1954 curr_bss->bcn_bss_co_2040 =
1955 (u8 *) (curr_bss->beacon_buf +
1956 curr_bss->bss_co_2040_offset);
1957
1958 if (curr_bss->bcn_ext_cap)
1959 curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf +
1960 curr_bss->ext_cap_offset);
1961
1962 if (curr_bss->bcn_obss_scan)
1963 curr_bss->bcn_obss_scan =
1964 (struct ieee_types_obss_scan_param *)
1965 (curr_bss->beacon_buf +
1966 curr_bss->overlap_bss_offset);
1967
1968 spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
1969
1970 dev_dbg(adapter->dev, "info: current beacon restored %d\n",
1971 priv->curr_bcn_size);
1972 } else {
1973 dev_warn(adapter->dev,
1974 "curr_bcn_buf not saved or bcn_buf has no space\n");
1975 }
1976}
1977
1978/*
1979 * This function post processes the scan table after a new scan command has
1980 * completed.
1981 *
1982 * It inspects each entry of the scan table and tries to find an entry that
1983 * matches with our current associated/joined network from the scan. If
1984 * one is found, the stored copy of the BSS descriptor of our current network
1985 * is updated.
1986 *
1987 * It also dumps the current scan table contents to the debug log when done.
1988 */
1989static void
1990mwifiex_process_scan_results(struct mwifiex_private *priv)
1991{
1992 struct mwifiex_adapter *adapter = priv->adapter;
1993 s32 j;
1994 u32 i;
1995 unsigned long flags;
1996
1997 if (priv->media_connected) {
1998
1999 j = mwifiex_find_ssid_in_list(priv, &priv->curr_bss_params.
2000 bss_descriptor.ssid,
2001 priv->curr_bss_params.
2002 bss_descriptor.mac_address,
2003 priv->bss_mode);
2004
2005 if (j >= 0) {
2006 spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
2007 priv->curr_bss_params.bss_descriptor.bcn_wpa_ie = NULL;
2008 priv->curr_bss_params.bss_descriptor.wpa_offset = 0;
2009 priv->curr_bss_params.bss_descriptor.bcn_rsn_ie = NULL;
2010 priv->curr_bss_params.bss_descriptor.rsn_offset = 0;
2011 priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL;
2012 priv->curr_bss_params.bss_descriptor.wapi_offset = 0;
2013 priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
2014 priv->curr_bss_params.bss_descriptor.ht_cap_offset =
2015 0;
2016 priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL;
2017 priv->curr_bss_params.bss_descriptor.ht_info_offset =
2018 0;
2019 priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
2020 NULL;
2021 priv->curr_bss_params.bss_descriptor.
2022 bss_co_2040_offset = 0;
2023 priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL;
2024 priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0;
2025 priv->curr_bss_params.bss_descriptor.
2026 bcn_obss_scan = NULL;
2027 priv->curr_bss_params.bss_descriptor.
2028 overlap_bss_offset = 0;
2029 priv->curr_bss_params.bss_descriptor.beacon_buf = NULL;
2030 priv->curr_bss_params.bss_descriptor.beacon_buf_size =
2031 0;
2032 priv->curr_bss_params.bss_descriptor.
2033 beacon_buf_size_max = 0;
2034
2035 dev_dbg(adapter->dev, "info: Found current ssid/bssid"
2036 " in list @ index #%d\n", j);
2037 /* Make a copy of current BSSID descriptor */
2038 memcpy(&priv->curr_bss_params.bss_descriptor,
2039 &adapter->scan_table[j],
2040 sizeof(priv->curr_bss_params.bss_descriptor));
2041
2042 mwifiex_save_curr_bcn(priv);
2043 spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
2044
2045 } else {
2046 mwifiex_restore_curr_bcn(priv);
2047 }
2048 }
2049
2050 for (i = 0; i < adapter->num_in_scan_table; i++)
2051 dev_dbg(adapter->dev, "info: scan:(%02d) %pM "
2052 "RSSI[%03d], SSID[%s]\n",
2053 i, adapter->scan_table[i].mac_address,
2054 (s32) adapter->scan_table[i].rssi,
2055 adapter->scan_table[i].ssid.ssid);
2056}
2057
2058/*
2059 * This function converts radio type scan parameter to a band configuration
2060 * to be used in join command.
2061 */
2062static u8
2063mwifiex_radio_type_to_band(u8 radio_type)
2064{
2065 switch (radio_type) {
2066 case HostCmd_SCAN_RADIO_TYPE_A:
2067 return BAND_A;
2068 case HostCmd_SCAN_RADIO_TYPE_BG:
2069 default:
2070 return BAND_G;
2071 }
2072}
2073
2074/*
2075 * This function deletes a specific indexed entry from the scan table.
2076 *
2077 * This also compacts the remaining entries and adjusts any buffering
2078 * of beacon/probe response data if needed.
2079 */
2080static void
2081mwifiex_scan_delete_table_entry(struct mwifiex_private *priv, s32 table_idx)
2082{
2083 struct mwifiex_adapter *adapter = priv->adapter;
2084 u32 del_idx;
2085 u32 beacon_buf_adj;
2086 u8 *beacon_buf;
2087
2088 /*
2089 * Shift the saved beacon buffer data for the scan table back over the
2090 * entry being removed. Update the end of buffer pointer. Save the
2091 * deleted buffer allocation size for pointer adjustments for entries
2092 * compacted after the deleted index.
2093 */
2094 beacon_buf_adj = adapter->scan_table[table_idx].beacon_buf_size_max;
2095
2096 dev_dbg(adapter->dev, "info: Scan: Delete Entry %d, beacon buffer "
2097 "removal = %d bytes\n", table_idx, beacon_buf_adj);
2098
2099 /* Check if the table entry had storage allocated for its beacon */
2100 if (beacon_buf_adj) {
2101 beacon_buf = adapter->scan_table[table_idx].beacon_buf;
2102
2103 /*
2104 * Remove the entry's buffer space, decrement the table end
2105 * pointer by the amount we are removing
2106 */
2107 adapter->bcn_buf_end -= beacon_buf_adj;
2108
2109 dev_dbg(adapter->dev, "info: scan: delete entry %d,"
2110 " compact data: %p <- %p (sz = %d)\n",
2111 table_idx, beacon_buf,
2112 beacon_buf + beacon_buf_adj,
2113 (int)(adapter->bcn_buf_end - beacon_buf));
2114
2115 /*
2116 * Compact data storage. Copy all data after the deleted
2117 * entry's end address (beacon_buf + beacon_buf_adj) back
2118 * to the original start address (beacon_buf).
2119 *
2120 * Scan table entries affected by the move will have their
2121 * entry pointer adjusted below.
2122 *
2123 * Use memmove since the dest/src memory regions overlap.
2124 */
2125 memmove(beacon_buf, beacon_buf + beacon_buf_adj,
2126 adapter->bcn_buf_end - beacon_buf);
2127 }
2128
2129 dev_dbg(adapter->dev,
2130 "info: Scan: Delete Entry %d, num_in_scan_table = %d\n",
2131 table_idx, adapter->num_in_scan_table);
2132
2133 /* Shift all of the entries after the table_idx back by one, compacting
2134 the table and removing the requested entry */
2135 for (del_idx = table_idx; (del_idx + 1) < adapter->num_in_scan_table;
2136 del_idx++) {
2137 /* Copy the next entry over this one */
2138 memcpy(adapter->scan_table + del_idx,
2139 adapter->scan_table + del_idx + 1,
2140 sizeof(struct mwifiex_bssdescriptor));
2141
2142 /*
2143 * Adjust this entry's pointer to its beacon buffer based on
2144 * the removed/compacted entry from the deleted index. Don't
2145 * decrement if the buffer pointer is NULL (no data stored for
2146 * this entry).
2147 */
2148 if (adapter->scan_table[del_idx].beacon_buf) {
2149 adapter->scan_table[del_idx].beacon_buf -=
2150 beacon_buf_adj;
2151 if (adapter->scan_table[del_idx].bcn_wpa_ie)
2152 adapter->scan_table[del_idx].bcn_wpa_ie =
2153 (struct ieee_types_vendor_specific *)
2154 (adapter->scan_table[del_idx].
2155 beacon_buf +
2156 adapter->scan_table[del_idx].
2157 wpa_offset);
2158 if (adapter->scan_table[del_idx].bcn_rsn_ie)
2159 adapter->scan_table[del_idx].bcn_rsn_ie =
2160 (struct ieee_types_generic *)
2161 (adapter->scan_table[del_idx].
2162 beacon_buf +
2163 adapter->scan_table[del_idx].
2164 rsn_offset);
2165 if (adapter->scan_table[del_idx].bcn_wapi_ie)
2166 adapter->scan_table[del_idx].bcn_wapi_ie =
2167 (struct ieee_types_generic *)
2168 (adapter->scan_table[del_idx].beacon_buf
2169 + adapter->scan_table[del_idx].
2170 wapi_offset);
2171 if (adapter->scan_table[del_idx].bcn_ht_cap)
2172 adapter->scan_table[del_idx].bcn_ht_cap =
2173 (struct ieee80211_ht_cap *)
2174 (adapter->scan_table[del_idx].beacon_buf
2175 + adapter->scan_table[del_idx].
2176 ht_cap_offset);
2177
2178 if (adapter->scan_table[del_idx].bcn_ht_info)
2179 adapter->scan_table[del_idx].bcn_ht_info =
2180 (struct ieee80211_ht_info *)
2181 (adapter->scan_table[del_idx].beacon_buf
2182 + adapter->scan_table[del_idx].
2183 ht_info_offset);
2184 if (adapter->scan_table[del_idx].bcn_bss_co_2040)
2185 adapter->scan_table[del_idx].bcn_bss_co_2040 =
2186 (u8 *)
2187 (adapter->scan_table[del_idx].beacon_buf
2188 + adapter->scan_table[del_idx].
2189 bss_co_2040_offset);
2190 if (adapter->scan_table[del_idx].bcn_ext_cap)
2191 adapter->scan_table[del_idx].bcn_ext_cap =
2192 (u8 *)
2193 (adapter->scan_table[del_idx].beacon_buf
2194 + adapter->scan_table[del_idx].
2195 ext_cap_offset);
2196 if (adapter->scan_table[del_idx].bcn_obss_scan)
2197 adapter->scan_table[del_idx].
2198 bcn_obss_scan =
2199 (struct ieee_types_obss_scan_param *)
2200 (adapter->scan_table[del_idx].beacon_buf
2201 + adapter->scan_table[del_idx].
2202 overlap_bss_offset);
2203 }
2204 }
2205
2206 /* The last entry is invalid now that it has been deleted or moved
2207 back */
2208 memset(adapter->scan_table + adapter->num_in_scan_table - 1,
2209 0x00, sizeof(struct mwifiex_bssdescriptor));
2210
2211 adapter->num_in_scan_table--;
2212}
2213
2214/*
2215 * This function deletes all occurrences of a given SSID from the scan table.
2216 *
2217 * This iterates through the scan table and deletes all entries that match
2218 * the given SSID. It also compacts the remaining scan table entries.
2219 */
2220static int
2221mwifiex_scan_delete_ssid_table_entry(struct mwifiex_private *priv,
2222 struct mwifiex_802_11_ssid *del_ssid)
2223{
2224 s32 table_idx = -1;
2225
2226 dev_dbg(priv->adapter->dev, "info: scan: delete ssid entry: %-32s\n",
2227 del_ssid->ssid);
2228
2229 /* If the requested SSID is found in the table, delete it. Then keep
2230	   searching the table for further entries with that SSID until no
2231 more are found */
2232 while ((table_idx = mwifiex_find_ssid_in_list(priv, del_ssid, NULL,
2233 NL80211_IFTYPE_UNSPECIFIED)) >= 0) {
2234 dev_dbg(priv->adapter->dev,
2235 "info: Scan: Delete SSID Entry: Found Idx = %d\n",
2236 table_idx);
2237 mwifiex_scan_delete_table_entry(priv, table_idx);
2238 }
2239
2240 return table_idx == -1 ? -1 : 0;
2241}
2242
2243/*
2244 * This is an internal function used to start a scan based on an input
2245 * configuration.
2246 *
2247 * It uses the user scan configuration, when one is provided, to send the
2248 * appropriate scan commands to the firmware and populate or update the
2249 * internal driver scan table.
2250 */
2251int mwifiex_scan_networks(struct mwifiex_private *priv,
2252 const struct mwifiex_user_scan_cfg *user_scan_in)
2253{
2254 int ret = 0;
2255 struct mwifiex_adapter *adapter = priv->adapter;
2256 struct cmd_ctrl_node *cmd_node;
2257 union mwifiex_scan_cmd_config_tlv *scan_cfg_out;
2258 struct mwifiex_ie_types_chan_list_param_set *chan_list_out;
2259 u32 buf_size;
2260 struct mwifiex_chan_scan_param_set *scan_chan_list;
2261 u8 keep_previous_scan;
2262 u8 filtered_scan;
2263 u8 scan_current_chan_only;
2264 u8 max_chan_per_scan;
2265 unsigned long flags;
2266
2267 if (adapter->scan_processing) {
2268 dev_dbg(adapter->dev, "cmd: Scan already in process...\n");
2269 return ret;
2270 }
2271
2272	if (priv->scan_block) {
2273		dev_dbg(adapter->dev,
2274			"cmd: Scan is blocked during association...\n");
2275		return ret;
2276	}
2277
2278	spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
2279	adapter->scan_processing = true;
2280	spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
2281
2282 scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
2283 GFP_KERNEL);
2284 if (!scan_cfg_out) {
2285 dev_err(adapter->dev, "failed to alloc scan_cfg_out\n");
2286 return -ENOMEM;
2287 }
2288
2289 buf_size = sizeof(struct mwifiex_chan_scan_param_set) *
2290 MWIFIEX_USER_SCAN_CHAN_MAX;
2291 scan_chan_list = kzalloc(buf_size, GFP_KERNEL);
2292 if (!scan_chan_list) {
2293 dev_err(adapter->dev, "failed to alloc scan_chan_list\n");
2294 kfree(scan_cfg_out);
2295 return -ENOMEM;
2296 }
2297
2298 keep_previous_scan = false;
2299
2300 mwifiex_scan_setup_scan_config(priv, user_scan_in,
2301 &scan_cfg_out->config, &chan_list_out,
2302 scan_chan_list, &max_chan_per_scan,
2303 &filtered_scan, &scan_current_chan_only);
2304
2305 if (user_scan_in)
2306 keep_previous_scan = user_scan_in->keep_previous_scan;
2307
2308
2309 if (!keep_previous_scan) {
2310 memset(adapter->scan_table, 0x00,
2311 sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP);
2312 adapter->num_in_scan_table = 0;
2313 adapter->bcn_buf_end = adapter->bcn_buf;
2314 }
2315
2316 ret = mwifiex_scan_channel_list(priv, max_chan_per_scan, filtered_scan,
2317 &scan_cfg_out->config, chan_list_out,
2318 scan_chan_list);
2319
2320 /* Get scan command from scan_pending_q and put to cmd_pending_q */
2321 if (!ret) {
2322 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
2323 if (!list_empty(&adapter->scan_pending_q)) {
2324 cmd_node = list_first_entry(&adapter->scan_pending_q,
2325 struct cmd_ctrl_node, list);
2326 list_del(&cmd_node->list);
2327 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
2328 flags);
2329 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
2330 true);
2331 } else {
2332 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
2333 flags);
2334 }
2335 } else {
2336 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
2337		adapter->scan_processing = false;
2338 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
2339 }
2340
2341 kfree(scan_cfg_out);
2342 kfree(scan_chan_list);
2343 return ret;
2344}
2345
2346/*
2347 * This function prepares a scan command to be sent to the firmware.
2348 *
2349 * This uses the scan command configuration sent to the command processing
2350 * module in command preparation stage to configure a scan command structure
2351 * to send to firmware.
2352 *
2353 * The fixed fields specifying the BSS type and BSSID filters as well as a
2354 * variable number/length of TLVs are sent in the command to firmware.
2355 *
2356 * Preparation also includes -
2357 * - Setting command ID, and proper size
2358 * - Ensuring correct endian-ness
2359 */
2360int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, void *data_buf)
2361{
2362 struct host_cmd_ds_802_11_scan *scan_cmd = &cmd->params.scan;
2363 struct mwifiex_scan_cmd_config *scan_cfg;
2364
2365 scan_cfg = (struct mwifiex_scan_cmd_config *) data_buf;
2366
2367 /* Set fixed field variables in scan command */
2368 scan_cmd->bss_mode = scan_cfg->bss_mode;
2369 memcpy(scan_cmd->bssid, scan_cfg->specific_bssid,
2370 sizeof(scan_cmd->bssid));
2371 memcpy(scan_cmd->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
2372
2373 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN);
2374
2375 /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
2376 cmd->size = cpu_to_le16((u16) (sizeof(scan_cmd->bss_mode)
2377 + sizeof(scan_cmd->bssid)
2378 + scan_cfg->tlv_buf_len + S_DS_GEN));
2379
2380 return 0;
2381}
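
/*
 * Editor's illustrative sketch (not part of the driver): the command length
 * set above is just the fixed scan fields plus the caller-supplied TLVs plus
 * the generic command header.  Assuming bss_mode is one byte and S_DS_GEN is
 * the 8-byte generic header, 40 bytes of TLVs give a 1 + 6 + 40 + 8 = 55
 * byte command.  The helper name is hypothetical.
 */
static u16 example_scan_cmd_size(u16 tlv_buf_len)
{
	return (u16) (sizeof(u8)	/* bss_mode (assumed one byte) */
		      + ETH_ALEN	/* bssid */
		      + tlv_buf_len
		      + S_DS_GEN);	/* generic command header */
}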
2382
2383/*
2384 * This function handles the command response of scan.
2385 *
2386 * The response buffer for the scan command has the following
2387 * memory layout:
2388 *
2389 * .-------------------------------------------------------------.
2390 * | Header (4 * sizeof(t_u16)): Standard command response hdr |
2391 * .-------------------------------------------------------------.
2392 * | BufSize (t_u16) : sizeof the BSS Description data |
2393 * .-------------------------------------------------------------.
2394 * | NumOfSet (t_u8) : Number of BSS Descs returned |
2395 * .-------------------------------------------------------------.
2396 * | BSSDescription data (variable, size given in BufSize) |
2397 * .-------------------------------------------------------------.
2398 * | TLV data (variable, size calculated using Header->Size, |
2399 * | BufSize and sizeof the fixed fields above) |
2400 * .-------------------------------------------------------------.
2401 */
2402int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
2403 struct host_cmd_ds_command *resp)
2404{
2405 int ret = 0;
2406 struct mwifiex_adapter *adapter = priv->adapter;
2407 struct cmd_ctrl_node *cmd_node;
2408 struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
2409 struct mwifiex_bssdescriptor *bss_new_entry = NULL;
2410 struct mwifiex_ie_types_data *tlv_data;
2411 struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
2412 u8 *bss_info;
2413 u32 scan_resp_size;
2414 u32 bytes_left;
2415 u32 num_in_table;
2416 u32 bss_idx;
2417 u32 idx;
2418 u32 tlv_buf_size;
2419 long long tsf_val;
2420 struct mwifiex_chan_freq_power *cfp;
2421 struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
2422 struct chan_band_param_set *chan_band;
2423 u8 band;
2424 u8 is_bgscan_resp;
2425 unsigned long flags;
2426
2427 is_bgscan_resp = (le16_to_cpu(resp->command)
2428 == HostCmd_CMD_802_11_BG_SCAN_QUERY);
2429 if (is_bgscan_resp)
2430 scan_rsp = &resp->params.bg_scan_query_resp.scan_resp;
2431 else
2432 scan_rsp = &resp->params.scan_resp;
2433
2434
2435 if (scan_rsp->number_of_sets > IW_MAX_AP) {
2436		dev_err(adapter->dev, "SCAN_RESP: too many APs returned (%d)\n",
2437 scan_rsp->number_of_sets);
2438 ret = -1;
2439 goto done;
2440 }
2441
2442 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
2443 dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
2444 bytes_left);
2445
2446 scan_resp_size = le16_to_cpu(resp->size);
2447
2448 dev_dbg(adapter->dev,
2449 "info: SCAN_RESP: returned %d APs before parsing\n",
2450 scan_rsp->number_of_sets);
2451
2452 num_in_table = adapter->num_in_scan_table;
2453 bss_info = scan_rsp->bss_desc_and_tlv_buffer;
2454
2455 /*
2456 * The size of the TLV buffer is equal to the entire command response
2457 * size (scan_resp_size) minus the fixed fields (sizeof()'s), the
2458	 * BSS Descriptions (bss_descript_size, held in bytes_left) and the command
2459 * response header (S_DS_GEN)
2460 */
2461 tlv_buf_size = scan_resp_size - (bytes_left
2462 + sizeof(scan_rsp->bss_descript_size)
2463 + sizeof(scan_rsp->number_of_sets)
2464 + S_DS_GEN);
2465
2466 tlv_data = (struct mwifiex_ie_types_data *) (scan_rsp->
2467 bss_desc_and_tlv_buffer +
2468 bytes_left);
2469
2470	/* Search the TLV buffer space in the scan response for the TSF
2471	   timestamp TLV */
2472 mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
2473 TLV_TYPE_TSFTIMESTAMP,
2474 (struct mwifiex_ie_types_data **)
2475 &tsf_tlv);
2476
2477	/* Search the TLV buffer space in the scan response for the
2478	   channel/band list TLV */
2479 mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
2480 TLV_TYPE_CHANNELBANDLIST,
2481 (struct mwifiex_ie_types_data **)
2482 &chan_band_tlv);
2483
2484 /*
2485 * Process each scan response returned (scan_rsp->number_of_sets).
2486 * Save the information in the bss_new_entry and then insert into the
2487 * driver scan table either as an update to an existing entry
2488 * or as an addition at the end of the table
2489 */
2490 bss_new_entry = kzalloc(sizeof(struct mwifiex_bssdescriptor),
2491 GFP_KERNEL);
2492 if (!bss_new_entry) {
2493 dev_err(adapter->dev, " failed to alloc bss_new_entry\n");
2494 return -ENOMEM;
2495 }
2496
2497 for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
2498 /* Zero out the bss_new_entry we are about to store info in */
2499 memset(bss_new_entry, 0x00,
2500 sizeof(struct mwifiex_bssdescriptor));
2501
2502 if (mwifiex_interpret_bss_desc_with_ie(adapter, bss_new_entry,
2503 &bss_info,
2504 &bytes_left)) {
2505 /* Error parsing/interpreting scan response, skipped */
2506 dev_err(adapter->dev, "SCAN_RESP: "
2507 "mwifiex_interpret_bss_desc_with_ie "
2508 "returned ERROR\n");
2509 continue;
2510 }
2511
2512 /* Process the data fields and IEs returned for this BSS */
2513 dev_dbg(adapter->dev, "info: SCAN_RESP: BSSID = %pM\n",
2514 bss_new_entry->mac_address);
2515
2516 /* Search the scan table for the same bssid */
2517 for (bss_idx = 0; bss_idx < num_in_table; bss_idx++) {
2518 if (memcmp(bss_new_entry->mac_address,
2519 adapter->scan_table[bss_idx].mac_address,
2520 sizeof(bss_new_entry->mac_address))) {
2521 continue;
2522 }
2523 /*
2524 * If the SSID matches as well, it is a
2525 * duplicate of this entry. Keep the bss_idx
2526 * set to this entry so we replace the old
2527 * contents in the table
2528 */
2529 if ((bss_new_entry->ssid.ssid_len
2530			    == adapter->scan_table[bss_idx].ssid.ssid_len)
2531 && (!memcmp(bss_new_entry->ssid.ssid,
2532 adapter->scan_table[bss_idx].ssid.ssid,
2533 bss_new_entry->ssid.ssid_len))) {
2534 dev_dbg(adapter->dev, "info: SCAN_RESP:"
2535 " duplicate of index: %d\n", bss_idx);
2536 break;
2537 }
2538 }
2539 /*
2540 * If the bss_idx is equal to the number of entries in
2541 * the table, the new entry was not a duplicate; append
2542 * it to the scan table
2543 */
2544 if (bss_idx == num_in_table) {
2545 /* Range check the bss_idx, keep it limited to
2546 the last entry */
2547 if (bss_idx == IW_MAX_AP)
2548 bss_idx--;
2549 else
2550 num_in_table++;
2551 }
2552
2553 /*
2554 * Save the beacon/probe response returned for later application
2555 * retrieval. Duplicate beacon/probe responses are updated if
2556 * possible
2557 */
2558 mwifiex_ret_802_11_scan_store_beacon(priv, bss_idx,
2559 num_in_table, bss_new_entry);
2560 /*
2561 * If the TSF TLV was appended to the scan results, save this
2562		 * entry's TSF value in the network_tsf field. The network_tsf is
2563 * the firmware's TSF value at the time the beacon or probe
2564 * response was received.
2565 */
2566 if (tsf_tlv) {
2567			memcpy(&tsf_val, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
2568			       sizeof(tsf_val));
2569 memcpy(&bss_new_entry->network_tsf, &tsf_val,
2570 sizeof(bss_new_entry->network_tsf));
2571 }
2572 band = BAND_G;
2573 if (chan_band_tlv) {
2574 chan_band = &chan_band_tlv->chan_band_param[idx];
2575 band = mwifiex_radio_type_to_band(chan_band->radio_type
2576 & (BIT(0) | BIT(1)));
2577 }
2578
2579 /* Save the band designation for this entry for use in join */
2580 bss_new_entry->bss_band = band;
2581 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211(priv,
2582 (u8) bss_new_entry->bss_band,
2583 (u16)bss_new_entry->channel);
2584
2585 if (cfp)
2586 bss_new_entry->freq = cfp->freq;
2587 else
2588 bss_new_entry->freq = 0;
2589
2590 /* Copy the locally created bss_new_entry to the scan table */
2591 memcpy(&adapter->scan_table[bss_idx], bss_new_entry,
2592 sizeof(adapter->scan_table[bss_idx]));
2593
2594 }
2595
2596 dev_dbg(adapter->dev,
2597 "info: SCAN_RESP: Scanned %2d APs, %d valid, %d total\n",
2598 scan_rsp->number_of_sets,
2599 num_in_table - adapter->num_in_scan_table, num_in_table);
2600
2601 /* Update the total number of BSSIDs in the scan table */
2602 adapter->num_in_scan_table = num_in_table;
2603
2604 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
2605 if (list_empty(&adapter->scan_pending_q)) {
2606 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
2607 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
2608 adapter->scan_processing = false;
2609 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
2610 /*
2611 * Process the resulting scan table:
2612 * - Remove any bad ssids
2613 * - Update our current BSS information from scan data
2614 */
2615 mwifiex_process_scan_results(priv);
2616
2617 /* Need to indicate IOCTL complete */
2618 if (adapter->curr_cmd->wait_q_enabled) {
2619 adapter->cmd_wait_q.status = 0;
2620 mwifiex_complete_cmd(adapter);
2621 }
2622 if (priv->report_scan_result)
2623 priv->report_scan_result = false;
2624 if (priv->scan_pending_on_block) {
2625 priv->scan_pending_on_block = false;
2626 up(&priv->async_sem);
2627 }
2628
2629 } else {
2630 /* Get scan command from scan_pending_q and put to
2631 cmd_pending_q */
2632 cmd_node = list_first_entry(&adapter->scan_pending_q,
2633 struct cmd_ctrl_node, list);
2634 list_del(&cmd_node->list);
2635 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
2636
2637 mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
2638 }
2639
2640done:
2641	kfree(bss_new_entry);
2642 return ret;
2643}
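
/*
 * Editor's illustrative sketch (not part of the driver): the TLV area size
 * computed in mwifiex_ret_802_11_scan() above is everything in the response
 * that is not the BSS descriptor block, the two fixed fields or the generic
 * header.  For example, a 600-byte response carrying 500 bytes of BSS
 * descriptors, with a 2-byte BufSize, a 1-byte NumOfSet and S_DS_GEN assumed
 * to be 8, leaves 600 - (500 + 2 + 1 + 8) = 89 bytes of TLVs.  Hypothetical
 * helper:
 */
static u32 example_scan_tlv_area(u32 resp_size, u32 bss_desc_bytes)
{
	return resp_size - (bss_desc_bytes
			    + sizeof(__le16)	/* BufSize */
			    + sizeof(u8)	/* NumOfSet */
			    + S_DS_GEN);	/* command response header */
}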
2644
2645/*
2646 * This function prepares command for background scan query.
2647 *
2648 * Preparation includes -
2649 * - Setting command ID and proper size
2650 * - Setting background scan flush parameter
2651 * - Ensuring correct endian-ness
2652 */
2653int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd)
2654{
2655 struct host_cmd_ds_802_11_bg_scan_query *bg_query =
2656 &cmd->params.bg_scan_query;
2657
2658 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_QUERY);
2659 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_bg_scan_query)
2660 + S_DS_GEN);
2661
2662 bg_query->flush = 1;
2663
2664 return 0;
2665}
2666
2667/*
2668 * This function finds a SSID in the scan table.
2669 *
2670 * A BSSID may optionally be provided to qualify the SSID.
2671 * For non-Auto mode, a further check is made to make sure the
2672 * BSS found in the scan table is compatible with the current
2673 * settings of the driver.
2674 */
2675s32
2676mwifiex_find_ssid_in_list(struct mwifiex_private *priv,
2677 struct mwifiex_802_11_ssid *ssid, u8 *bssid,
2678 u32 mode)
2679{
2680 struct mwifiex_adapter *adapter = priv->adapter;
2681 s32 net = -1, j;
2682 u8 best_rssi = 0;
2683 u32 i;
2684
2685 dev_dbg(adapter->dev, "info: num of entries in table = %d\n",
2686 adapter->num_in_scan_table);
2687
2688 /*
2689 * Loop through the table until the maximum is reached or until a match
2690 * is found based on the bssid field comparison
2691 */
2692 for (i = 0;
2693 i < adapter->num_in_scan_table && (!bssid || (bssid && net < 0));
2694 i++) {
2695 if (!mwifiex_ssid_cmp(&adapter->scan_table[i].ssid, ssid) &&
2696 (!bssid
2697 || !memcmp(adapter->scan_table[i].mac_address, bssid,
2698 ETH_ALEN))
2699 &&
2700 (mwifiex_get_cfp_by_band_and_channel_from_cfg80211
2701 (priv, (u8) adapter->scan_table[i].bss_band,
2702 (u16) adapter->scan_table[i].channel))) {
2703 switch (mode) {
2704 case NL80211_IFTYPE_STATION:
2705 case NL80211_IFTYPE_ADHOC:
2706 j = mwifiex_is_network_compatible(priv, i,
2707 mode);
2708
2709 if (j >= 0) {
2710 if (SCAN_RSSI
2711 (adapter->scan_table[i].rssi) >
2712 best_rssi) {
2713 best_rssi = SCAN_RSSI(adapter->
2714 scan_table
2715 [i].rssi);
2716 net = i;
2717 }
2718 } else {
2719 if (net == -1)
2720 net = j;
2721 }
2722 break;
2723 case NL80211_IFTYPE_UNSPECIFIED:
2724 default:
2725 /*
2726 * Do not check compatibility if the mode
2727 * requested is Auto/Unknown. Allows generic
2728 * find to work without verifying against the
2729 * Adapter security settings
2730 */
2731 if (SCAN_RSSI(adapter->scan_table[i].rssi) >
2732 best_rssi) {
2733 best_rssi = SCAN_RSSI(adapter->
2734 scan_table[i].rssi);
2735 net = i;
2736 }
2737 break;
2738 }
2739 }
2740 }
2741
2742 return net;
2743}
2744
2745/*
2746 * This function finds a specific compatible BSSID in the scan list.
2747 *
2748 * This function loops through the scan table looking for a compatible
2749 * match. If a BSSID matches, but the BSS is found to be not compatible
2750 * the function ignores it and continues to search through the rest of
2751 * the entries in case there is an AP with multiple SSIDs assigned to
2752 * the same BSSID.
2753 */
2754s32
2755mwifiex_find_bssid_in_list(struct mwifiex_private *priv, u8 *bssid,
2756 u32 mode)
2757{
2758 struct mwifiex_adapter *adapter = priv->adapter;
2759 s32 net = -1;
2760 u32 i;
2761
2762 if (!bssid)
2763 return -1;
2764
2765 dev_dbg(adapter->dev, "info: FindBSSID: Num of BSSIDs = %d\n",
2766 adapter->num_in_scan_table);
2767
2768 /*
2769	 * Look through the scan table for a compatible match. The net return
2770	 * variable will be set to the index of the matching entry if the
2771	 * network is compatible. The loop will continue
2772 * past a matched bssid that is not compatible in case there is an
2773 * AP with multiple SSIDs assigned to the same BSSID
2774 */
2775 for (i = 0; net < 0 && i < adapter->num_in_scan_table; i++) {
2776 if (!memcmp
2777 (adapter->scan_table[i].mac_address, bssid, ETH_ALEN)
2778 && mwifiex_get_cfp_by_band_and_channel_from_cfg80211
2779 (priv,
2780 (u8) adapter->
2781 scan_table[i].
2782 bss_band,
2783 (u16) adapter->
2784 scan_table[i].
2785 channel)) {
2786 switch (mode) {
2787 case NL80211_IFTYPE_STATION:
2788 case NL80211_IFTYPE_ADHOC:
2789 net = mwifiex_is_network_compatible(priv, i,
2790 mode);
2791 break;
2792 default:
2793 net = i;
2794 break;
2795 }
2796 }
2797 }
2798
2799 return net;
2800}
2801
2802/*
2803 * This function inserts scan command node to the scan pending queue.
2804 */
2805void
2806mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
2807 struct cmd_ctrl_node *cmd_node)
2808{
2809 struct mwifiex_adapter *adapter = priv->adapter;
2810 unsigned long flags;
2811
2812 cmd_node->wait_q_enabled = true;
2813 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
2814 list_add_tail(&cmd_node->list, &adapter->scan_pending_q);
2815 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
2816}
2817
2818/*
2819 * This function finds the best available network in the scan list.
2820 */
2821int mwifiex_find_best_network(struct mwifiex_private *priv,
2822 struct mwifiex_ssid_bssid *req_ssid_bssid)
2823{
2824 struct mwifiex_adapter *adapter = priv->adapter;
2825 struct mwifiex_bssdescriptor *req_bss;
2826 s32 i;
2827
2828 memset(req_ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
2829
2830 i = mwifiex_find_best_network_in_list(priv);
2831
2832 if (i >= 0) {
2833 req_bss = &adapter->scan_table[i];
2834 memcpy(&req_ssid_bssid->ssid, &req_bss->ssid,
2835 sizeof(struct mwifiex_802_11_ssid));
2836 memcpy((u8 *) &req_ssid_bssid->bssid,
2837 (u8 *) &req_bss->mac_address, ETH_ALEN);
2838
2839 /* Make sure we are in the right mode */
2840 if (priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
2841 priv->bss_mode = req_bss->bss_mode;
2842 }
2843
2844 if (!req_ssid_bssid->ssid.ssid_len)
2845 return -1;
2846
2847 dev_dbg(adapter->dev, "info: Best network found = [%s], "
2848 "[%pM]\n", req_ssid_bssid->ssid.ssid,
2849 req_ssid_bssid->bssid);
2850
2851 return 0;
2852}
2853
2854/*
2855 * This function sends a scan command for all available channels to the
2856 * firmware, filtered on a specific SSID.
2857 */
2858static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
2859 struct mwifiex_802_11_ssid *req_ssid)
2860{
2861 struct mwifiex_adapter *adapter = priv->adapter;
2862 int ret = 0;
2863 struct mwifiex_user_scan_cfg *scan_cfg;
2864
2865 if (!req_ssid)
2866 return -1;
2867
2868 if (adapter->scan_processing) {
2869 dev_dbg(adapter->dev, "cmd: Scan already in process...\n");
2870 return ret;
2871 }
2872
2873 if (priv->scan_block) {
2874 dev_dbg(adapter->dev,
2875 "cmd: Scan is blocked during association...\n");
2876 return ret;
2877 }
2878
2879 mwifiex_scan_delete_ssid_table_entry(priv, req_ssid);
2880
2881 scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL);
2882 if (!scan_cfg) {
2883 dev_err(adapter->dev, "failed to alloc scan_cfg\n");
2884 return -ENOMEM;
2885 }
2886
2887 memcpy(scan_cfg->ssid_list[0].ssid, req_ssid->ssid,
2888 req_ssid->ssid_len);
2889 scan_cfg->keep_previous_scan = true;
2890
2891 ret = mwifiex_scan_networks(priv, scan_cfg);
2892
2893 kfree(scan_cfg);
2894 return ret;
2895}
2896
2897/*
2898 * This function requests a scan and waits for it to complete.
2899 *
2900 * It serializes scan requests on the async semaphore and blocks until
2901 * the resulting scan command sequence has finished.
2902 *
2903 * The scan can be either a normal scan or a specific SSID scan,
2904 * depending upon whether an SSID is provided or not.
2905 */
2906int mwifiex_request_scan(struct mwifiex_private *priv,
2907 struct mwifiex_802_11_ssid *req_ssid)
2908{
2909 int ret;
2910
2911 if (down_interruptible(&priv->async_sem)) {
2912 dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
2913 __func__);
2914 return -1;
2915 }
2916 priv->scan_pending_on_block = true;
2917
2918 priv->adapter->cmd_wait_q.condition = false;
2919
2920 if (req_ssid && req_ssid->ssid_len != 0)
2921 /* Specific SSID scan */
2922 ret = mwifiex_scan_specific_ssid(priv, req_ssid);
2923 else
2924 /* Normal scan */
2925 ret = mwifiex_scan_networks(priv, NULL);
2926
2927 if (!ret)
2928 ret = mwifiex_wait_queue_complete(priv->adapter);
2929
2930 if (ret == -1) {
2931 priv->scan_pending_on_block = false;
2932 up(&priv->async_sem);
2933 }
2934
2935 return ret;
2936}
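
/*
 * Editor's illustrative sketch (not part of the driver): a caller that wants
 * a scan filtered to a single SSID only has to fill a struct
 * mwifiex_802_11_ssid and pass it to mwifiex_request_scan(); passing NULL
 * (or a zero ssid_len) falls back to a normal all-channel scan.  The helper
 * and the "example-ap" name are made up for illustration.
 */
static int example_scan_for_one_ssid(struct mwifiex_private *priv)
{
	static const char name[] = "example-ap";
	struct mwifiex_802_11_ssid req;

	memset(&req, 0, sizeof(req));
	req.ssid_len = sizeof(name) - 1;
	memcpy(req.ssid, name, req.ssid_len);

	return mwifiex_request_scan(priv, &req);
}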
2937
2938/*
2939 * This function appends the vendor specific IE TLV to a buffer.
2940 */
2941int
2942mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
2943 u16 vsie_mask, u8 **buffer)
2944{
2945 int id, ret_len = 0;
2946 struct mwifiex_ie_types_vendor_param_set *vs_param_set;
2947
2948 if (!buffer)
2949 return 0;
2950 if (!(*buffer))
2951 return 0;
2952
2953 /*
2954 * Traverse through the saved vendor specific IE array and append
2955 * the selected(scan/assoc/adhoc) IE as TLV to the command
2956 */
2957 for (id = 0; id < MWIFIEX_MAX_VSIE_NUM; id++) {
2958 if (priv->vs_ie[id].mask & vsie_mask) {
2959 vs_param_set =
2960 (struct mwifiex_ie_types_vendor_param_set *)
2961 *buffer;
2962 vs_param_set->header.type =
2963 cpu_to_le16(TLV_TYPE_PASSTHROUGH);
2964 vs_param_set->header.len =
2965 cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
2966 & 0x00FF) + 2);
2967 memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
2968 le16_to_cpu(vs_param_set->header.len));
2969 *buffer += le16_to_cpu(vs_param_set->header.len) +
2970 sizeof(struct mwifiex_ie_types_header);
2971 ret_len += le16_to_cpu(vs_param_set->header.len) +
2972 sizeof(struct mwifiex_ie_types_header);
2973 }
2974 }
2975 return ret_len;
2976}
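
/*
 * Editor's illustrative sketch (not part of the driver): the pass-through
 * TLV built above wraps the raw vendor IE, so the TLV payload length is the
 * IE body length (ie[1]) plus the 2-byte IE header, and the write pointer
 * then advances by that payload plus the TLV header (a 2-byte type and a
 * 2-byte length).  For an IE with ie[1] == 22 the payload is 24 bytes and
 * the buffer advances by 28.  Hypothetical helper:
 */
static u16 example_vsie_tlv_advance(const u8 *ie)
{
	u16 payload = (u16) ie[1] + 2;	/* IE body plus 2-byte IE header */

	return (u16) (payload + sizeof(struct mwifiex_ie_types_header));
}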
2977
2978/*
2979 * This function saves a beacon buffer of the current BSS descriptor.
2980 *
2981 * The current beacon buffer is saved so that it can be restored when the
2982 * shared beacon buffer no longer contains the current SSID's beacon:
2983 * - The current SSID was not found in the last scan.
2984 * - The current SSID occupied the last entry of the scan table and was
2985 *   overwritten.
2986 */
2987void
2988mwifiex_save_curr_bcn(struct mwifiex_private *priv)
2989{
2990 struct mwifiex_bssdescriptor *curr_bss =
2991 &priv->curr_bss_params.bss_descriptor;
2992
2993 if (!curr_bss->beacon_buf_size)
2994 return;
2995
2996	/* allocate the beacon buffer on first use, or if its size has changed */
2997 if (!priv->curr_bcn_buf ||
2998 priv->curr_bcn_size != curr_bss->beacon_buf_size) {
2999 priv->curr_bcn_size = curr_bss->beacon_buf_size;
3000
3001 kfree(priv->curr_bcn_buf);
3002 priv->curr_bcn_buf = kzalloc(curr_bss->beacon_buf_size,
3003 GFP_KERNEL);
3004 if (!priv->curr_bcn_buf) {
3005 dev_err(priv->adapter->dev,
3006 "failed to alloc curr_bcn_buf\n");
3007 return;
3008 }
3009 }
3010
3011 memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
3012 curr_bss->beacon_buf_size);
3013 dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
3014 priv->curr_bcn_size);
3015}
3016
3017/*
3018 * This function frees the current BSS descriptor beacon buffer.
3019 */
3020void
3021mwifiex_free_curr_bcn(struct mwifiex_private *priv)
3022{
3023 kfree(priv->curr_bcn_buf);
3024 priv->curr_bcn_buf = NULL;
3025}
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
new file mode 100644
index 000000000000..d425dbd91d19
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -0,0 +1,1754 @@
1/*
2 * Marvell Wireless LAN device driver: SDIO specific handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include <linux/firmware.h>
21
22#include "decl.h"
23#include "ioctl.h"
24#include "util.h"
25#include "fw.h"
26#include "main.h"
27#include "wmm.h"
28#include "11n.h"
29#include "sdio.h"
30
31
32#define SDIO_VERSION "1.0"
33
34static struct mwifiex_if_ops sdio_ops;
35
36static struct semaphore add_remove_card_sem;
37
38/*
39 * SDIO probe.
40 *
41 * This function probes an mwifiex device and registers it. It allocates
42 * the card structure, enables SDIO function number and initiates the
43 * device registration and initialization procedure by adding a logical
44 * interface.
45 */
46static int
47mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
48{
49 int ret;
50 struct sdio_mmc_card *card = NULL;
51
52 pr_debug("info: vendor=0x%4.04X device=0x%4.04X class=%d function=%d\n",
53 func->vendor, func->device, func->class, func->num);
54
55 card = kzalloc(sizeof(struct sdio_mmc_card), GFP_KERNEL);
56 if (!card) {
57 pr_err("%s: failed to alloc memory\n", __func__);
58 return -ENOMEM;
59 }
60
61 card->func = func;
62
63 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
64
65 sdio_claim_host(func);
66 ret = sdio_enable_func(func);
67 sdio_release_host(func);
68
69 if (ret) {
70 pr_err("%s: failed to enable function\n", __func__);
71 kfree(card);
72 return -EIO;
73 }
74
75 if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops)) {
76 pr_err("%s: add card failed\n", __func__);
77 kfree(card);
78 sdio_claim_host(func);
79 ret = sdio_disable_func(func);
80 sdio_release_host(func);
81 ret = -1;
82 }
83
84 return ret;
85}
86
87/*
88 * SDIO remove.
89 *
90 * This function removes the interface and frees up the card structure.
91 */
92static void
93mwifiex_sdio_remove(struct sdio_func *func)
94{
95 struct sdio_mmc_card *card;
96
97 pr_debug("info: SDIO func num=%d\n", func->num);
98
99 if (func) {
100 card = sdio_get_drvdata(func);
101 if (card) {
102 mwifiex_remove_card(card->adapter,
103 &add_remove_card_sem);
104 kfree(card);
105 }
106 }
107}
108
109/*
110 * SDIO suspend.
111 *
112 * Kernel needs to suspend all functions separately. Therefore all
113 * registered functions must have drivers with suspend and resume
114 * methods. Failing that the kernel simply removes the whole card.
115 *
116 * If not already suspended, this function allocates and sends a host
117 * sleep activate request to the firmware and turns off the traffic.
118 */
119static int mwifiex_sdio_suspend(struct device *dev)
120{
121 struct sdio_func *func = dev_to_sdio_func(dev);
122 struct sdio_mmc_card *card;
123 struct mwifiex_adapter *adapter;
124 mmc_pm_flag_t pm_flag = 0;
125	int hs_activated = 0;
126 int i;
127 int ret = 0;
128
129 if (func) {
130 pm_flag = sdio_get_host_pm_caps(func);
131 pr_debug("cmd: %s: suspend: PM flag = 0x%x\n",
132 sdio_func_id(func), pm_flag);
133 if (!(pm_flag & MMC_PM_KEEP_POWER)) {
134 pr_err("%s: cannot remain alive while host is"
135 " suspended\n", sdio_func_id(func));
136 return -ENOSYS;
137 }
138
139 card = sdio_get_drvdata(func);
140 if (!card || !card->adapter) {
141 pr_err("suspend: invalid card or adapter\n");
142 return 0;
143 }
144 } else {
145 pr_err("suspend: sdio_func is not specified\n");
146 return 0;
147 }
148
149 adapter = card->adapter;
150
151 /* Enable the Host Sleep */
152	hs_activated = mwifiex_enable_hs(adapter);
153	if (hs_activated) {
154 pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
155 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
156 }
157
158 /* Indicate device suspended */
159 adapter->is_suspended = true;
160
161 for (i = 0; i < adapter->priv_num; i++)
162 netif_carrier_off(adapter->priv[i]->netdev);
163
164 return ret;
165}
166
167/*
168 * SDIO resume.
169 *
170 * Kernel needs to suspend all functions separately. Therefore all
171 * registered functions must have drivers with suspend and resume
172 * methods. Failing that the kernel simply removes the whole card.
173 *
174 * If not already resumed, this function turns on the traffic and
175 * sends a host sleep cancel request to the firmware.
176 */
177static int mwifiex_sdio_resume(struct device *dev)
178{
179 struct sdio_func *func = dev_to_sdio_func(dev);
180 struct sdio_mmc_card *card;
181 struct mwifiex_adapter *adapter;
182 mmc_pm_flag_t pm_flag = 0;
183 int i;
184
185 if (func) {
186 pm_flag = sdio_get_host_pm_caps(func);
187 card = sdio_get_drvdata(func);
188 if (!card || !card->adapter) {
189 pr_err("resume: invalid card or adapter\n");
190 return 0;
191 }
192 } else {
193 pr_err("resume: sdio_func is not specified\n");
194 return 0;
195 }
196
197 adapter = card->adapter;
198
199 if (!adapter->is_suspended) {
200 dev_warn(adapter->dev, "device already resumed\n");
201 return 0;
202 }
203
204 adapter->is_suspended = false;
205
206 for (i = 0; i < adapter->priv_num; i++)
207 if (adapter->priv[i]->media_connected)
208 netif_carrier_on(adapter->priv[i]->netdev);
209
210 /* Disable Host Sleep */
211 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
212 MWIFIEX_ASYNC_CMD);
213
214 return 0;
215}
216
217/* Device ID for SD8787 */
218#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
219
220/* WLAN IDs */
221static const struct sdio_device_id mwifiex_ids[] = {
222 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
223 {},
224};
225
226MODULE_DEVICE_TABLE(sdio, mwifiex_ids);
227
228static const struct dev_pm_ops mwifiex_sdio_pm_ops = {
229 .suspend = mwifiex_sdio_suspend,
230 .resume = mwifiex_sdio_resume,
231};
232
233static struct sdio_driver mwifiex_sdio = {
234 .name = "mwifiex_sdio",
235 .id_table = mwifiex_ids,
236 .probe = mwifiex_sdio_probe,
237 .remove = mwifiex_sdio_remove,
238 .drv = {
239 .owner = THIS_MODULE,
240 .pm = &mwifiex_sdio_pm_ops,
241 }
242};
243
244/*
245 * This function writes data into an SDIO card register.
246 */
247static int
248mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data)
249{
250 struct sdio_mmc_card *card = adapter->card;
251 int ret = -1;
252
253 sdio_claim_host(card->func);
254 sdio_writeb(card->func, (u8) data, reg, &ret);
255 sdio_release_host(card->func);
256
257 return ret;
258}
259
260/*
261 * This function reads data from an SDIO card register.
262 */
263static int
264mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u32 *data)
265{
266 struct sdio_mmc_card *card = adapter->card;
267 int ret = -1;
268 u8 val;
269
270 sdio_claim_host(card->func);
271 val = sdio_readb(card->func, reg, &ret);
272 sdio_release_host(card->func);
273
274 *data = val;
275
276 return ret;
277}
278
279/*
280 * This function writes a data buffer into the SDIO card memory.
281 *
282 * It does not work while the card is suspended.
283 */
284static int
285mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
286 u8 *buffer, u32 pkt_len, u32 port)
287{
288 struct sdio_mmc_card *card = adapter->card;
289 int ret = -1;
290 u8 blk_mode =
291 (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
292 u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
293 u32 blk_cnt =
294 (blk_mode ==
295 BLOCK_MODE) ? (pkt_len /
296 MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
297 u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
298
299 if (adapter->is_suspended) {
300 dev_err(adapter->dev,
301 "%s: not allowed while suspended\n", __func__);
302 return -1;
303 }
304
305 sdio_claim_host(card->func);
306
307 if (!sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size))
308 ret = 0;
309
310 sdio_release_host(card->func);
311
312 return ret;
313}
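
/*
 * Editor's illustrative sketch (not part of the driver): the synchronous
 * write above transfers either whole SDIO blocks or raw bytes depending on
 * the byte-mode bit carried in the port value.  Assuming
 * MWIFIEX_SDIO_BLOCK_SIZE is 256, a 1024-byte packet in block mode becomes
 * 4 blocks of 256 bytes, while in byte mode the count is simply the byte
 * length with a block size of 1.  Hypothetical helper mirroring that
 * calculation:
 */
static u32 example_sdio_xfer_bytes(u32 pkt_len, u8 blk_mode)
{
	u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
	u32 blk_cnt = (blk_mode == BLOCK_MODE) ?
		      (pkt_len / MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;

	return blk_cnt * blk_size;
}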
314
315/*
316 * This function reads a data buffer from the SDIO card memory.
317 */
318static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
319 u32 len, u32 port, u8 claim)
320{
321 struct sdio_mmc_card *card = adapter->card;
322 int ret = -1;
323 u8 blk_mode =
324 (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
325 u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
326 u32 blk_cnt =
327 (blk_mode ==
328 BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE) : len;
329 u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
330
331 if (claim)
332 sdio_claim_host(card->func);
333
334 if (!sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size))
335 ret = 0;
336
337 if (claim)
338 sdio_release_host(card->func);
339
340 return ret;
341}
342
343/*
344 * This function wakes up the card.
345 *
346 * A host power up command is written to the card configuration
347 * register to wake up the card.
348 */
349static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
350{
351 dev_dbg(adapter->dev, "event: wakeup device...\n");
352
353 return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP);
354}
355
356/*
357 * This function is called after the card has woken up.
358 *
359 * The card configuration register is reset.
360 */
361static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
362{
363 dev_dbg(adapter->dev, "cmd: wakeup device completed\n");
364
365 return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
366}
367
368/*
369 * This function initializes the IO ports.
370 *
371 * The following operations are performed -
372 * - Read the IO ports (0, 1 and 2)
373 * - Set host interrupt Reset-To-Read to clear
374 * - Set auto re-enable interrupt
375 */
376static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
377{
378 u32 reg;
379
380 adapter->ioport = 0;
381
382 /* Read the IO port */
383 if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg))
384 adapter->ioport |= (reg & 0xff);
385 else
386 return -1;
387
388 if (!mwifiex_read_reg(adapter, IO_PORT_1_REG, &reg))
389 adapter->ioport |= ((reg & 0xff) << 8);
390 else
391 return -1;
392
393 if (!mwifiex_read_reg(adapter, IO_PORT_2_REG, &reg))
394 adapter->ioport |= ((reg & 0xff) << 16);
395 else
396 return -1;
397
398 pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
399
400 /* Set Host interrupt reset to read to clear */
401 if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg))
402 mwifiex_write_reg(adapter, HOST_INT_RSR_REG,
403 reg | SDIO_INT_MASK);
404 else
405 return -1;
406
407 /* Dnld/Upld ready set to auto reset */
408 if (!mwifiex_read_reg(adapter, CARD_MISC_CFG_REG, &reg))
409 mwifiex_write_reg(adapter, CARD_MISC_CFG_REG,
410 reg | AUTO_RE_ENABLE_INT);
411 else
412 return -1;
413
414 return 0;
415}
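
/*
 * Editor's illustrative sketch (not part of the driver): the I/O port
 * assembled above is a 24-bit value built from three byte-wide registers,
 * low byte first.  If the reads returned 0x78, 0x56 and 0x34 for ports 0,
 * 1 and 2, adapter->ioport would end up as 0x345678.  Hypothetical helper:
 */
static u32 example_assemble_ioport(u32 p0, u32 p1, u32 p2)
{
	return (p0 & 0xff) | ((p1 & 0xff) << 8) | ((p2 & 0xff) << 16);
}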
416
417/*
418 * This function sends data to the card.
419 */
420static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
421 u8 *payload, u32 pkt_len, u32 port)
422{
423 u32 i = 0;
424 int ret;
425
426 do {
427 ret = mwifiex_write_data_sync(adapter, payload, pkt_len, port);
428 if (ret) {
429 i++;
430 dev_err(adapter->dev, "host_to_card, write iomem"
431 " (%d) failed: %d\n", i, ret);
432 if (mwifiex_write_reg(adapter,
433 CONFIGURATION_REG, 0x04))
434 dev_err(adapter->dev, "write CFG reg failed\n");
435
436 ret = -1;
437 if (i > MAX_WRITE_IOMEM_RETRY)
438 return ret;
439 }
440 } while (ret == -1);
441
442 return ret;
443}
444
445/*
446 * This function gets the read port.
447 *
448 * If the control port bit is set in the MP read bitmap, the control port
449 * is returned, otherwise the current read port is returned and
450 * the value is increased (provided it does not reach the maximum
451 * limit, in which case it is reset to 1)
452 */
453static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
454{
455 struct sdio_mmc_card *card = adapter->card;
456 u16 rd_bitmap = card->mp_rd_bitmap;
457
458 dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%04x\n", rd_bitmap);
459
460 if (!(rd_bitmap & (CTRL_PORT_MASK | DATA_PORT_MASK)))
461 return -1;
462
463 if (card->mp_rd_bitmap & CTRL_PORT_MASK) {
464 card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK);
465 *port = CTRL_PORT;
466 dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n",
467 *port, card->mp_rd_bitmap);
468 } else {
469 if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) {
470 card->mp_rd_bitmap &=
471 (u16) (~(1 << card->curr_rd_port));
472 *port = card->curr_rd_port;
473
474 if (++card->curr_rd_port == MAX_PORT)
475 card->curr_rd_port = 1;
476 } else {
477 return -1;
478 }
479
480 dev_dbg(adapter->dev,
481 "data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n",
482 *port, rd_bitmap, card->mp_rd_bitmap);
483 }
484 return 0;
485}
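
/*
 * Editor's illustrative sketch (not part of the driver): a worked example of
 * the bitmap handling above.  With mp_rd_bitmap = 0x0018 (ports 3 and 4
 * ready) and the control-port bit clear, a curr_rd_port of 3 means bit 3 is
 * cleared (the bitmap becomes 0x0010), port 3 is returned and curr_rd_port
 * advances to 4; when it reaches MAX_PORT it wraps back to 1.  Hypothetical
 * helper for the bit clearing:
 */
static u16 example_clear_port_bit(u16 bitmap, u8 port)
{
	return bitmap & (u16) ~(1 << port);
}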
486
487/*
488 * This function gets the write port for data.
489 *
490 * The current write port is returned if available and the value is
491 * increased (provided it does not reach the maximum limit, in which
492 * case it is reset to 1)
493 */
494static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
495{
496 struct sdio_mmc_card *card = adapter->card;
497 u16 wr_bitmap = card->mp_wr_bitmap;
498
499 dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%04x\n", wr_bitmap);
500
501 if (!(wr_bitmap & card->mp_data_port_mask))
502 return -1;
503
504 if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) {
505 card->mp_wr_bitmap &= (u16) (~(1 << card->curr_wr_port));
506 *port = card->curr_wr_port;
507 if (++card->curr_wr_port == card->mp_end_port)
508 card->curr_wr_port = 1;
509 } else {
510 adapter->data_sent = true;
511 return -EBUSY;
512 }
513
514 if (*port == CTRL_PORT) {
515 dev_err(adapter->dev, "invalid data port=%d cur port=%d"
516 " mp_wr_bitmap=0x%04x -> 0x%04x\n",
517 *port, card->curr_wr_port, wr_bitmap,
518 card->mp_wr_bitmap);
519 return -1;
520 }
521
522 dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n",
523 *port, wr_bitmap, card->mp_wr_bitmap);
524
525 return 0;
526}
527
528/*
529 * This function polls the card status.
530 */
531static int
532mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
533{
534 u32 tries;
535 u32 cs;
536
537 for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
538 if (mwifiex_read_reg(adapter, CARD_STATUS_REG, &cs))
539 break;
540 else if ((cs & bits) == bits)
541 return 0;
542
543 udelay(10);
544 }
545
546 dev_err(adapter->dev, "poll card status failed, tries = %d\n",
547 tries);
548 return -1;
549}
550
551/*
552 * This function reads the firmware status.
553 */
554static int
555mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
556{
557 u32 fws0, fws1;
558
559 if (mwifiex_read_reg(adapter, CARD_FW_STATUS0_REG, &fws0))
560 return -1;
561
562 if (mwifiex_read_reg(adapter, CARD_FW_STATUS1_REG, &fws1))
563 return -1;
564
565 *dat = (u16) ((fws1 << 8) | fws0);
566
567 return 0;
568}
569
570/*
571 * This function disables the host interrupt.
572 *
573 * The host interrupt mask is read, the disable bit is reset and
574 * written back to the card host interrupt mask register.
575 */
576static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
577{
578 u32 host_int_mask;
579
580 /* Read back the host_int_mask register */
581 if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
582 return -1;
583
584 /* Update with the mask and write back to the register */
585 host_int_mask &= ~HOST_INT_DISABLE;
586
587 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
588 dev_err(adapter->dev, "disable host interrupt failed\n");
589 return -1;
590 }
591
592 return 0;
593}
594
595/*
596 * This function enables the host interrupt.
597 *
598 * The host interrupt enable mask is written to the card
599 * host interrupt mask register.
600 */
601static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
602{
603 /* Simply write the mask to the register */
604 if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, HOST_INT_ENABLE)) {
605 dev_err(adapter->dev, "enable host interrupt failed\n");
606 return -1;
607 }
608 return 0;
609}
610
611/*
612 * This function reads a data buffer from the card.
613 */
614static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
615 u32 *type, u8 *buffer,
616 u32 npayload, u32 ioport)
617{
618 int ret;
619 u32 nb;
620
621 if (!buffer) {
622 dev_err(adapter->dev, "%s: buffer is NULL\n", __func__);
623 return -1;
624 }
625
626 ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1);
627
628 if (ret) {
629 dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
630 ret);
631 return -1;
632 }
633
634 nb = le16_to_cpu(*(__le16 *) (buffer));
635 if (nb > npayload) {
636 dev_err(adapter->dev, "%s: invalid packet, nb=%d, npayload=%d\n",
637 __func__, nb, npayload);
638 return -1;
639 }
640
641 *type = le16_to_cpu(*(__le16 *) (buffer + 2));
642
643 return ret;
644}
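
/*
 * Editor's illustrative sketch (not part of the driver): the upload read
 * above starts with a little-endian interface header - the first 16-bit
 * word is the total packet length and the second is the upload type.  A
 * buffer beginning with the bytes 40 00 01 00 therefore describes a
 * 0x0040 = 64-byte packet of type 0x0001.  Hypothetical helper:
 */
static void example_parse_intf_header(u8 *buffer, u32 *len, u32 *type)
{
	*len  = le16_to_cpu(*(__le16 *) buffer);
	*type = le16_to_cpu(*(__le16 *) (buffer + 2));
}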
645
646/*
647 * This function downloads the firmware to the card.
648 *
649 * Firmware is downloaded to the card in blocks. Every block download
650 * is tested for CRC errors, and retried a number of times before
651 * returning failure.
652 */
653static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
654 struct mwifiex_fw_image *fw)
655{
656 int ret;
657 u8 *firmware = fw->fw_buf;
658 u32 firmware_len = fw->fw_len;
659 u32 offset = 0;
660 u32 base0, base1;
661 u8 *fwbuf;
662 u16 len = 0;
663 u32 txlen, tx_blocks = 0, tries;
664 u32 i = 0;
665
666 if (!firmware_len) {
667 dev_err(adapter->dev, "firmware image not found!"
668 " Terminating download\n");
669 return -1;
670 }
671
672 dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
673 firmware_len);
674
675 /* Assume that the allocated buffer is 8-byte aligned */
676 fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
677 if (!fwbuf) {
678 dev_err(adapter->dev, "unable to alloc buffer for firmware."
679 " Terminating download\n");
680 return -ENOMEM;
681 }
682
683 /* Perform firmware data transfer */
684 do {
685 /* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
686 bits */
687 ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY |
688 DN_LD_CARD_RDY);
689 if (ret) {
690 dev_err(adapter->dev, "FW download with helper:"
691 " poll status timeout @ %d\n", offset);
692 goto done;
693 }
694
695 /* More data? */
696 if (offset >= firmware_len)
697 break;
698
699 for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
700 ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0,
701 &base0);
702 if (ret) {
703 dev_err(adapter->dev, "dev BASE0 register read"
704 " failed: base0=0x%04X(%d). Terminating "
705 "download\n", base0, base0);
706 goto done;
707 }
708 ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1,
709 &base1);
710 if (ret) {
711 dev_err(adapter->dev, "dev BASE1 register read"
712 " failed: base1=0x%04X(%d). Terminating "
713 "download\n", base1, base1);
714 goto done;
715 }
716 len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
717
718 if (len)
719 break;
720
721 udelay(10);
722 }
723
724 if (!len) {
725 break;
726 } else if (len > MWIFIEX_UPLD_SIZE) {
727 dev_err(adapter->dev, "FW download failed @ %d,"
728 " invalid length %d\n", offset, len);
729 ret = -1;
730 goto done;
731 }
732
733 txlen = len;
734
735 if (len & BIT(0)) {
736 i++;
737 if (i > MAX_WRITE_IOMEM_RETRY) {
738 dev_err(adapter->dev, "FW download failed @"
739 " %d, over max retry count\n", offset);
740 ret = -1;
741 goto done;
742 }
743			dev_err(adapter->dev, "CRC error indicated by the helper:"
744 " len = 0x%04X, txlen = %d\n", len, txlen);
745 len &= ~BIT(0);
746 /* Setting this to 0 to resend from same offset */
747 txlen = 0;
748 } else {
749 i = 0;
750
751 /* Set blocksize to transfer - checking for last
752 block */
753 if (firmware_len - offset < txlen)
754 txlen = firmware_len - offset;
755
756 tx_blocks = (txlen + MWIFIEX_SDIO_BLOCK_SIZE -
757 1) / MWIFIEX_SDIO_BLOCK_SIZE;
758
759 /* Copy payload to buffer */
760 memmove(fwbuf, &firmware[offset], txlen);
761 }
762
763 ret = mwifiex_write_data_sync(adapter, fwbuf, tx_blocks *
764 MWIFIEX_SDIO_BLOCK_SIZE,
765 adapter->ioport);
766 if (ret) {
767 dev_err(adapter->dev, "FW download, write iomem (%d)"
768 " failed @ %d\n", i, offset);
769 if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
770 dev_err(adapter->dev, "write CFG reg failed\n");
771
772 ret = -1;
773 goto done;
774 }
775
776 offset += txlen;
777 } while (true);
778
779 dev_dbg(adapter->dev, "info: FW download over, size %d bytes\n",
780 offset);
781
782 ret = 0;
783done:
784 kfree(fwbuf);
785 return ret;
786}
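
/*
 * Editor's illustrative sketch (not part of the driver): in the download
 * loop above the helper advertises the next chunk length in BASE1:BASE0.
 * Bit 0 of that length doubles as a CRC-error flag: a value of 0x01F5 means
 * "the previous chunk failed its CRC, resend it", while an even value is
 * simply the next requested length, which is then rounded up to whole SDIO
 * blocks before the write.  Hypothetical helpers:
 */
static bool example_helper_len_is_crc_retry(u16 len)
{
	return len & BIT(0);
}

static u32 example_round_to_blocks(u32 txlen)
{
	/* e.g. 500 bytes with a 256-byte block size -> 2 blocks */
	return (txlen + MWIFIEX_SDIO_BLOCK_SIZE - 1) / MWIFIEX_SDIO_BLOCK_SIZE;
}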
787
788/*
789 * This function checks the firmware status in card.
790 * This function checks the firmware status in the card.
791 * The winner interface is also determined by this function.
792 */
793static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
794 u32 poll_num, int *winner)
795{
796 int ret = 0;
797 u16 firmware_stat;
798 u32 tries;
799 u32 winner_status;
800
801 /* Wait for firmware initialization event */
802 for (tries = 0; tries < poll_num; tries++) {
803 ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
804 if (ret)
805 continue;
806 if (firmware_stat == FIRMWARE_READY) {
807 ret = 0;
808 break;
809 } else {
810 mdelay(100);
811 ret = -1;
812 }
813 }
814
815 if (winner && ret) {
816 if (mwifiex_read_reg
817 (adapter, CARD_FW_STATUS0_REG, &winner_status))
818 winner_status = 0;
819
820 if (winner_status)
821 *winner = 0;
822 else
823 *winner = 1;
824 }
825 return ret;
826}
827
828/*
829 * This function reads the interrupt status from the card.
830 */
831static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
832{
833 struct sdio_mmc_card *card = adapter->card;
834 u32 sdio_ireg;
835 unsigned long flags;
836
837 if (mwifiex_read_data_sync(adapter, card->mp_regs, MAX_MP_REGS,
838 REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK,
839 0)) {
840 dev_err(adapter->dev, "read mp_regs failed\n");
841 return;
842 }
843
844 sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
845 if (sdio_ireg) {
846 /*
847 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
848 * Clear the interrupt status register
849 */
850 dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
851 spin_lock_irqsave(&adapter->int_lock, flags);
852 adapter->int_status |= sdio_ireg;
853 spin_unlock_irqrestore(&adapter->int_lock, flags);
854 }
855}
856
857/*
858 * SDIO interrupt handler.
859 *
860 * This function reads the interrupt status from the card and queues the
861 * main work on the workqueue, which will handle the interrupt.
862 */
863static void
864mwifiex_sdio_interrupt(struct sdio_func *func)
865{
866 struct mwifiex_adapter *adapter;
867 struct sdio_mmc_card *card;
868
869 card = sdio_get_drvdata(func);
870 if (!card || !card->adapter) {
871 pr_debug("int: func=%p card=%p adapter=%p\n",
872 func, card, card ? card->adapter : NULL);
873 return;
874 }
875 adapter = card->adapter;
876
877 if (adapter->surprise_removed)
878 return;
879
880 if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
881 adapter->ps_state = PS_STATE_AWAKE;
882
883 mwifiex_interrupt_status(adapter);
884 queue_work(adapter->workqueue, &adapter->main_work);
885}
886
887/*
888 * This function decodes a received packet.
889 *
890 * Based on the type, the packet is treated as either a data packet, a
891 * command response, or an event, and the correct handler
892 * function is invoked.
893 */
894static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
895 struct sk_buff *skb, u32 upld_typ)
896{
897 u8 *cmd_buf;
898
899 skb_pull(skb, INTF_HEADER_LEN);
900
901 switch (upld_typ) {
902 case MWIFIEX_TYPE_DATA:
903 dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
904 mwifiex_handle_rx_packet(adapter, skb);
905 break;
906
907 case MWIFIEX_TYPE_CMD:
908 dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n");
909 /* take care of curr_cmd = NULL case */
910 if (!adapter->curr_cmd) {
911 cmd_buf = adapter->upld_buf;
912
913 if (adapter->ps_state == PS_STATE_SLEEP_CFM)
914 mwifiex_process_sleep_confirm_resp(adapter,
915 skb->data, skb->len);
916
917 memcpy(cmd_buf, skb->data, min_t(u32,
918 MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
919
920 dev_kfree_skb_any(skb);
921 } else {
922 adapter->cmd_resp_received = true;
923 adapter->curr_cmd->resp_skb = skb;
924 }
925 break;
926
927 case MWIFIEX_TYPE_EVENT:
928 dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
929 adapter->event_cause = *(u32 *) skb->data;
930
931 skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
932
933 if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
934 memcpy(adapter->event_body, skb->data, skb->len);
935
936 /* event cause has been saved to adapter->event_cause */
937 adapter->event_received = true;
938 adapter->event_skb = skb;
939
940 break;
941
942 default:
943 dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ);
944 dev_kfree_skb_any(skb);
945 break;
946 }
947
948 return 0;
949}
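
/*
 * Editor's illustrative sketch (not part of the driver): for
 * MWIFIEX_TYPE_EVENT the payload starts with a 32-bit event cause followed
 * by the event body, so after the MWIFIEX_EVENT_HEADER_LEN pull the skb
 * holds only the body (assuming, as the code above suggests, that the
 * header length covers just the cause word).  Hypothetical helper:
 */
static u32 example_event_cause(struct sk_buff *skb)
{
	u32 cause = *(u32 *) skb->data;

	skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);	/* leave only the body */
	return cause;
}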
950
951/*
952 * This function transfers received packets from the card to the driver,
953 * performing aggregation if required.
954 *
955 * For data received on the control port, or if aggregation is disabled, the
956 * received buffers are uploaded as separate packets. However, if aggregation
957 * is enabled and required, the buffers are copied onto an aggregation buffer,
958 * provided there is space left, processed and finally uploaded.
959 */
960static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
961 struct sk_buff *skb, u8 port)
962{
963 struct sdio_mmc_card *card = adapter->card;
964 s32 f_do_rx_aggr = 0;
965 s32 f_do_rx_cur = 0;
966 s32 f_aggr_cur = 0;
967 struct sk_buff *skb_deaggr;
968 u32 pind;
969 u32 pkt_len, pkt_type = 0;
970 u8 *curr_ptr;
971 u32 rx_len = skb->len;
972
973 if (port == CTRL_PORT) {
974 /* Read the command Resp without aggr */
975 dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
976 "response\n", __func__);
977
978 f_do_rx_cur = 1;
979 goto rx_curr_single;
980 }
981
982 if (!card->mpa_rx.enabled) {
983 dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
984 __func__);
985
986 f_do_rx_cur = 1;
987 goto rx_curr_single;
988 }
989
990 if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) {
991 /* Some more data RX pending */
992 dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
993
994 if (MP_RX_AGGR_IN_PROGRESS(card)) {
995 if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) {
996 f_aggr_cur = 1;
997 } else {
998 /* No room in Aggr buf, do rx aggr now */
999 f_do_rx_aggr = 1;
1000 f_do_rx_cur = 1;
1001 }
1002 } else {
1003 /* Rx aggr not in progress */
1004 f_aggr_cur = 1;
1005 }
1006
1007 } else {
1008 /* No more data RX pending */
1009 dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);
1010
1011 if (MP_RX_AGGR_IN_PROGRESS(card)) {
1012 f_do_rx_aggr = 1;
1013 if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len))
1014 f_aggr_cur = 1;
1015 else
1016 /* No room in Aggr buf, do rx aggr now */
1017 f_do_rx_cur = 1;
1018 } else {
1019 f_do_rx_cur = 1;
1020 }
1021 }
1022
1023 if (f_aggr_cur) {
1024 dev_dbg(adapter->dev, "info: current packet aggregation\n");
1025 /* Curr pkt can be aggregated */
1026 MP_RX_AGGR_SETUP(card, skb, port);
1027
1028 if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
1029 MP_RX_AGGR_PORT_LIMIT_REACHED(card)) {
1030 dev_dbg(adapter->dev, "info: %s: aggregated packet "
1031 "limit reached\n", __func__);
1032 /* No more pkts allowed in Aggr buf, rx it */
1033 f_do_rx_aggr = 1;
1034 }
1035 }
1036
1037 if (f_do_rx_aggr) {
1038 /* do aggr RX now */
1039 dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
1040 card->mpa_rx.pkt_cnt);
1041
1042 if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
1043 card->mpa_rx.buf_len,
1044 (adapter->ioport | 0x1000 |
1045 (card->mpa_rx.ports << 4)) +
1046 card->mpa_rx.start_port, 1))
1047 return -1;
1048
1049 curr_ptr = card->mpa_rx.buf;
1050
1051 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
1052
1053 /* get curr PKT len & type */
1054 pkt_len = *(u16 *) &curr_ptr[0];
1055 pkt_type = *(u16 *) &curr_ptr[2];
1056
1057 /* copy pkt to deaggr buf */
1058 skb_deaggr = card->mpa_rx.skb_arr[pind];
1059
1060 if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <=
1061 card->mpa_rx.len_arr[pind])) {
1062
1063 memcpy(skb_deaggr->data, curr_ptr, pkt_len);
1064
1065 skb_trim(skb_deaggr, pkt_len);
1066
1067 /* Process de-aggr packet */
1068 mwifiex_decode_rx_packet(adapter, skb_deaggr,
1069 pkt_type);
1070 } else {
1071 dev_err(adapter->dev, "wrong aggr pkt:"
1072 " type=%d len=%d max_len=%d\n",
1073 pkt_type, pkt_len,
1074 card->mpa_rx.len_arr[pind]);
1075 dev_kfree_skb_any(skb_deaggr);
1076 }
1077 curr_ptr += card->mpa_rx.len_arr[pind];
1078 }
1079 MP_RX_AGGR_BUF_RESET(card);
1080 }
1081
1082rx_curr_single:
1083 if (f_do_rx_cur) {
1084 dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
1085 port, rx_len);
1086
1087 if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
1088 skb->data, skb->len,
1089 adapter->ioport + port))
1090 return -1;
1091
1092 mwifiex_decode_rx_packet(adapter, skb, pkt_type);
1093 }
1094
1095 return 0;
1096}
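
/*
 * Editor's illustrative sketch (not part of the driver): when the
 * aggregation buffer is flushed above, all accumulated ports are read in a
 * single transfer whose address encodes the port set: the function-1 I/O
 * port, what appears to be an aggregation flag (0x1000), the port bitmask
 * shifted left by 4, plus the starting port.  With ioport 0x10000, a ports
 * mask of 0x3 and start_port 1 the composite address is
 * (0x10000 | 0x1000 | 0x30) + 1 = 0x11031.  Hypothetical helper:
 */
static u32 example_mp_aggr_address(u32 ioport, u32 ports, u32 start_port)
{
	return (ioport | 0x1000 | (ports << 4)) + start_port;
}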
1097
1098/*
1099 * This function checks the current interrupt status.
1100 *
1101 * The following interrupts are checked and handled by this function -
1102 * - Data sent
1103 * - Command sent
1104 * - Packets received
1105 *
1106 * Since the firmware does not generate download ready interrupt if the
1107 * port updated is command port only, command sent interrupt checking
1108 * should be done manually, and for every SDIO interrupt.
1109 *
1110 * In case of Rx packets received, the packets are uploaded from the card
1111 * to the host and processed accordingly.
1112 */
1113static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1114{
1115 struct sdio_mmc_card *card = adapter->card;
1116 int ret = 0;
1117 u8 sdio_ireg;
1118 struct sk_buff *skb;
1119 u8 port = CTRL_PORT;
1120 u32 len_reg_l, len_reg_u;
1121 u32 rx_blocks;
1122 u16 rx_len;
1123 unsigned long flags;
1124
1125 spin_lock_irqsave(&adapter->int_lock, flags);
1126 sdio_ireg = adapter->int_status;
1127 adapter->int_status = 0;
1128 spin_unlock_irqrestore(&adapter->int_lock, flags);
1129
1130 if (!sdio_ireg)
1131 return ret;
1132
1133 if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
1134 card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8;
1135 card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L];
1136 dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n",
1137 card->mp_wr_bitmap);
1138 if (adapter->data_sent &&
1139 (card->mp_wr_bitmap & card->mp_data_port_mask)) {
1140 dev_dbg(adapter->dev,
1141 "info: <--- Tx DONE Interrupt --->\n");
1142 adapter->data_sent = false;
1143 }
1144 }
1145
1146	/* As the firmware will not generate a download ready interrupt if the
1147	   port updated is the command port only, the cmd_sent check must be
1148	   done for every SDIO interrupt. */
1149 if (adapter->cmd_sent) {
1150		/* Check if the firmware has attached a buffer at the command
1151		   port and update just that bit in the wr_bitmap. */
1152 card->mp_wr_bitmap |=
1153 (u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK;
1154 if (card->mp_wr_bitmap & CTRL_PORT_MASK)
1155 adapter->cmd_sent = false;
1156 }
1157
1158 dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
1159 adapter->cmd_sent, adapter->data_sent);
1160 if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
1161 card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8;
1162 card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L];
1163 dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n",
1164 card->mp_rd_bitmap);
1165
1166 while (true) {
1167 ret = mwifiex_get_rd_port(adapter, &port);
1168 if (ret) {
1169 dev_dbg(adapter->dev,
1170 "info: no more rd_port available\n");
1171 break;
1172 }
1173 len_reg_l = RD_LEN_P0_L + (port << 1);
1174 len_reg_u = RD_LEN_P0_U + (port << 1);
1175 rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
1176 rx_len |= (u16) card->mp_regs[len_reg_l];
1177 dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
1178 port, rx_len);
1179 rx_blocks =
1180 (rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
1181 1) / MWIFIEX_SDIO_BLOCK_SIZE;
1182 if (rx_len <= INTF_HEADER_LEN
1183 || (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
1184 MWIFIEX_RX_DATA_BUF_SIZE) {
1185 dev_err(adapter->dev, "invalid rx_len=%d\n",
1186 rx_len);
1187 return -1;
1188 }
1189 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
1190
1191 skb = dev_alloc_skb(rx_len);
1192
1193 if (!skb) {
1194 dev_err(adapter->dev, "%s: failed to alloc skb",
1195 __func__);
1196 return -1;
1197 }
1198
1199 skb_put(skb, rx_len);
1200
1201 dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
1202 rx_len, skb->len);
1203
1204 if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
1205 port)) {
1206 u32 cr = 0;
1207
1208 dev_err(adapter->dev, "card_to_host_mpa failed:"
1209 " int status=%#x\n", sdio_ireg);
1210 if (mwifiex_read_reg(adapter,
1211 CONFIGURATION_REG, &cr))
1212 dev_err(adapter->dev,
1213 "read CFG reg failed\n");
1214
1215 dev_dbg(adapter->dev,
1216 "info: CFG reg val = %d\n", cr);
1217 if (mwifiex_write_reg(adapter,
1218 CONFIGURATION_REG,
1219 (cr | 0x04)))
1220 dev_err(adapter->dev,
1221 "write CFG reg failed\n");
1222
1223 dev_dbg(adapter->dev, "info: write success\n");
1224 if (mwifiex_read_reg(adapter,
1225 CONFIGURATION_REG, &cr))
1226 dev_err(adapter->dev,
1227 "read CFG reg failed\n");
1228
1229 dev_dbg(adapter->dev,
1230 "info: CFG reg val =%x\n", cr);
1231 dev_kfree_skb_any(skb);
1232 return -1;
1233 }
1234 }
1235 }
1236
1237 return 0;
1238}
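/*
 * Illustrative sketch (not part of the driver): the interrupt handler
 * above rebuilds a 16-bit port bitmap from two 8-bit registers and
 * rounds the Rx length up to whole SDIO blocks.  The 256-byte block
 * size and the register values here are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_SDIO_BLOCK_SIZE	256	/* assumed MWIFIEX_SDIO_BLOCK_SIZE */

int main(void)
{
	uint8_t rd_bitmap_l = 0x06, rd_bitmap_u = 0x00;	/* example register reads */
	uint16_t rd_bitmap = ((uint16_t)rd_bitmap_u << 8) | rd_bitmap_l;
	uint16_t rx_len = 1500;				/* example length */
	uint32_t rx_blocks = (rx_len + EX_SDIO_BLOCK_SIZE - 1) / EX_SDIO_BLOCK_SIZE;

	printf("rd_bitmap=0x%04x rx_blocks=%u padded_len=%u\n",
	       (unsigned)rd_bitmap, (unsigned)rx_blocks,
	       (unsigned)(rx_blocks * EX_SDIO_BLOCK_SIZE));
	return 0;
}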
1239
1240/*
1241 * This function aggregates transmission buffers in driver and downloads
1242 * the aggregated packet to card.
1243 *
1244 * The individual packets are aggregated by copying into an aggregation
1245 * buffer and then downloaded to the card. Previously unsent packets in the
1246 * aggregation buffer are pre-copied before new packets are added.
1247 * Aggregation continues as long as there is space left in the aggregation
1248 * buffer and new packets are available.
1249 *
1250 * The function only downloads the packet to the card when aggregation
1251 * stops; otherwise it just adds the packet to the aggregation buffer
1252 * and returns.
1253 */
1254static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
1255 u8 *payload, u32 pkt_len, u8 port,
1256 u32 next_pkt_len)
1257{
1258 struct sdio_mmc_card *card = adapter->card;
1259 int ret = 0;
1260 s32 f_send_aggr_buf = 0;
1261 s32 f_send_cur_buf = 0;
1262 s32 f_precopy_cur_buf = 0;
1263 s32 f_postcopy_cur_buf = 0;
1264
1265 if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) {
1266 dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
1267 __func__);
1268
1269 f_send_cur_buf = 1;
1270 goto tx_curr_single;
1271 }
1272
1273 if (next_pkt_len) {
1274 /* More pkt in TX queue */
1275 dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
1276 __func__);
1277
1278 if (MP_TX_AGGR_IN_PROGRESS(card)) {
1279 if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) &&
1280 MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
1281 f_precopy_cur_buf = 1;
1282
1283 if (!(card->mp_wr_bitmap &
1284 (1 << card->curr_wr_port))
1285 || !MP_TX_AGGR_BUF_HAS_ROOM(
1286 card, next_pkt_len))
1287 f_send_aggr_buf = 1;
1288 } else {
1289 /* No room in Aggr buf, send it */
1290 f_send_aggr_buf = 1;
1291
1292 if (MP_TX_AGGR_PORT_LIMIT_REACHED(card) ||
1293 !(card->mp_wr_bitmap &
1294 (1 << card->curr_wr_port)))
1295 f_send_cur_buf = 1;
1296 else
1297 f_postcopy_cur_buf = 1;
1298 }
1299 } else {
1300 if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)
1301 && (card->mp_wr_bitmap & (1 << card->curr_wr_port)))
1302 f_precopy_cur_buf = 1;
1303 else
1304 f_send_cur_buf = 1;
1305 }
1306 } else {
1307 /* Last pkt in TX queue */
1308 dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
1309 __func__);
1310
1311 if (MP_TX_AGGR_IN_PROGRESS(card)) {
1312 /* some packs in Aggr buf already */
1313 f_send_aggr_buf = 1;
1314
1315 if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len))
1316 f_precopy_cur_buf = 1;
1317 else
1318 /* No room in Aggr buf, send it */
1319 f_send_cur_buf = 1;
1320 } else {
1321 f_send_cur_buf = 1;
1322 }
1323 }
1324
1325 if (f_precopy_cur_buf) {
1326 dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
1327 __func__);
1328 MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
1329
1330 if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
1331 MP_TX_AGGR_PORT_LIMIT_REACHED(card))
1332 /* No more pkts allowed in Aggr buf, send it */
1333 f_send_aggr_buf = 1;
1334 }
1335
1336 if (f_send_aggr_buf) {
1337 dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
1338 __func__,
1339 card->mpa_tx.start_port, card->mpa_tx.ports);
1340 ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
1341 card->mpa_tx.buf_len,
1342 (adapter->ioport | 0x1000 |
1343 (card->mpa_tx.ports << 4)) +
1344 card->mpa_tx.start_port);
1345
1346 MP_TX_AGGR_BUF_RESET(card);
1347 }
1348
1349tx_curr_single:
1350 if (f_send_cur_buf) {
1351 dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
1352 __func__, port);
1353 ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
1354 adapter->ioport + port);
1355 }
1356
1357 if (f_postcopy_cur_buf) {
1358 dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
1359 __func__);
1360 MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
1361 }
1362
1363 return ret;
1364}
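/*
 * Illustrative sketch (not part of the driver): a condensed view of the
 * four decision flags computed above.  The state names mirror the
 * driver's macros; the structs are local stand-ins.  Not shown: the
 * forced "send current buffer" path when aggregation is disabled or the
 * control port is used, and the extra flush when the packet/port limit
 * is hit right after a precopy.
 */
#include <stdbool.h>

struct ex_tx_aggr_state {
	bool aggr_in_progress;	/* MP_TX_AGGR_IN_PROGRESS()              */
	bool room_for_pkt;	/* MP_TX_AGGR_BUF_HAS_ROOM(pkt_len)      */
	bool room_for_next;	/* MP_TX_AGGR_BUF_HAS_ROOM(next_pkt_len) */
	bool port_limit;	/* MP_TX_AGGR_PORT_LIMIT_REACHED()       */
	bool wr_port_ready;	/* mp_wr_bitmap has curr_wr_port set     */
	bool more_pkts;		/* next_pkt_len != 0                     */
};

struct ex_tx_aggr_plan {
	bool precopy, send_aggr, send_cur, postcopy;
};

static struct ex_tx_aggr_plan ex_plan_tx(const struct ex_tx_aggr_state *s)
{
	struct ex_tx_aggr_plan p = { false, false, false, false };

	if (!s->more_pkts) {			/* last packet in the Tx queue */
		if (s->aggr_in_progress) {
			p.send_aggr = true;
			if (s->room_for_pkt)
				p.precopy = true;
			else
				p.send_cur = true;
		} else {
			p.send_cur = true;
		}
	} else if (!s->aggr_in_progress) {	/* more packets, aggr idle */
		if (s->room_for_pkt && s->wr_port_ready)
			p.precopy = true;
		else
			p.send_cur = true;
	} else if (!s->port_limit && s->room_for_pkt) {	/* keep aggregating */
		p.precopy = true;
		if (!s->wr_port_ready || !s->room_for_next)
			p.send_aggr = true;
	} else {				/* aggregation buffer is full */
		p.send_aggr = true;
		if (s->port_limit || !s->wr_port_ready)
			p.send_cur = true;
		else
			p.postcopy = true;
	}
	return p;
}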
1365
1366/*
1367 * This function downloads data from driver to card.
1368 *
1369 * Both commands and data packets are transferred to the card by this
1370 * function.
1371 *
1372 * This function adds the SDIO specific header to the front of the buffer
1373 * before transferring. The header contains the length of the packet and
1374 * the type. The firmware handles the packets based upon this set type.
1375 */
1376static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
1377 u8 type, u8 *payload, u32 pkt_len,
1378 struct mwifiex_tx_param *tx_param)
1379{
1380 struct sdio_mmc_card *card = adapter->card;
1381 int ret;
1382 u32 buf_block_len;
1383 u32 blk_size;
1384 u8 port = CTRL_PORT;
1385
1386 /* Allocate buffer and copy payload */
1387 blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
1388 buf_block_len = (pkt_len + blk_size - 1) / blk_size;
1389 *(u16 *) &payload[0] = (u16) pkt_len;
1390 *(u16 *) &payload[2] = type;
1391
1392 /*
1393 * This is SDIO specific header
1394 * u16 length,
1395 * u16 type (MWIFIEX_TYPE_DATA = 0, MWIFIEX_TYPE_CMD = 1,
1396 * MWIFIEX_TYPE_EVENT = 3)
1397 */
1398 if (type == MWIFIEX_TYPE_DATA) {
1399 ret = mwifiex_get_wr_port_data(adapter, &port);
1400 if (ret) {
1401 dev_err(adapter->dev, "%s: no wr_port available\n",
1402 __func__);
1403 return ret;
1404 }
1405 } else {
1406 adapter->cmd_sent = true;
1407 /* Type must be MWIFIEX_TYPE_CMD */
1408
1409 if (pkt_len <= INTF_HEADER_LEN ||
1410 pkt_len > MWIFIEX_UPLD_SIZE)
1411 dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
1412 __func__, payload, pkt_len);
1413 }
1414
1415 /* Transfer data to card */
1416 pkt_len = buf_block_len * blk_size;
1417
1418 if (tx_param)
1419 ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
1420 port, tx_param->next_pkt_len);
1421 else
1422 ret = mwifiex_host_to_card_mp_aggr(adapter, payload, pkt_len,
1423 port, 0);
1424
1425 if (ret) {
1426 if (type == MWIFIEX_TYPE_CMD)
1427 adapter->cmd_sent = false;
1428 if (type == MWIFIEX_TYPE_DATA)
1429 adapter->data_sent = false;
1430 } else {
1431 if (type == MWIFIEX_TYPE_DATA) {
1432 if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
1433 adapter->data_sent = true;
1434 else
1435 adapter->data_sent = false;
1436 }
1437 }
1438
1439 return ret;
1440}
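/*
 * Illustrative sketch (not part of the driver): the 4-byte interface
 * header written in front of every downloaded buffer, followed by the
 * round-up to whole SDIO blocks.  Type values follow the header comment
 * above (0 = data, 1 = cmd, 3 = event); the 256-byte block size is an
 * assumption, and the helper name is local to this example.
 */
#include <stdint.h>
#include <string.h>

#define EX_BLOCK_SIZE	256	/* assumed MWIFIEX_SDIO_BLOCK_SIZE */

/* Write "u16 length, u16 type" into the first four bytes of buf and
 * return the block-padded transfer length. */
static uint32_t ex_add_intf_header(uint8_t *buf, uint32_t pkt_len, uint16_t type)
{
	uint16_t len16 = (uint16_t)pkt_len;

	memcpy(&buf[0], &len16, sizeof(len16));	/* header length field */
	memcpy(&buf[2], &type, sizeof(type));	/* header type field   */

	return ((pkt_len + EX_BLOCK_SIZE - 1) / EX_BLOCK_SIZE) * EX_BLOCK_SIZE;
}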
1441
1442/*
1443 * This function allocates the MPA Tx and Rx buffers.
1444 */
1445static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
1446 u32 mpa_tx_buf_size, u32 mpa_rx_buf_size)
1447{
1448 struct sdio_mmc_card *card = adapter->card;
1449 int ret = 0;
1450
1451 card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
1452 if (!card->mpa_tx.buf) {
1453 dev_err(adapter->dev, "could not alloc buffer for MP-A TX\n");
1454 ret = -1;
1455 goto error;
1456 }
1457
1458 card->mpa_tx.buf_size = mpa_tx_buf_size;
1459
1460 card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL);
1461 if (!card->mpa_rx.buf) {
1462 dev_err(adapter->dev, "could not alloc buffer for MP-A RX\n");
1463 ret = -1;
1464 goto error;
1465 }
1466
1467 card->mpa_rx.buf_size = mpa_rx_buf_size;
1468
1469error:
1470 if (ret) {
1471 kfree(card->mpa_tx.buf);
1472 kfree(card->mpa_rx.buf);
1473 }
1474
1475 return ret;
1476}
1477
1478/*
1479 * This function unregisters the SDIO device.
1480 *
1481 * The SDIO IRQ is released, the function is disabled and driver
1482 * data is set to null.
1483 */
1484static void
1485mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
1486{
1487 struct sdio_mmc_card *card = adapter->card;
1488
1489 if (adapter->card) {
1490 /* Release the SDIO IRQ */
1491 sdio_claim_host(card->func);
1492 sdio_release_irq(card->func);
1493 sdio_disable_func(card->func);
1494 sdio_release_host(card->func);
1495 sdio_set_drvdata(card->func, NULL);
1496 }
1497}
1498
1499/*
1500 * This function registers the SDIO device.
1501 *
1502 * SDIO IRQ is claimed, block size is set and driver data is initialized.
1503 */
1504static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1505{
1506 int ret = 0;
1507 struct sdio_mmc_card *card = adapter->card;
1508 struct sdio_func *func = card->func;
1509
1510 /* save adapter pointer in card */
1511 card->adapter = adapter;
1512
1513 sdio_claim_host(func);
1514
1515 /* Request the SDIO IRQ */
1516 ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
1517 if (ret) {
1518 pr_err("claim irq failed: ret=%d\n", ret);
1519 goto disable_func;
1520 }
1521
1522 /* Set block size */
1523 ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
1524 if (ret) {
1525 pr_err("cannot set SDIO block size\n");
1526 ret = -1;
1527 goto release_irq;
1528 }
1529
1530 sdio_release_host(func);
1531 sdio_set_drvdata(func, card);
1532
1533 adapter->dev = &func->dev;
1534
1535 return 0;
1536
1537release_irq:
1538 sdio_release_irq(func);
1539disable_func:
1540 sdio_disable_func(func);
1541 sdio_release_host(func);
1542 adapter->card = NULL;
1543
1544 return -1;
1545}
1546
1547/*
1548 * This function initializes the SDIO driver.
1549 *
1550 * The following initialization steps are performed -
1551 * - Read the host interrupt status register to acknowledge
1552 * the first interrupt received from the bootloader
1553 * - Disable host interrupt mask register
1554 * - Get SDIO port
1555 * - Get revision ID
1556 * - Initialize SDIO variables in card
1557 * - Allocate MP registers
1558 * - Allocate MPA Tx and Rx buffers
1559 */
1560static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1561{
1562 struct sdio_mmc_card *card = adapter->card;
1563 int ret;
1564 u32 sdio_ireg;
1565
1566 /*
1567	 * Read the HOST_INTSTATUS_REG to ACK the first interrupt raised
1568	 * by the bootloader. If we don't do this we get an interrupt
1569	 * as soon as we register the irq.
1570 */
1571 mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
1572
1573 /* Disable host interrupt mask register for SDIO */
1574 mwifiex_sdio_disable_host_int(adapter);
1575
1576 /* Get SDIO ioport */
1577 mwifiex_init_sdio_ioport(adapter);
1578
1579 /* Get revision ID */
1580#define REV_ID_REG 0x5c
1581 mwifiex_read_reg(adapter, REV_ID_REG, &adapter->revision_id);
1582
1583 /* Initialize SDIO variables in card */
1584 card->mp_rd_bitmap = 0;
1585 card->mp_wr_bitmap = 0;
1586 card->curr_rd_port = 1;
1587 card->curr_wr_port = 1;
1588
1589 card->mp_data_port_mask = DATA_PORT_MASK;
1590
1591 card->mpa_tx.buf_len = 0;
1592 card->mpa_tx.pkt_cnt = 0;
1593 card->mpa_tx.start_port = 0;
1594
1595 card->mpa_tx.enabled = 0;
1596 card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
1597
1598 card->mpa_rx.buf_len = 0;
1599 card->mpa_rx.pkt_cnt = 0;
1600 card->mpa_rx.start_port = 0;
1601
1602 card->mpa_rx.enabled = 0;
1603 card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
1604
1605 /* Allocate buffers for SDIO MP-A */
1606 card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL);
1607 if (!card->mp_regs) {
1608 dev_err(adapter->dev, "failed to alloc mp_regs\n");
1609 return -ENOMEM;
1610 }
1611
1612 ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
1613 SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
1614 SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
1615 if (ret) {
1616 dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
1617 kfree(card->mp_regs);
1618 return -1;
1619 }
1620
1621 return ret;
1622}
1623
1624/*
1625 * This function resets the MPA Tx and Rx buffers.
1626 */
1627static void mwifiex_cleanup_mpa_buf(struct mwifiex_adapter *adapter)
1628{
1629 struct sdio_mmc_card *card = adapter->card;
1630
1631 MP_TX_AGGR_BUF_RESET(card);
1632 MP_RX_AGGR_BUF_RESET(card);
1633}
1634
1635/*
1636 * This function cleans up the allocated card buffers.
1637 *
1638 * The following are freed by this function -
1639 * - MP registers
1640 * - MPA Tx buffer
1641 * - MPA Rx buffer
1642 */
1643static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
1644{
1645 struct sdio_mmc_card *card = adapter->card;
1646
1647 kfree(card->mp_regs);
1648 kfree(card->mpa_tx.buf);
1649 kfree(card->mpa_rx.buf);
1650}
1651
1652/*
1653 * This function updates the MP end port in card.
1654 */
1655static void
1656mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
1657{
1658 struct sdio_mmc_card *card = adapter->card;
1659 int i;
1660
1661 card->mp_end_port = port;
1662
1663 card->mp_data_port_mask = DATA_PORT_MASK;
1664
1665 for (i = 1; i <= MAX_PORT - card->mp_end_port; i++)
1666 card->mp_data_port_mask &= ~(1 << (MAX_PORT - i));
1667
1668 card->curr_wr_port = 1;
1669
1670 dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
1671 port, card->mp_data_port_mask);
1672}
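/*
 * Illustrative sketch (not part of the driver): the data-port mask
 * computed by mwifiex_update_mp_end_port() above.  With MAX_PORT = 16
 * and DATA_PORT_MASK = 0xfffe (both from sdio.h), an end port of 14
 * clears bits 15 and 14, leaving 0x3ffe; the end-port value itself is
 * just an example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_PORT		16
#define EX_DATA_PORT_MASK	0xfffe

int main(void)
{
	uint16_t mp_end_port = 14;	/* example value reported by firmware */
	uint16_t mask = EX_DATA_PORT_MASK;
	int i;

	for (i = 1; i <= EX_MAX_PORT - mp_end_port; i++)
		mask &= ~(1 << (EX_MAX_PORT - i));

	printf("mp_end_port=%u data port mask=0x%04x\n",
	       (unsigned)mp_end_port, (unsigned)mask);
	return 0;
}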
1673
1674static struct mwifiex_if_ops sdio_ops = {
1675 .init_if = mwifiex_init_sdio,
1676 .cleanup_if = mwifiex_cleanup_sdio,
1677 .check_fw_status = mwifiex_check_fw_status,
1678 .prog_fw = mwifiex_prog_fw_w_helper,
1679 .register_dev = mwifiex_register_dev,
1680 .unregister_dev = mwifiex_unregister_dev,
1681 .enable_int = mwifiex_sdio_enable_host_int,
1682 .process_int_status = mwifiex_process_int_status,
1683 .host_to_card = mwifiex_sdio_host_to_card,
1684 .wakeup = mwifiex_pm_wakeup_card,
1685 .wakeup_complete = mwifiex_pm_wakeup_card_complete,
1686
1687 /* SDIO specific */
1688 .update_mp_end_port = mwifiex_update_mp_end_port,
1689 .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
1690};
1691
1692/*
1693 * This function initializes the SDIO driver.
1694 *
1695 * This initializes the semaphore and registers the driver with
1696 * the SDIO bus.
1697 */
1698static int
1699mwifiex_sdio_init_module(void)
1700{
1701 sema_init(&add_remove_card_sem, 1);
1702
1703 return sdio_register_driver(&mwifiex_sdio);
1704}
1705
1706/*
1707 * This function cleans up the SDIO driver.
1708 *
1709 * The following major steps are followed for cleanup -
1710 * - Resume the device if it is suspended
1711 * - Disconnect the device if connected
1712 * - Shut down the firmware
1713 * - Unregister the device from the SDIO bus.
1714 */
1715static void
1716mwifiex_sdio_cleanup_module(void)
1717{
1718 struct mwifiex_adapter *adapter = g_adapter;
1719 int i;
1720
1721 if (down_interruptible(&add_remove_card_sem))
1722 goto exit_sem_err;
1723
1724 if (!adapter || !adapter->priv_num)
1725 goto exit;
1726
1727 if (adapter->is_suspended)
1728 mwifiex_sdio_resume(adapter->dev);
1729
1730 for (i = 0; i < adapter->priv_num; i++)
1731 if ((GET_BSS_ROLE(adapter->priv[i]) == MWIFIEX_BSS_ROLE_STA) &&
1732 adapter->priv[i]->media_connected)
1733 mwifiex_deauthenticate(adapter->priv[i], NULL);
1734
1735 if (!adapter->surprise_removed)
1736 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
1737 MWIFIEX_BSS_ROLE_ANY),
1738 MWIFIEX_FUNC_SHUTDOWN);
1739
1740exit:
1741 up(&add_remove_card_sem);
1742
1743exit_sem_err:
1744 sdio_unregister_driver(&mwifiex_sdio);
1745}
1746
1747module_init(mwifiex_sdio_init_module);
1748module_exit(mwifiex_sdio_cleanup_module);
1749
1750MODULE_AUTHOR("Marvell International Ltd.");
1751MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
1752MODULE_VERSION(SDIO_VERSION);
1753MODULE_LICENSE("GPL v2");
1754MODULE_FIRMWARE("sd8787.bin");
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
new file mode 100644
index 000000000000..a0e9bc5253e0
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -0,0 +1,305 @@
1/*
2 * Marvell Wireless LAN device driver: SDIO specific definitions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_SDIO_H
21#define _MWIFIEX_SDIO_H
22
23
24#include <linux/mmc/sdio.h>
25#include <linux/mmc/sdio_ids.h>
26#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/card.h>
28
29#include "main.h"
30
31#define BLOCK_MODE 1
32#define BYTE_MODE 0
33
34#define REG_PORT 0
35#define RD_BITMAP_L 0x04
36#define RD_BITMAP_U 0x05
37#define WR_BITMAP_L 0x06
38#define WR_BITMAP_U 0x07
39#define RD_LEN_P0_L 0x08
40#define RD_LEN_P0_U 0x09
41
42#define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff
43
44#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000
45
46#define CTRL_PORT 0
47#define CTRL_PORT_MASK 0x0001
48#define DATA_PORT_MASK 0xfffe
49
50#define MAX_MP_REGS 64
51#define MAX_PORT 16
52
53#define SDIO_MP_AGGR_DEF_PKT_LIMIT 8
54
55#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (4096) /* 4K */
56
57/* Multi port RX aggregation buffer size */
58#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (4096) /* 4K */
59
60/* Misc. Config Register : Auto Re-enable interrupts */
61#define AUTO_RE_ENABLE_INT BIT(4)
62
63/* Host Control Registers */
64/* Host Control Registers : I/O port 0 */
65#define IO_PORT_0_REG 0x78
66/* Host Control Registers : I/O port 1 */
67#define IO_PORT_1_REG 0x79
68/* Host Control Registers : I/O port 2 */
69#define IO_PORT_2_REG 0x7A
70
71/* Host Control Registers : Configuration */
72#define CONFIGURATION_REG 0x00
73/* Host Control Registers : Host to card event */
74#define HOST_TO_CARD_EVENT (0x1U << 3)
75/* Host Control Registers : Host without Command 53 finish host */
76#define HOST_WO_CMD53_FINISH_HOST (0x1U << 2)
77/* Host Control Registers : Host power up */
78#define HOST_POWER_UP (0x1U << 1)
79/* Host Control Registers : Host power down */
80#define HOST_POWER_DOWN (0x1U << 0)
81
82/* Host Control Registers : Host interrupt mask */
83#define HOST_INT_MASK_REG 0x02
84/* Host Control Registers : Upload host interrupt mask */
85#define UP_LD_HOST_INT_MASK (0x1U)
86/* Host Control Registers : Download host interrupt mask */
87#define DN_LD_HOST_INT_MASK (0x2U)
88/* Enable Host interrupt mask */
89#define HOST_INT_ENABLE (UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK)
90/* Disable Host interrupt mask */
91#define HOST_INT_DISABLE 0xff
92
93/* Host Control Registers : Host interrupt status */
94#define HOST_INTSTATUS_REG 0x03
95/* Host Control Registers : Upload host interrupt status */
96#define UP_LD_HOST_INT_STATUS (0x1U)
97/* Host Control Registers : Download host interrupt status */
98#define DN_LD_HOST_INT_STATUS (0x2U)
99
100/* Host Control Registers : Host interrupt RSR */
101#define HOST_INT_RSR_REG 0x01
102/* Host Control Registers : Upload host interrupt RSR */
103#define UP_LD_HOST_INT_RSR (0x1U)
104#define SDIO_INT_MASK 0x3F
105
106/* Host Control Registers : Host interrupt status */
107#define HOST_INT_STATUS_REG 0x28
108/* Host Control Registers : Upload CRC error */
109#define UP_LD_CRC_ERR (0x1U << 2)
110/* Host Control Registers : Upload restart */
111#define UP_LD_RESTART (0x1U << 1)
112/* Host Control Registers : Download restart */
113#define DN_LD_RESTART (0x1U << 0)
114
115/* Card Control Registers : Card status register */
116#define CARD_STATUS_REG 0x30
117/* Card Control Registers : Card I/O ready */
118#define CARD_IO_READY (0x1U << 3)
119/* Card Control Registers : CIS card ready */
120#define CIS_CARD_RDY (0x1U << 2)
121/* Card Control Registers : Upload card ready */
122#define UP_LD_CARD_RDY (0x1U << 1)
123/* Card Control Registers : Download card ready */
124#define DN_LD_CARD_RDY (0x1U << 0)
125
126/* Card Control Registers : Host interrupt mask register */
127#define HOST_INTERRUPT_MASK_REG 0x34
128/* Card Control Registers : Host power interrupt mask */
129#define HOST_POWER_INT_MASK (0x1U << 3)
130/* Card Control Registers : Abort card interrupt mask */
131#define ABORT_CARD_INT_MASK (0x1U << 2)
132/* Card Control Registers : Upload card interrupt mask */
133#define UP_LD_CARD_INT_MASK (0x1U << 1)
134/* Card Control Registers : Download card interrupt mask */
135#define DN_LD_CARD_INT_MASK (0x1U << 0)
136
137/* Card Control Registers : Card interrupt status register */
138#define CARD_INTERRUPT_STATUS_REG 0x38
139/* Card Control Registers : Power up interrupt */
140#define POWER_UP_INT (0x1U << 4)
141/* Card Control Registers : Power down interrupt */
142#define POWER_DOWN_INT (0x1U << 3)
143
144/* Card Control Registers : Card interrupt RSR register */
145#define CARD_INTERRUPT_RSR_REG 0x3c
146/* Card Control Registers : Power up RSR */
147#define POWER_UP_RSR (0x1U << 4)
148/* Card Control Registers : Power down RSR */
149#define POWER_DOWN_RSR (0x1U << 3)
150
151/* Card Control Registers : Miscellaneous Configuration Register */
152#define CARD_MISC_CFG_REG 0x6C
153
154/* Host F1 read base 0 */
155#define HOST_F1_RD_BASE_0 0x0040
156/* Host F1 read base 1 */
157#define HOST_F1_RD_BASE_1 0x0041
158/* Host F1 card ready */
159#define HOST_F1_CARD_RDY 0x0020
160
161/* Firmware status 0 register */
162#define CARD_FW_STATUS0_REG 0x60
163/* Firmware status 1 register */
164#define CARD_FW_STATUS1_REG 0x61
165/* Rx length register */
166#define CARD_RX_LEN_REG 0x62
167/* Rx unit register */
168#define CARD_RX_UNIT_REG 0x63
169
170/* Event header Len*/
171#define MWIFIEX_EVENT_HEADER_LEN 8
172
173/* Max retry number of CMD53 write */
174#define MAX_WRITE_IOMEM_RETRY 2
175
176/* SDIO Tx aggregation in progress ? */
177#define MP_TX_AGGR_IN_PROGRESS(a) (a->mpa_tx.pkt_cnt > 0)
178
179/* SDIO Tx aggregation buffer room for next packet ? */
180#define MP_TX_AGGR_BUF_HAS_ROOM(a, len) ((a->mpa_tx.buf_len+len) \
181 <= a->mpa_tx.buf_size)
182
183/* Copy current packet (SDIO Tx aggregation buffer) to SDIO buffer */
184#define MP_TX_AGGR_BUF_PUT(a, payload, pkt_len, port) do { \
185 memmove(&a->mpa_tx.buf[a->mpa_tx.buf_len], \
186 payload, pkt_len); \
187 a->mpa_tx.buf_len += pkt_len; \
188 if (!a->mpa_tx.pkt_cnt) \
189 a->mpa_tx.start_port = port; \
190 if (a->mpa_tx.start_port <= port) \
191 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt)); \
192 else \
193 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \
194 a->mp_end_port))); \
195 a->mpa_tx.pkt_cnt++; \
196} while (0);
197
198/* SDIO Tx aggregation limit ? */
199#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \
200 (a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit)
201
202/* SDIO Tx aggregation port limit ? */
203#define MP_TX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_wr_port < \
204 a->mpa_tx.start_port) && (((MAX_PORT - \
205 a->mpa_tx.start_port) + a->curr_wr_port) >= \
206 SDIO_MP_AGGR_DEF_PKT_LIMIT))
207
208/* Reset SDIO Tx aggregation buffer parameters */
209#define MP_TX_AGGR_BUF_RESET(a) do { \
210 a->mpa_tx.pkt_cnt = 0; \
211 a->mpa_tx.buf_len = 0; \
212 a->mpa_tx.ports = 0; \
213 a->mpa_tx.start_port = 0; \
214} while (0);
215
216/* SDIO Rx aggregation limit ? */
217#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \
218 (a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit)
219
220/* SDIO Rx aggregation port limit ? */
221#define MP_RX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_rd_port < \
222 a->mpa_rx.start_port) && (((MAX_PORT - \
223 a->mpa_rx.start_port) + a->curr_rd_port) >= \
224 SDIO_MP_AGGR_DEF_PKT_LIMIT))
225
226/* SDIO Rx aggregation in progress ? */
227#define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0)
228
229/* SDIO Rx aggregation buffer room for next packet ? */
230#define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len) \
231 ((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size)
232
233/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
234#define MP_RX_AGGR_SETUP(a, skb, port) do { \
235 a->mpa_rx.buf_len += skb->len; \
236 if (!a->mpa_rx.pkt_cnt) \
237 a->mpa_rx.start_port = port; \
238 if (a->mpa_rx.start_port <= port) \
239 a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt)); \
240 else \
241 a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt+1)); \
242 a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \
243 a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \
244 a->mpa_rx.pkt_cnt++; \
245} while (0);
246
247/* Reset SDIO Rx aggregation buffer parameters */
248#define MP_RX_AGGR_BUF_RESET(a) do { \
249 a->mpa_rx.pkt_cnt = 0; \
250 a->mpa_rx.buf_len = 0; \
251 a->mpa_rx.ports = 0; \
252 a->mpa_rx.start_port = 0; \
253} while (0);
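/*
 * Illustrative sketch (not part of the driver): how the aggregate
 * transfer address is formed from the ports bitmap built by the macros
 * above.  The 0x1000 multi-port flag and the (ports << 4) placement
 * mirror the aggregation paths in sdio.c; the ioport and start_port
 * values are examples only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ioport = 0x10000;	/* example value read via IO_PORT_*_REG */
	uint16_t ports = 0x0007;	/* three packets -> bits 0..2 set */
	uint8_t start_port = 1;		/* first port in the aggregate */
	uint32_t addr = (ioport | 0x1000 | ((uint32_t)ports << 4)) + start_port;

	printf("aggregate transfer address = 0x%08x\n", (unsigned)addr);
	return 0;
}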
254
255
256/* data structure for SDIO MPA TX */
257struct mwifiex_sdio_mpa_tx {
258 /* multiport tx aggregation buffer pointer */
259 u8 *buf;
260 u32 buf_len;
261 u32 pkt_cnt;
262 u16 ports;
263 u16 start_port;
264 u8 enabled;
265 u32 buf_size;
266 u32 pkt_aggr_limit;
267};
268
269struct mwifiex_sdio_mpa_rx {
270 u8 *buf;
271 u32 buf_len;
272 u32 pkt_cnt;
273 u16 ports;
274 u16 start_port;
275
276 struct sk_buff *skb_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
277 u32 len_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
278
279 u8 enabled;
280 u32 buf_size;
281 u32 pkt_aggr_limit;
282};
283
284int mwifiex_bus_register(void);
285void mwifiex_bus_unregister(void);
286
287struct sdio_mmc_card {
288 struct sdio_func *func;
289 struct mwifiex_adapter *adapter;
290
291 u16 mp_rd_bitmap;
292 u16 mp_wr_bitmap;
293
294 u16 mp_end_port;
295 u16 mp_data_port_mask;
296
297 u8 curr_rd_port;
298 u8 curr_wr_port;
299
300 u8 *mp_regs;
301
302 struct mwifiex_sdio_mpa_tx mpa_tx;
303 struct mwifiex_sdio_mpa_rx mpa_rx;
304};
305#endif /* _MWIFIEX_SDIO_H */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
new file mode 100644
index 000000000000..8af3a78d2723
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -0,0 +1,1219 @@
1/*
2 * Marvell Wireless LAN device driver: station command handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function prepares command to set/get RSSI information.
30 *
31 * Preparation includes -
32 * - Setting command ID, action and proper size
33 * - Setting data/beacon average factors
34 * - Resetting SNR/NF/RSSI values in private structure
35 * - Ensuring correct endian-ness
36 */
37static int
38mwifiex_cmd_802_11_rssi_info(struct mwifiex_private *priv,
39 struct host_cmd_ds_command *cmd, u16 cmd_action)
40{
41 cmd->command = cpu_to_le16(HostCmd_CMD_RSSI_INFO);
42 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_rssi_info) +
43 S_DS_GEN);
44 cmd->params.rssi_info.action = cpu_to_le16(cmd_action);
45 cmd->params.rssi_info.ndata = cpu_to_le16(priv->data_avg_factor);
46 cmd->params.rssi_info.nbcn = cpu_to_le16(priv->bcn_avg_factor);
47
48 /* Reset SNR/NF/RSSI values in private structure */
49 priv->data_rssi_last = 0;
50 priv->data_nf_last = 0;
51 priv->data_rssi_avg = 0;
52 priv->data_nf_avg = 0;
53 priv->bcn_rssi_last = 0;
54 priv->bcn_nf_last = 0;
55 priv->bcn_rssi_avg = 0;
56 priv->bcn_nf_avg = 0;
57
58 return 0;
59}
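/*
 * Illustrative sketch (not part of the driver): the framing pattern
 * shared by the command preparation routines in this file - set the
 * command ID, set size to the fixed body length plus the generic
 * header length S_DS_GEN, and store both little-endian.  The struct,
 * helper and the 8-byte S_DS_GEN value are assumptions local to this
 * example.
 */
#include <stdint.h>

#define EX_S_DS_GEN	8	/* assumed: command, size, seq_num, result */

struct ex_cmd_header {
	uint16_t command;	/* little-endian command ID   */
	uint16_t size;		/* little-endian total length */
};

static void ex_prepare_cmd(struct ex_cmd_header *hdr, uint16_t id,
			   uint16_t body_len)
{
	/* the driver wraps both stores in cpu_to_le16() */
	hdr->command = id;
	hdr->size = body_len + EX_S_DS_GEN;
}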
60
61/*
62 * This function prepares command to set MAC control.
63 *
64 * Preparation includes -
65 * - Setting command ID, action and proper size
66 * - Ensuring correct endian-ness
67 */
68static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
69 struct host_cmd_ds_command *cmd,
70 u16 cmd_action, void *data_buf)
71{
72 struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
73 u16 action = *((u16 *) data_buf);
74
75 if (cmd_action != HostCmd_ACT_GEN_SET) {
76 dev_err(priv->adapter->dev,
77 "mac_control: only support set cmd\n");
78 return -1;
79 }
80
81 cmd->command = cpu_to_le16(HostCmd_CMD_MAC_CONTROL);
82 cmd->size =
83 cpu_to_le16(sizeof(struct host_cmd_ds_mac_control) + S_DS_GEN);
84 mac_ctrl->action = cpu_to_le16(action);
85
86 return 0;
87}
88
89/*
90 * This function prepares command to set/get SNMP MIB.
91 *
92 * Preparation includes -
93 * - Setting command ID, action and proper size
94 * - Setting SNMP MIB OID number and value
95 * (as required)
96 * - Ensuring correct endian-ness
97 *
98 * The following SNMP MIB OIDs are supported -
99 * - FRAG_THRESH_I : Fragmentation threshold
100 * - RTS_THRESH_I : RTS threshold
101 * - SHORT_RETRY_LIM_I : Short retry limit
102 * - DOT11D_I : 11d support
103 */
104static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
105 struct host_cmd_ds_command *cmd,
106 u16 cmd_action, u32 cmd_oid,
107 void *data_buf)
108{
109 struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
110 u32 ul_temp;
111
112 dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
113 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
114 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
115 - 1 + S_DS_GEN);
116
117 if (cmd_action == HostCmd_ACT_GEN_GET) {
118 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET);
119 snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE);
120 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
121 + MAX_SNMP_BUF_SIZE);
122 }
123
124 switch (cmd_oid) {
125 case FRAG_THRESH_I:
126 snmp_mib->oid = cpu_to_le16((u16) FRAG_THRESH_I);
127 if (cmd_action == HostCmd_ACT_GEN_SET) {
128 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
129 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
130 ul_temp = *((u32 *) data_buf);
131 *((__le16 *) (snmp_mib->value)) =
132 cpu_to_le16((u16) ul_temp);
133 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
134 + sizeof(u16));
135 }
136 break;
137 case RTS_THRESH_I:
138 snmp_mib->oid = cpu_to_le16((u16) RTS_THRESH_I);
139 if (cmd_action == HostCmd_ACT_GEN_SET) {
140 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
141 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
142 ul_temp = *((u32 *) data_buf);
143 *(__le16 *) (snmp_mib->value) =
144 cpu_to_le16((u16) ul_temp);
145 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
146 + sizeof(u16));
147 }
148 break;
149
150 case SHORT_RETRY_LIM_I:
151 snmp_mib->oid = cpu_to_le16((u16) SHORT_RETRY_LIM_I);
152 if (cmd_action == HostCmd_ACT_GEN_SET) {
153 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
154 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
155 ul_temp = (*(u32 *) data_buf);
156 *((__le16 *) (snmp_mib->value)) =
157 cpu_to_le16((u16) ul_temp);
158 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
159 + sizeof(u16));
160 }
161 break;
162 case DOT11D_I:
163 snmp_mib->oid = cpu_to_le16((u16) DOT11D_I);
164 if (cmd_action == HostCmd_ACT_GEN_SET) {
165 snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
166 snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
167 ul_temp = *(u32 *) data_buf;
168 *((__le16 *) (snmp_mib->value)) =
169 cpu_to_le16((u16) ul_temp);
170 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
171 + sizeof(u16));
172 }
173 break;
174 default:
175 break;
176 }
177 dev_dbg(priv->adapter->dev,
178 "cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
179 " Value=0x%x\n",
180 cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
181 le16_to_cpu(*(__le16 *) snmp_mib->value));
182 return 0;
183}
184
185/*
186 * This function prepares command to get log.
187 *
188 * Preparation includes -
189 * - Setting command ID and proper size
190 * - Ensuring correct endian-ness
191 */
192static int
193mwifiex_cmd_802_11_get_log(struct host_cmd_ds_command *cmd)
194{
195 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_GET_LOG);
196 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_get_log) +
197 S_DS_GEN);
198 return 0;
199}
200
201/*
202 * This function prepares command to set/get Tx data rate configuration.
203 *
204 * Preparation includes -
205 * - Setting command ID, action and proper size
206 * - Setting configuration index, rate scope and rate drop pattern
207 * parameters (as required)
208 * - Ensuring correct endian-ness
209 */
210static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
211 struct host_cmd_ds_command *cmd,
212 u16 cmd_action, void *data_buf)
213{
214 struct host_cmd_ds_tx_rate_cfg *rate_cfg = &cmd->params.tx_rate_cfg;
215 struct mwifiex_rate_scope *rate_scope;
216 struct mwifiex_rate_drop_pattern *rate_drop;
217 u16 *pbitmap_rates = (u16 *) data_buf;
218
219 u32 i;
220
221 cmd->command = cpu_to_le16(HostCmd_CMD_TX_RATE_CFG);
222
223 rate_cfg->action = cpu_to_le16(cmd_action);
224 rate_cfg->cfg_index = 0;
225
226 rate_scope = (struct mwifiex_rate_scope *) ((u8 *) rate_cfg +
227 sizeof(struct host_cmd_ds_tx_rate_cfg));
228 rate_scope->type = cpu_to_le16(TLV_TYPE_RATE_SCOPE);
229 rate_scope->length = cpu_to_le16(sizeof(struct mwifiex_rate_scope) -
230 sizeof(struct mwifiex_ie_types_header));
231 if (pbitmap_rates != NULL) {
232 rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
233 rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
234 for (i = 0;
235 i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16);
236 i++)
237 rate_scope->ht_mcs_rate_bitmap[i] =
238 cpu_to_le16(pbitmap_rates[2 + i]);
239 } else {
240 rate_scope->hr_dsss_rate_bitmap =
241 cpu_to_le16(priv->bitmap_rates[0]);
242 rate_scope->ofdm_rate_bitmap =
243 cpu_to_le16(priv->bitmap_rates[1]);
244 for (i = 0;
245 i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16);
246 i++)
247 rate_scope->ht_mcs_rate_bitmap[i] =
248 cpu_to_le16(priv->bitmap_rates[2 + i]);
249 }
250
251 rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
252 sizeof(struct mwifiex_rate_scope));
253 rate_drop->type = cpu_to_le16(TLV_TYPE_RATE_DROP_CONTROL);
254 rate_drop->length = cpu_to_le16(sizeof(rate_drop->rate_drop_mode));
255 rate_drop->rate_drop_mode = 0;
256
257 cmd->size =
258 cpu_to_le16(S_DS_GEN + sizeof(struct host_cmd_ds_tx_rate_cfg) +
259 sizeof(struct mwifiex_rate_scope) +
260 sizeof(struct mwifiex_rate_drop_pattern));
261
262 return 0;
263}
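/*
 * Illustrative sketch (not part of the driver): placing TLVs
 * back-to-back after a fixed command body, as mwifiex_cmd_tx_rate_cfg()
 * does with the rate-scope and rate-drop TLVs.  The 4-byte header below
 * is a stand-in for mwifiex_ie_types_header, and byte order is left to
 * the host for brevity (the driver uses cpu_to_le16()).
 */
#include <stdint.h>
#include <string.h>

struct ex_tlv_header {
	uint16_t type;	/* TLV type                            */
	uint16_t len;	/* value length, excluding this header */
};

/* Append one TLV at pos and return a pointer just past it. */
static uint8_t *ex_append_tlv(uint8_t *pos, uint16_t type,
			      const void *value, uint16_t len)
{
	struct ex_tlv_header hdr = { type, len };

	memcpy(pos, &hdr, sizeof(hdr));
	memcpy(pos + sizeof(hdr), value, len);
	return pos + sizeof(hdr) + len;
}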
264
265/*
266 * This function prepares command to set/get Tx power configuration.
267 *
268 * Preparation includes -
269 * - Setting command ID, action and proper size
270 * - Setting Tx power mode, power group TLV
271 * (as required)
272 * - Ensuring correct endian-ness
273 */
274static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
275 u16 cmd_action, void *data_buf)
276{
277 struct mwifiex_types_power_group *pg_tlv;
278 struct host_cmd_ds_txpwr_cfg *txp;
279 struct host_cmd_ds_txpwr_cfg *cmd_txp_cfg = &cmd->params.txp_cfg;
280
281 cmd->command = cpu_to_le16(HostCmd_CMD_TXPWR_CFG);
282 cmd->size =
283 cpu_to_le16(S_DS_GEN + sizeof(struct host_cmd_ds_txpwr_cfg));
284 switch (cmd_action) {
285 case HostCmd_ACT_GEN_SET:
286 txp = (struct host_cmd_ds_txpwr_cfg *) data_buf;
287 if (txp->mode) {
288 pg_tlv = (struct mwifiex_types_power_group
289 *) ((unsigned long) data_buf +
290 sizeof(struct host_cmd_ds_txpwr_cfg));
291 memmove(cmd_txp_cfg, data_buf,
292 sizeof(struct host_cmd_ds_txpwr_cfg) +
293 sizeof(struct mwifiex_types_power_group) +
294 pg_tlv->length);
295
296 pg_tlv = (struct mwifiex_types_power_group *) ((u8 *)
297 cmd_txp_cfg +
298 sizeof(struct host_cmd_ds_txpwr_cfg));
299 cmd->size = cpu_to_le16(le16_to_cpu(cmd->size) +
300 sizeof(struct mwifiex_types_power_group) +
301 pg_tlv->length);
302 } else {
303 memmove(cmd_txp_cfg, data_buf,
304 sizeof(struct host_cmd_ds_txpwr_cfg));
305 }
306 cmd_txp_cfg->action = cpu_to_le16(cmd_action);
307 break;
308 case HostCmd_ACT_GEN_GET:
309 cmd_txp_cfg->action = cpu_to_le16(cmd_action);
310 break;
311 }
312
313 return 0;
314}
315
316/*
317 * This function prepares command to set Host Sleep configuration.
318 *
319 * Preparation includes -
320 * - Setting command ID and proper size
321 * - Setting Host Sleep action, conditions, ARP filters
322 * (as required)
323 * - Ensuring correct endian-ness
324 */
325static int mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
326 struct host_cmd_ds_command *cmd,
327 u16 cmd_action,
328 struct mwifiex_hs_config_param *data_buf)
329{
330 struct mwifiex_adapter *adapter = priv->adapter;
331 struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
332 u16 hs_activate = false;
333
334 if (data_buf == NULL)
335 /* New Activate command */
336 hs_activate = true;
337 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH);
338
339 if (!hs_activate &&
340 (data_buf->conditions
341 != cpu_to_le32(HOST_SLEEP_CFG_CANCEL))
342 && ((adapter->arp_filter_size > 0)
343 && (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
344 dev_dbg(adapter->dev,
345 "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
346 adapter->arp_filter_size);
347 memcpy(((u8 *) hs_cfg) +
348 sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
349 adapter->arp_filter, adapter->arp_filter_size);
350 cmd->size = cpu_to_le16(adapter->arp_filter_size +
351 sizeof(struct host_cmd_ds_802_11_hs_cfg_enh)
352 + S_DS_GEN);
353 } else {
354 cmd->size = cpu_to_le16(S_DS_GEN + sizeof(struct
355 host_cmd_ds_802_11_hs_cfg_enh));
356 }
357 if (hs_activate) {
358 hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
359 hs_cfg->params.hs_activate.resp_ctrl = RESP_NEEDED;
360 } else {
361 hs_cfg->action = cpu_to_le16(HS_CONFIGURE);
362 hs_cfg->params.hs_config.conditions = data_buf->conditions;
363 hs_cfg->params.hs_config.gpio = data_buf->gpio;
364 hs_cfg->params.hs_config.gap = data_buf->gap;
365 dev_dbg(adapter->dev,
366 "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
367 hs_cfg->params.hs_config.conditions,
368 hs_cfg->params.hs_config.gpio,
369 hs_cfg->params.hs_config.gap);
370 }
371
372 return 0;
373}
374
375/*
376 * This function prepares command to set/get MAC address.
377 *
378 * Preparation includes -
379 * - Setting command ID, action and proper size
380 * - Setting MAC address (for SET only)
381 * - Ensuring correct endian-ness
382 */
383static int mwifiex_cmd_802_11_mac_address(struct mwifiex_private *priv,
384 struct host_cmd_ds_command *cmd,
385 u16 cmd_action)
386{
387 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_MAC_ADDRESS);
388 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_mac_address) +
389 S_DS_GEN);
390 cmd->result = 0;
391
392 cmd->params.mac_addr.action = cpu_to_le16(cmd_action);
393
394 if (cmd_action == HostCmd_ACT_GEN_SET)
395 memcpy(cmd->params.mac_addr.mac_addr, priv->curr_addr,
396 ETH_ALEN);
397 return 0;
398}
399
400/*
401 * This function prepares command to set MAC multicast address.
402 *
403 * Preparation includes -
404 * - Setting command ID, action and proper size
405 * - Setting MAC multicast address
406 * - Ensuring correct endian-ness
407 */
408static int mwifiex_cmd_mac_multicast_adr(struct host_cmd_ds_command *cmd,
409 u16 cmd_action, void *data_buf)
410{
411 struct mwifiex_multicast_list *mcast_list =
412 (struct mwifiex_multicast_list *) data_buf;
413 struct host_cmd_ds_mac_multicast_adr *mcast_addr = &cmd->params.mc_addr;
414
415 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mac_multicast_adr) +
416 S_DS_GEN);
417 cmd->command = cpu_to_le16(HostCmd_CMD_MAC_MULTICAST_ADR);
418
419 mcast_addr->action = cpu_to_le16(cmd_action);
420 mcast_addr->num_of_adrs =
421 cpu_to_le16((u16) mcast_list->num_multicast_addr);
422 memcpy(mcast_addr->mac_list, mcast_list->mac_list,
423 mcast_list->num_multicast_addr * ETH_ALEN);
424
425 return 0;
426}
427
428/*
429 * This function prepares command to deauthenticate.
430 *
431 * Preparation includes -
432 * - Setting command ID and proper size
433 * - Setting AP MAC address and reason code
434 * - Ensuring correct endian-ness
435 */
436static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv,
437 struct host_cmd_ds_command *cmd,
438 void *data_buf)
439{
440 struct host_cmd_ds_802_11_deauthenticate *deauth = &cmd->params.deauth;
441
442 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_DEAUTHENTICATE);
443 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_deauthenticate)
444 + S_DS_GEN);
445
446 /* Set AP MAC address */
447 memcpy(deauth->mac_addr, (u8 *) data_buf, ETH_ALEN);
448
449 dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr);
450
451 deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
452
453 return 0;
454}
455
456/*
457 * This function prepares command to stop Ad-Hoc network.
458 *
459 * Preparation includes -
460 * - Setting command ID and proper size
461 * - Ensuring correct endian-ness
462 */
463static int mwifiex_cmd_802_11_ad_hoc_stop(struct host_cmd_ds_command *cmd)
464{
465 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_AD_HOC_STOP);
466 cmd->size = cpu_to_le16(S_DS_GEN);
467 return 0;
468}
469
470/*
471 * This function sets WEP key(s) to key parameter TLV(s).
472 *
473 * Multi-key parameter TLVs are supported, so we can send multiple
474 * WEP keys in a single buffer.
475 */
476static int
477mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
478 struct mwifiex_ie_type_key_param_set *key_param_set,
479 u16 *key_param_len)
480{
481 int cur_key_param_len;
482 u8 i;
483
484 /* Multi-key_param_set TLV is supported */
485 for (i = 0; i < NUM_WEP_KEYS; i++) {
486 if ((priv->wep_key[i].key_length == WLAN_KEY_LEN_WEP40) ||
487 (priv->wep_key[i].key_length == WLAN_KEY_LEN_WEP104)) {
488 key_param_set->type =
489 cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
490/* Key_param_set WEP fixed length */
491#define KEYPARAMSET_WEP_FIXED_LEN 8
492 key_param_set->length = cpu_to_le16((u16)
493 (priv->wep_key[i].
494 key_length +
495 KEYPARAMSET_WEP_FIXED_LEN));
496 key_param_set->key_type_id =
497 cpu_to_le16(KEY_TYPE_ID_WEP);
498 key_param_set->key_info =
499 cpu_to_le16(KEY_ENABLED | KEY_UNICAST |
500 KEY_MCAST);
501 key_param_set->key_len =
502 cpu_to_le16(priv->wep_key[i].key_length);
503 /* Set WEP key index */
504 key_param_set->key[0] = i;
505 /* Set default Tx key flag */
506 if (i ==
507 (priv->
508 wep_key_curr_index & HostCmd_WEP_KEY_INDEX_MASK))
509 key_param_set->key[1] = 1;
510 else
511 key_param_set->key[1] = 0;
512 memmove(&key_param_set->key[2],
513 priv->wep_key[i].key_material,
514 priv->wep_key[i].key_length);
515
516 cur_key_param_len = priv->wep_key[i].key_length +
517 KEYPARAMSET_WEP_FIXED_LEN +
518 sizeof(struct mwifiex_ie_types_header);
519 *key_param_len += (u16) cur_key_param_len;
520 key_param_set =
521 (struct mwifiex_ie_type_key_param_set *)
522 ((u8 *)key_param_set +
523 cur_key_param_len);
524 } else if (!priv->wep_key[i].key_length) {
525 continue;
526 } else {
527 dev_err(priv->adapter->dev,
528 "key%d Length = %d is incorrect\n",
529 (i + 1), priv->wep_key[i].key_length);
530 return -1;
531 }
532 }
533
534 return 0;
535}
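/*
 * Illustrative sketch (not part of the driver): the per-key length
 * bookkeeping used above.  For a 13-byte WEP104 key, the key_param_set
 * carries 8 fixed bytes plus the key, and key_param_len also counts the
 * TLV header, assumed to be 4 bytes here: 13 + 8 + 4 = 25 bytes per key.
 */
#include <stdio.h>

#define EX_WEP104_KEY_LEN		13
#define EX_KEYPARAMSET_WEP_FIXED_LEN	8	/* from the #define above */
#define EX_IE_TYPES_HEADER_LEN		4	/* assumed header size */

int main(void)
{
	int cur_key_param_len = EX_WEP104_KEY_LEN +
				EX_KEYPARAMSET_WEP_FIXED_LEN +
				EX_IE_TYPES_HEADER_LEN;

	printf("one WEP104 key adds %d bytes to the key material TLV\n",
	       cur_key_param_len);
	return 0;
}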
536
537/*
538 * This function prepares command to set/get/reset network key(s).
539 *
540 * Preparation includes -
541 * - Setting command ID, action and proper size
542 * - Setting WEP keys, WAPI keys or WPA keys along with required
543 * encryption (TKIP, AES) (as required)
544 * - Ensuring correct endian-ness
545 */
546static int mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
547 struct host_cmd_ds_command *cmd,
548 u16 cmd_action,
549 u32 cmd_oid, void *data_buf)
550{
551 struct host_cmd_ds_802_11_key_material *key_material =
552 &cmd->params.key_material;
553 struct mwifiex_ds_encrypt_key *enc_key =
554 (struct mwifiex_ds_encrypt_key *) data_buf;
555 u16 key_param_len = 0;
556 int ret = 0;
557 const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
558
559 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
560 key_material->action = cpu_to_le16(cmd_action);
561
562 if (cmd_action == HostCmd_ACT_GEN_GET) {
563 cmd->size =
564 cpu_to_le16(sizeof(key_material->action) + S_DS_GEN);
565 return ret;
566 }
567
568 if (!enc_key) {
569 memset(&key_material->key_param_set, 0,
570 (NUM_WEP_KEYS *
571 sizeof(struct mwifiex_ie_type_key_param_set)));
572 ret = mwifiex_set_keyparamset_wep(priv,
573 &key_material->key_param_set,
574 &key_param_len);
575 cmd->size = cpu_to_le16(key_param_len +
576 sizeof(key_material->action) + S_DS_GEN);
577 return ret;
578 } else
579 memset(&key_material->key_param_set, 0,
580 sizeof(struct mwifiex_ie_type_key_param_set));
581 if (enc_key->is_wapi_key) {
582 dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n");
583 key_material->key_param_set.key_type_id =
584 cpu_to_le16(KEY_TYPE_ID_WAPI);
585 if (cmd_oid == KEY_INFO_ENABLED)
586 key_material->key_param_set.key_info =
587 cpu_to_le16(KEY_ENABLED);
588 else
589 key_material->key_param_set.key_info =
590 cpu_to_le16(!KEY_ENABLED);
591
592 key_material->key_param_set.key[0] = enc_key->key_index;
593 if (!priv->sec_info.wapi_key_on)
594 key_material->key_param_set.key[1] = 1;
595 else
596 /* set 0 when re-key */
597 key_material->key_param_set.key[1] = 0;
598
599 if (0 != memcmp(enc_key->mac_addr, bc_mac, sizeof(bc_mac))) {
600 /* WAPI pairwise key: unicast */
601 key_material->key_param_set.key_info |=
602 cpu_to_le16(KEY_UNICAST);
603 } else { /* WAPI group key: multicast */
604 key_material->key_param_set.key_info |=
605 cpu_to_le16(KEY_MCAST);
606 priv->sec_info.wapi_key_on = true;
607 }
608
609 key_material->key_param_set.type =
610 cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
611 key_material->key_param_set.key_len =
612 cpu_to_le16(WAPI_KEY_LEN);
613 memcpy(&key_material->key_param_set.key[2],
614 enc_key->key_material, enc_key->key_len);
615 memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
616 enc_key->wapi_rxpn, WAPI_RXPN_LEN);
617 key_material->key_param_set.length =
618 cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
619
620 key_param_len = (WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN) +
621 sizeof(struct mwifiex_ie_types_header);
622 cmd->size = cpu_to_le16(key_param_len +
623 sizeof(key_material->action) + S_DS_GEN);
624 return ret;
625 }
626 if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
627 dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
628 key_material->key_param_set.key_type_id =
629 cpu_to_le16(KEY_TYPE_ID_AES);
630 if (cmd_oid == KEY_INFO_ENABLED)
631 key_material->key_param_set.key_info =
632 cpu_to_le16(KEY_ENABLED);
633 else
634 key_material->key_param_set.key_info =
635 cpu_to_le16(!KEY_ENABLED);
636
637 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
638 /* AES pairwise key: unicast */
639 key_material->key_param_set.key_info |=
640 cpu_to_le16(KEY_UNICAST);
641 else /* AES group key: multicast */
642 key_material->key_param_set.key_info |=
643 cpu_to_le16(KEY_MCAST);
644 } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
645 dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
646 key_material->key_param_set.key_type_id =
647 cpu_to_le16(KEY_TYPE_ID_TKIP);
648 key_material->key_param_set.key_info =
649 cpu_to_le16(KEY_ENABLED);
650
651 if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
652 /* TKIP pairwise key: unicast */
653 key_material->key_param_set.key_info |=
654 cpu_to_le16(KEY_UNICAST);
655 else /* TKIP group key: multicast */
656 key_material->key_param_set.key_info |=
657 cpu_to_le16(KEY_MCAST);
658 }
659
660 if (key_material->key_param_set.key_type_id) {
661 key_material->key_param_set.type =
662 cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
663 key_material->key_param_set.key_len =
664 cpu_to_le16((u16) enc_key->key_len);
665 memcpy(key_material->key_param_set.key, enc_key->key_material,
666 enc_key->key_len);
667 key_material->key_param_set.length =
668 cpu_to_le16((u16) enc_key->key_len +
669 KEYPARAMSET_FIXED_LEN);
670
671 key_param_len = (u16) (enc_key->key_len + KEYPARAMSET_FIXED_LEN)
672 + sizeof(struct mwifiex_ie_types_header);
673
674 cmd->size = cpu_to_le16(key_param_len +
675 sizeof(key_material->action) + S_DS_GEN);
676 }
677
678 return ret;
679}
680
681/*
682 * This function prepares command to set/get 11d domain information.
683 *
684 * Preparation includes -
685 * - Setting command ID, action and proper size
686 * - Setting domain information fields (for SET only)
687 * - Ensuring correct endian-ness
688 */
689static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
690 struct host_cmd_ds_command *cmd,
691 u16 cmd_action)
692{
693 struct mwifiex_adapter *adapter = priv->adapter;
694 struct host_cmd_ds_802_11d_domain_info *domain_info =
695 &cmd->params.domain_info;
696 struct mwifiex_ietypes_domain_param_set *domain =
697 &domain_info->domain;
698 u8 no_of_triplet = adapter->domain_reg.no_of_triplet;
699
700 dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
701
702 cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO);
703 domain_info->action = cpu_to_le16(cmd_action);
704 if (cmd_action == HostCmd_ACT_GEN_GET) {
705 cmd->size = cpu_to_le16(sizeof(domain_info->action) + S_DS_GEN);
706 return 0;
707 }
708
709 /* Set domain info fields */
710 domain->header.type = cpu_to_le16(WLAN_EID_COUNTRY);
711 memcpy(domain->country_code, adapter->domain_reg.country_code,
712 sizeof(domain->country_code));
713
714 domain->header.len = cpu_to_le16((no_of_triplet *
715 sizeof(struct ieee80211_country_ie_triplet)) +
716 sizeof(domain->country_code));
717
718 if (no_of_triplet) {
719 memcpy(domain->triplet, adapter->domain_reg.triplet,
720 no_of_triplet *
721 sizeof(struct ieee80211_country_ie_triplet));
722
723 cmd->size = cpu_to_le16(sizeof(domain_info->action) +
724 le16_to_cpu(domain->header.len) +
725 sizeof(struct mwifiex_ie_types_header)
726 + S_DS_GEN);
727 } else {
728 cmd->size = cpu_to_le16(sizeof(domain_info->action) + S_DS_GEN);
729 }
730
731 return 0;
732}
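/*
 * Illustrative sketch (not part of the driver): the country IE length
 * computed above.  Assuming a 3-byte country code and 3-byte
 * channel/power triplets (the 802.11d layout), five triplets give
 * header.len = 5 * 3 + 3 = 18 bytes; the triplet count is an example.
 */
#include <stdio.h>

int main(void)
{
	int triplet_size = 3, country_code_size = 3, no_of_triplet = 5;
	int ie_len = no_of_triplet * triplet_size + country_code_size;

	printf("802.11d country IE length = %d bytes\n", ie_len);
	return 0;
}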
733
734/*
735 * This function prepares command to set/get RF channel.
736 *
737 * Preparation includes -
738 * - Setting command ID, action and proper size
739 * - Setting RF type and current RF channel (for SET only)
740 * - Ensuring correct endian-ness
741 */
742static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
743 struct host_cmd_ds_command *cmd,
744 u16 cmd_action, void *data_buf)
745{
746 struct host_cmd_ds_802_11_rf_channel *rf_chan =
747 &cmd->params.rf_channel;
748 uint16_t rf_type = le16_to_cpu(rf_chan->rf_type);
749
750 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_RF_CHANNEL);
751 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_rf_channel)
752 + S_DS_GEN);
753
754 if (cmd_action == HostCmd_ACT_GEN_SET) {
755 if ((priv->adapter->adhoc_start_band & BAND_A)
756 || (priv->adapter->adhoc_start_band & BAND_AN))
757 rf_chan->rf_type =
758 cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
759
760 rf_type = le16_to_cpu(rf_chan->rf_type);
761 SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset);
762 rf_chan->current_channel = cpu_to_le16(*((u16 *) data_buf));
763 }
764 rf_chan->action = cpu_to_le16(cmd_action);
765 return 0;
766}
767
768/*
769 * This function prepares command to set/get IBSS coalescing status.
770 *
771 * Preparation includes -
772 * - Setting command ID, action and proper size
773 * - Setting status to enable or disable (for SET only)
774 * - Ensuring correct endian-ness
775 */
776static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd,
777 u16 cmd_action, void *data_buf)
778{
779 struct host_cmd_ds_802_11_ibss_status *ibss_coal =
780 &(cmd->params.ibss_coalescing);
781 u16 enable = 0;
782
783 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_IBSS_COALESCING_STATUS);
784 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_ibss_status) +
785 S_DS_GEN);
786 cmd->result = 0;
787 ibss_coal->action = cpu_to_le16(cmd_action);
788
789 switch (cmd_action) {
790 case HostCmd_ACT_GEN_SET:
791 if (data_buf != NULL)
792 enable = *(u16 *) data_buf;
793 ibss_coal->enable = cpu_to_le16(enable);
794 break;
795
796	/* In other cases, nothing to do */
797 case HostCmd_ACT_GEN_GET:
798 default:
799 break;
800 }
801
802 return 0;
803}
804
805/*
806 * This function prepares command to set/get register value.
807 *
808 * Preparation includes -
809 * - Setting command ID, action and proper size
810 * - Setting register offset (for both GET and SET) and
811 * register value (for SET only)
812 * - Ensuring correct endian-ness
813 *
814 * The following type of registers can be accessed with this function -
815 * - MAC register
816 * - BBP register
817 * - RF register
818 * - PMIC register
819 * - CAU register
820 * - EEPROM
821 */
822static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
823 u16 cmd_action, void *data_buf)
824{
825 struct mwifiex_ds_reg_rw *reg_rw;
826
827 reg_rw = (struct mwifiex_ds_reg_rw *) data_buf;
828 switch (le16_to_cpu(cmd->command)) {
829 case HostCmd_CMD_MAC_REG_ACCESS:
830 {
831 struct host_cmd_ds_mac_reg_access *mac_reg;
832
833 cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
834 mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd->
835 params.mac_reg;
836 mac_reg->action = cpu_to_le16(cmd_action);
837 mac_reg->offset =
838 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
839 mac_reg->value = reg_rw->value;
840 break;
841 }
842 case HostCmd_CMD_BBP_REG_ACCESS:
843 {
844 struct host_cmd_ds_bbp_reg_access *bbp_reg;
845
846 cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
847 bbp_reg = (struct host_cmd_ds_bbp_reg_access *) &cmd->
848 params.bbp_reg;
849 bbp_reg->action = cpu_to_le16(cmd_action);
850 bbp_reg->offset =
851 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
852 bbp_reg->value = (u8) le32_to_cpu(reg_rw->value);
853 break;
854 }
855 case HostCmd_CMD_RF_REG_ACCESS:
856 {
857 struct host_cmd_ds_rf_reg_access *rf_reg;
858
859 cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
860 rf_reg = (struct host_cmd_ds_rf_reg_access *) &cmd->
861 params.rf_reg;
862 rf_reg->action = cpu_to_le16(cmd_action);
863 rf_reg->offset =
864 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
865 rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
866 break;
867 }
868 case HostCmd_CMD_PMIC_REG_ACCESS:
869 {
870 struct host_cmd_ds_pmic_reg_access *pmic_reg;
871
872 cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
873 pmic_reg = (struct host_cmd_ds_pmic_reg_access *) &cmd->
874 params.pmic_reg;
875 pmic_reg->action = cpu_to_le16(cmd_action);
876 pmic_reg->offset =
877 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
878 pmic_reg->value = (u8) le32_to_cpu(reg_rw->value);
879 break;
880 }
881 case HostCmd_CMD_CAU_REG_ACCESS:
882 {
883 struct host_cmd_ds_rf_reg_access *cau_reg;
884
885 cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
886 cau_reg = (struct host_cmd_ds_rf_reg_access *) &cmd->
887 params.rf_reg;
888 cau_reg->action = cpu_to_le16(cmd_action);
889 cau_reg->offset =
890 cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
891 cau_reg->value = (u8) le32_to_cpu(reg_rw->value);
892 break;
893 }
894 case HostCmd_CMD_802_11_EEPROM_ACCESS:
895 {
896 struct mwifiex_ds_read_eeprom *rd_eeprom =
897 (struct mwifiex_ds_read_eeprom *) data_buf;
898 struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
899 (struct host_cmd_ds_802_11_eeprom_access *)
900 &cmd->params.eeprom;
901
902 cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
903 cmd_eeprom->action = cpu_to_le16(cmd_action);
904 cmd_eeprom->offset = rd_eeprom->offset;
905 cmd_eeprom->byte_count = rd_eeprom->byte_count;
906 cmd_eeprom->value = 0;
907 break;
908 }
909 default:
910 return -1;
911 }
912
913 return 0;
914}
915
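/*
 * Illustrative sketch (not part of this patch): every register family in
 * mwifiex_cmd_reg_access() is prepared the same way -- pick the matching
 * union member, store the action, narrow the 32-bit ioctl offset to the
 * 16-bit little-endian field the firmware expects, and copy the value for
 * SET.  A minimal stand-alone analogue follows; the frame layout is an
 * assumption for illustration, not the driver's real structure.
 */
#include <stdint.h>

static void put_le16(uint8_t *dst, uint16_t v)
{
	dst[0] = (uint8_t)(v & 0xff);          /* least significant byte first */
	dst[1] = (uint8_t)(v >> 8);
}

static void put_le32(uint8_t *dst, uint32_t v)
{
	put_le16(dst, (uint16_t)(v & 0xffff));
	put_le16(dst + 2, (uint16_t)(v >> 16));
}

struct reg_access_frame {                      /* assumed wire layout */
	uint8_t action[2];                     /* GET or SET */
	uint8_t offset[2];                     /* register offset, 16 bit */
	uint8_t value[4];                      /* register value, SET only */
};

static void prep_reg_access(struct reg_access_frame *f, uint16_t action,
			    uint32_t offset, uint32_t value)
{
	put_le16(f->action, action);
	put_le16(f->offset, (uint16_t)offset); /* 32-bit ioctl offset narrowed */
	put_le32(f->value, value);
}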
916/*
917 * This function prepares the commands before sending them to the firmware.
918 *
919 * This is a generic function which calls specific command preparation
920 * routines based upon the command number.
921 */
922int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
923 u16 cmd_action, u32 cmd_oid,
924 void *data_buf, void *cmd_buf)
925{
926 struct host_cmd_ds_command *cmd_ptr =
927 (struct host_cmd_ds_command *) cmd_buf;
928 int ret = 0;
929
930 /* Prepare command */
931 switch (cmd_no) {
932 case HostCmd_CMD_GET_HW_SPEC:
933 ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
934 break;
935 case HostCmd_CMD_MAC_CONTROL:
936 ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
937 data_buf);
938 break;
939 case HostCmd_CMD_802_11_MAC_ADDRESS:
940 ret = mwifiex_cmd_802_11_mac_address(priv, cmd_ptr,
941 cmd_action);
942 break;
943 case HostCmd_CMD_MAC_MULTICAST_ADR:
944 ret = mwifiex_cmd_mac_multicast_adr(cmd_ptr, cmd_action,
945 data_buf);
946 break;
947 case HostCmd_CMD_TX_RATE_CFG:
948 ret = mwifiex_cmd_tx_rate_cfg(priv, cmd_ptr, cmd_action,
949 data_buf);
950 break;
951 case HostCmd_CMD_TXPWR_CFG:
952 ret = mwifiex_cmd_tx_power_cfg(cmd_ptr, cmd_action,
953 data_buf);
954 break;
955 case HostCmd_CMD_802_11_PS_MODE_ENH:
956 ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action,
957 (uint16_t)cmd_oid, data_buf);
958 break;
959 case HostCmd_CMD_802_11_HS_CFG_ENH:
960 ret = mwifiex_cmd_802_11_hs_cfg(priv, cmd_ptr, cmd_action,
961 (struct mwifiex_hs_config_param *) data_buf);
962 break;
963 case HostCmd_CMD_802_11_SCAN:
964 ret = mwifiex_cmd_802_11_scan(cmd_ptr, data_buf);
965 break;
966 case HostCmd_CMD_802_11_BG_SCAN_QUERY:
967 ret = mwifiex_cmd_802_11_bg_scan_query(cmd_ptr);
968 break;
969 case HostCmd_CMD_802_11_ASSOCIATE:
970 ret = mwifiex_cmd_802_11_associate(priv, cmd_ptr, data_buf);
971 break;
972 case HostCmd_CMD_802_11_DEAUTHENTICATE:
973 ret = mwifiex_cmd_802_11_deauthenticate(priv, cmd_ptr,
974 data_buf);
975 break;
976 case HostCmd_CMD_802_11_AD_HOC_START:
977 ret = mwifiex_cmd_802_11_ad_hoc_start(priv, cmd_ptr,
978 data_buf);
979 break;
980 case HostCmd_CMD_802_11_GET_LOG:
981 ret = mwifiex_cmd_802_11_get_log(cmd_ptr);
982 break;
983 case HostCmd_CMD_802_11_AD_HOC_JOIN:
984 ret = mwifiex_cmd_802_11_ad_hoc_join(priv, cmd_ptr,
985 data_buf);
986 break;
987 case HostCmd_CMD_802_11_AD_HOC_STOP:
988 ret = mwifiex_cmd_802_11_ad_hoc_stop(cmd_ptr);
989 break;
990 case HostCmd_CMD_RSSI_INFO:
991 ret = mwifiex_cmd_802_11_rssi_info(priv, cmd_ptr, cmd_action);
992 break;
993 case HostCmd_CMD_802_11_SNMP_MIB:
994 ret = mwifiex_cmd_802_11_snmp_mib(priv, cmd_ptr, cmd_action,
995 cmd_oid, data_buf);
996 break;
997 case HostCmd_CMD_802_11_TX_RATE_QUERY:
998 cmd_ptr->command =
999 cpu_to_le16(HostCmd_CMD_802_11_TX_RATE_QUERY);
1000 cmd_ptr->size =
1001 cpu_to_le16(sizeof(struct host_cmd_ds_tx_rate_query) +
1002 S_DS_GEN);
1003 priv->tx_rate = 0;
1004 ret = 0;
1005 break;
1006 case HostCmd_CMD_VERSION_EXT:
1007 cmd_ptr->command = cpu_to_le16(cmd_no);
1008 cmd_ptr->params.verext.version_str_sel =
1009 (u8) (*((u32 *) data_buf));
1010 memcpy(&cmd_ptr->params, data_buf,
1011 sizeof(struct host_cmd_ds_version_ext));
1012 cmd_ptr->size =
1013 cpu_to_le16(sizeof(struct host_cmd_ds_version_ext) +
1014 S_DS_GEN);
1015 ret = 0;
1016 break;
1017 case HostCmd_CMD_802_11_RF_CHANNEL:
1018 ret = mwifiex_cmd_802_11_rf_channel(priv, cmd_ptr, cmd_action,
1019 data_buf);
1020 break;
1021 case HostCmd_CMD_FUNC_INIT:
1022 if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
1023 priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
1024 cmd_ptr->command = cpu_to_le16(cmd_no);
1025 cmd_ptr->size = cpu_to_le16(S_DS_GEN);
1026 break;
1027 case HostCmd_CMD_FUNC_SHUTDOWN:
1028 priv->adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
1029 cmd_ptr->command = cpu_to_le16(cmd_no);
1030 cmd_ptr->size = cpu_to_le16(S_DS_GEN);
1031 break;
1032 case HostCmd_CMD_11N_ADDBA_REQ:
1033 ret = mwifiex_cmd_11n_addba_req(cmd_ptr, data_buf);
1034 break;
1035 case HostCmd_CMD_11N_DELBA:
1036 ret = mwifiex_cmd_11n_delba(cmd_ptr, data_buf);
1037 break;
1038 case HostCmd_CMD_11N_ADDBA_RSP:
1039 ret = mwifiex_cmd_11n_addba_rsp_gen(priv, cmd_ptr, data_buf);
1040 break;
1041 case HostCmd_CMD_802_11_KEY_MATERIAL:
1042 ret = mwifiex_cmd_802_11_key_material(priv, cmd_ptr,
1043 cmd_action, cmd_oid,
1044 data_buf);
1045 break;
1046 case HostCmd_CMD_802_11D_DOMAIN_INFO:
1047 ret = mwifiex_cmd_802_11d_domain_info(priv, cmd_ptr,
1048 cmd_action);
1049 break;
1050 case HostCmd_CMD_RECONFIGURE_TX_BUFF:
1051 ret = mwifiex_cmd_recfg_tx_buf(priv, cmd_ptr, cmd_action,
1052 data_buf);
1053 break;
1054 case HostCmd_CMD_AMSDU_AGGR_CTRL:
1055 ret = mwifiex_cmd_amsdu_aggr_ctrl(cmd_ptr, cmd_action,
1056 data_buf);
1057 break;
1058 case HostCmd_CMD_11N_CFG:
1059 ret = mwifiex_cmd_11n_cfg(cmd_ptr, cmd_action,
1060 data_buf);
1061 break;
1062 case HostCmd_CMD_WMM_GET_STATUS:
1063 dev_dbg(priv->adapter->dev,
1064 "cmd: WMM: WMM_GET_STATUS cmd sent\n");
1065 cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS);
1066 cmd_ptr->size =
1067 cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) +
1068 S_DS_GEN);
1069 ret = 0;
1070 break;
1071 case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
1072 ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action,
1073 data_buf);
1074 break;
1075 case HostCmd_CMD_MAC_REG_ACCESS:
1076 case HostCmd_CMD_BBP_REG_ACCESS:
1077 case HostCmd_CMD_RF_REG_ACCESS:
1078 case HostCmd_CMD_PMIC_REG_ACCESS:
1079 case HostCmd_CMD_CAU_REG_ACCESS:
1080 case HostCmd_CMD_802_11_EEPROM_ACCESS:
1081 ret = mwifiex_cmd_reg_access(cmd_ptr, cmd_action, data_buf);
1082 break;
1083 case HostCmd_CMD_SET_BSS_MODE:
1084 cmd_ptr->command = cpu_to_le16(cmd_no);
1085 if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
1086 cmd_ptr->params.bss_mode.con_type =
1087 CONNECTION_TYPE_ADHOC;
1088 else if (priv->bss_mode == NL80211_IFTYPE_STATION)
1089 cmd_ptr->params.bss_mode.con_type =
1090 CONNECTION_TYPE_INFRA;
1091 cmd_ptr->size = cpu_to_le16(sizeof(struct
1092 host_cmd_ds_set_bss_mode) + S_DS_GEN);
1093 ret = 0;
1094 break;
1095 default:
1096 dev_err(priv->adapter->dev,
1097 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
1098 ret = -1;
1099 break;
1100 }
1101 return ret;
1102}
1103
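/*
 * Illustrative sketch (not part of this patch): most branches of
 * mwifiex_sta_prepare_cmd() reduce to the same header fill -- command ID,
 * little-endian size of the payload plus the generic header (S_DS_GEN),
 * and a cleared result field.  A minimal stand-alone analogue; the header
 * layout and the S_DS_GEN value are assumptions for illustration.
 */
#include <stdint.h>
#include <stddef.h>

#define GEN_HDR_LEN 8u                 /* assumed size of the generic command header */

struct cmd_header {                    /* assumed layout, little-endian on the wire */
	uint16_t command;
	uint16_t size;
	uint16_t seq_num;
	uint16_t result;
};

static void prep_cmd_header(struct cmd_header *hdr, uint16_t cmd_id,
			    size_t payload_len)
{
	hdr->command = cmd_id;                          /* cpu_to_le16() in the driver */
	hdr->size    = (uint16_t)(payload_len + GEN_HDR_LEN);
	hdr->result  = 0;
}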
1104/*
1105 * This function issues commands to initialize firmware.
1106 *
1107 * This is called after the firmware download to bring the card to a
1108 * working state.
1109 *
1110 * The following commands are issued sequentially -
1111 * - Function init (for first interface only)
1112 * - Read MAC address (for first interface only)
1113 * - Reconfigure Tx buffer size (for first interface only)
1114 * - Enable auto deep sleep (for first interface only)
1115 * - Get Tx rate
1116 * - Get Tx power
1117 * - Set IBSS coalescing status
1118 * - Set AMSDU aggregation control
1119 * - Set 11d control
1120 * - Set MAC control (this must be the last command to initialize firmware)
1121 */
1122int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1123{
1124 int ret;
1125 u16 enable = true;
1126 struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
1127 struct mwifiex_ds_auto_ds auto_ds;
1128 enum state_11d_t state_11d;
1129
1130 if (first_sta) {
1131
1132 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_FUNC_INIT,
1133 HostCmd_ACT_GEN_SET, 0, NULL);
1134 if (ret)
1135 return -1;
1136 /* Read MAC address from HW */
1137 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_GET_HW_SPEC,
1138 HostCmd_ACT_GEN_GET, 0, NULL);
1139 if (ret)
1140 return -1;
1141
1142 /* Reconfigure tx buf size */
1143 ret = mwifiex_send_cmd_async(priv,
1144 HostCmd_CMD_RECONFIGURE_TX_BUFF,
1145 HostCmd_ACT_GEN_SET, 0,
1146 &priv->adapter->tx_buf_size);
1147 if (ret)
1148 return -1;
1149
1150 /* Enable IEEE PS by default */
1151 priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
1152 ret = mwifiex_send_cmd_async(priv,
1153 HostCmd_CMD_802_11_PS_MODE_ENH,
1154 EN_AUTO_PS, BITMAP_STA_PS, NULL);
1155 if (ret)
1156 return -1;
1157 }
1158
1159 /* get tx rate */
1160 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TX_RATE_CFG,
1161 HostCmd_ACT_GEN_GET, 0, NULL);
1162 if (ret)
1163 return -1;
1164 priv->data_rate = 0;
1165
1166 /* get tx power */
1167 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TXPWR_CFG,
1168 HostCmd_ACT_GEN_GET, 0, NULL);
1169 if (ret)
1170 return -1;
1171
1172 /* set ibss coalescing_status */
1173 ret = mwifiex_send_cmd_async(priv,
1174 HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
1175 HostCmd_ACT_GEN_SET, 0, &enable);
1176 if (ret)
1177 return -1;
1178
1179 memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
1180 amsdu_aggr_ctrl.enable = true;
1181 /* Send request to firmware */
1182 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
1183 HostCmd_ACT_GEN_SET, 0,
1184 (void *) &amsdu_aggr_ctrl);
1185 if (ret)
1186 return -1;
1187 /* MAC Control must be the last command in init_fw */
1188 /* set MAC Control */
1189 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
1190 HostCmd_ACT_GEN_SET, 0,
1191 &priv->curr_pkt_filter);
1192 if (ret)
1193 return -1;
1194
1195 if (first_sta) {
1196 /* Enable auto deep sleep */
1197 auto_ds.auto_ds = DEEP_SLEEP_ON;
1198 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
1199 ret = mwifiex_send_cmd_async(priv,
1200 HostCmd_CMD_802_11_PS_MODE_ENH,
1201 EN_AUTO_PS, BITMAP_AUTO_DS,
1202 &auto_ds);
1203 if (ret)
1204 return -1;
1205 }
1206
1207 /* Send cmd to FW to enable/disable 11D function */
1208 state_11d = ENABLE_11D;
1209 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
1210 HostCmd_ACT_GEN_SET, DOT11D_I, &state_11d);
1211 if (ret)
1212 dev_err(priv->adapter->dev, "11D: failed to enable 11D\n");
1213
1214 /* set last_init_cmd */
1215 priv->adapter->last_init_cmd = HostCmd_CMD_802_11_SNMP_MIB;
1216 ret = -EINPROGRESS;
1217
1218 return ret;
1219}
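/*
 * Illustrative sketch (not part of this patch): mwifiex_sta_init_cmd()
 * above is a plain "issue, bail out on the first failure" chain, and it
 * ends by returning -EINPROGRESS so the caller keeps waiting until the
 * response to the recorded last_init_cmd arrives.  The same control flow
 * stand-alone; issue_cmd() and the IDs are hypothetical stand-ins for
 * mwifiex_send_cmd_async() and the HostCmd numbers.
 */
#include <errno.h>
#include <stddef.h>

enum { CMD_FUNC_INIT, CMD_GET_HW_SPEC, CMD_TX_RATE_CFG, CMD_MAC_CONTROL };

static int issue_cmd(int cmd_id)
{
	(void)cmd_id;                   /* pretend the command was queued OK */
	return 0;
}

static int init_sequence(void)
{
	static const int cmds[] = {
		CMD_FUNC_INIT, CMD_GET_HW_SPEC, CMD_TX_RATE_CFG, CMD_MAC_CONTROL,
	};
	size_t i;

	for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
		if (issue_cmd(cmds[i]))
			return -1;      /* abort on the first failure */

	return -EINPROGRESS;            /* finished when the last response comes back */
}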
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
new file mode 100644
index 000000000000..d08f76429a0a
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -0,0 +1,972 @@
1/*
2 * Marvell Wireless LAN device driver: station command response handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28
29/*
30 * This function handles the command response error case.
31 *
32 * For scan response error, the function cancels all the pending
33 * scan commands and generates an event to inform the applications
34 * of the scan completion.
35 *
36 * For a Power Save command failure, the enter-PS command is not
37 * retried in Ad-hoc mode.
38 *
39 * For all other response errors, the current command buffer is freed
40 * and returned to the free command queue.
41 */
42static void
43mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
44 struct host_cmd_ds_command *resp)
45{
46 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
47 struct mwifiex_adapter *adapter = priv->adapter;
48 struct host_cmd_ds_802_11_ps_mode_enh *pm;
49 unsigned long flags;
50
51 dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n",
52 resp->command, resp->result);
53
54 if (adapter->curr_cmd->wait_q_enabled)
55 adapter->cmd_wait_q.status = -1;
56
57 switch (le16_to_cpu(resp->command)) {
58 case HostCmd_CMD_802_11_PS_MODE_ENH:
59 pm = &resp->params.psmode_enh;
60 dev_err(adapter->dev, "PS_MODE_ENH cmd failed: "
61 "result=0x%x action=0x%X\n",
62 resp->result, le16_to_cpu(pm->action));
63 /* We do not re-try enter-ps command in ad-hoc mode. */
64 if (le16_to_cpu(pm->action) == EN_AUTO_PS &&
65 (le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) &&
66 priv->bss_mode == NL80211_IFTYPE_ADHOC)
67 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
68
69 break;
70 case HostCmd_CMD_802_11_SCAN:
71 /* Cancel all pending scan command */
72 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
73 list_for_each_entry_safe(cmd_node, tmp_node,
74 &adapter->scan_pending_q, list) {
75 list_del(&cmd_node->list);
76 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
77 flags);
78 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
79 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
80 }
81 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
82
83 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
84 adapter->scan_processing = false;
85 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
86 if (priv->report_scan_result)
87 priv->report_scan_result = false;
88 if (priv->scan_pending_on_block) {
89 priv->scan_pending_on_block = false;
90 up(&priv->async_sem);
91 }
92 break;
93
94 case HostCmd_CMD_MAC_CONTROL:
95 break;
96
97 default:
98 break;
99 }
100 /* Handling errors here */
101 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
102
103 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
104 adapter->curr_cmd = NULL;
105 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
106}
107
108/*
109 * This function handles the command response of get RSSI info.
110 *
111 * Handling includes changing the header fields into CPU format
112 * and saving the following parameters in driver -
113 * - Last data and beacon RSSI value
114 * - Average data and beacon RSSI value
115 * - Last data and beacon NF value
116 * - Average data and beacon NF value
117 *
118 * The parameters are sent to the application as well, along with
119 * the calculated SNR values.
120 */
121static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
122 struct host_cmd_ds_command *resp,
123 void *data_buf)
124{
125 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
126 &resp->params.rssi_info_rsp;
127 struct mwifiex_ds_get_signal *signal;
128
129 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
130 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
131
132 priv->data_rssi_avg = le16_to_cpu(rssi_info_rsp->data_rssi_avg);
133 priv->data_nf_avg = le16_to_cpu(rssi_info_rsp->data_nf_avg);
134
135 priv->bcn_rssi_last = le16_to_cpu(rssi_info_rsp->bcn_rssi_last);
136 priv->bcn_nf_last = le16_to_cpu(rssi_info_rsp->bcn_nf_last);
137
138 priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg);
139 priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg);
140
141 /* Need to indicate IOCTL complete */
142 if (data_buf) {
143 signal = (struct mwifiex_ds_get_signal *) data_buf;
144 memset(signal, 0, sizeof(struct mwifiex_ds_get_signal));
145
146 signal->selector = ALL_RSSI_INFO_MASK;
147
148 /* RSSI */
149 signal->bcn_rssi_last = priv->bcn_rssi_last;
150 signal->bcn_rssi_avg = priv->bcn_rssi_avg;
151 signal->data_rssi_last = priv->data_rssi_last;
152 signal->data_rssi_avg = priv->data_rssi_avg;
153
154 /* SNR */
155 signal->bcn_snr_last =
156 CAL_SNR(priv->bcn_rssi_last, priv->bcn_nf_last);
157 signal->bcn_snr_avg =
158 CAL_SNR(priv->bcn_rssi_avg, priv->bcn_nf_avg);
159 signal->data_snr_last =
160 CAL_SNR(priv->data_rssi_last, priv->data_nf_last);
161 signal->data_snr_avg =
162 CAL_SNR(priv->data_rssi_avg, priv->data_nf_avg);
163
164 /* NF */
165 signal->bcn_nf_last = priv->bcn_nf_last;
166 signal->bcn_nf_avg = priv->bcn_nf_avg;
167 signal->data_nf_last = priv->data_nf_last;
168 signal->data_nf_avg = priv->data_nf_avg;
169 }
170
171 return 0;
172}
173
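/*
 * Illustrative sketch (not part of this patch): CAL_SNR() is defined
 * elsewhere in the driver; the usual relation it captures is
 * SNR = RSSI - noise floor, with both inputs in dBm.  A minimal
 * stand-alone version under that assumption:
 */
#include <stdio.h>

static int cal_snr(int rssi_dbm, int nf_dbm)
{
	return rssi_dbm - nf_dbm;      /* e.g. -55 dBm RSSI, -92 dBm NF -> 37 dB SNR */
}

int main(void)
{
	printf("SNR = %d dB\n", cal_snr(-55, -92));
	return 0;
}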
174/*
175 * This function handles the command response of set/get SNMP
176 * MIB parameters.
177 *
178 * Handling includes changing the header fields into CPU format
179 * and saving the parameter in driver.
180 *
181 * The following parameters are supported -
182 * - Fragmentation threshold
183 * - RTS threshold
184 * - Short retry limit
185 */
186static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
187 struct host_cmd_ds_command *resp,
188 void *data_buf)
189{
190 struct host_cmd_ds_802_11_snmp_mib *smib = &resp->params.smib;
191 u16 oid = le16_to_cpu(smib->oid);
192 u16 query_type = le16_to_cpu(smib->query_type);
193 u32 ul_temp;
194
195 dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x,"
196 " query_type = %#x, buf size = %#x\n",
197 oid, query_type, le16_to_cpu(smib->buf_size));
198 if (query_type == HostCmd_ACT_GEN_GET) {
199 ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
200 if (data_buf)
201 *(u32 *)data_buf = ul_temp;
202 switch (oid) {
203 case FRAG_THRESH_I:
204 dev_dbg(priv->adapter->dev,
205 "info: SNMP_RESP: FragThsd =%u\n", ul_temp);
206 break;
207 case RTS_THRESH_I:
208 dev_dbg(priv->adapter->dev,
209 "info: SNMP_RESP: RTSThsd =%u\n", ul_temp);
210 break;
211 case SHORT_RETRY_LIM_I:
212 dev_dbg(priv->adapter->dev,
213 "info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
214 break;
215 default:
216 break;
217 }
218 }
219
220 return 0;
221}
222
223/*
224 * This function handles the command response of a get log request.
225 *
226 * Handling includes changing the header fields into CPU format
227 * and sending the received parameters to the application.
228 */
229static int mwifiex_ret_get_log(struct mwifiex_private *priv,
230 struct host_cmd_ds_command *resp,
231 void *data_buf)
232{
233 struct host_cmd_ds_802_11_get_log *get_log =
234 (struct host_cmd_ds_802_11_get_log *) &resp->params.get_log;
235 struct mwifiex_ds_get_stats *stats;
236
237 if (data_buf) {
238 stats = (struct mwifiex_ds_get_stats *) data_buf;
239 stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
240 stats->failed = le32_to_cpu(get_log->failed);
241 stats->retry = le32_to_cpu(get_log->retry);
242 stats->multi_retry = le32_to_cpu(get_log->multi_retry);
243 stats->frame_dup = le32_to_cpu(get_log->frame_dup);
244 stats->rts_success = le32_to_cpu(get_log->rts_success);
245 stats->rts_failure = le32_to_cpu(get_log->rts_failure);
246 stats->ack_failure = le32_to_cpu(get_log->ack_failure);
247 stats->rx_frag = le32_to_cpu(get_log->rx_frag);
248 stats->mcast_rx_frame = le32_to_cpu(get_log->mcast_rx_frame);
249 stats->fcs_error = le32_to_cpu(get_log->fcs_error);
250 stats->tx_frame = le32_to_cpu(get_log->tx_frame);
251 stats->wep_icv_error[0] =
252 le32_to_cpu(get_log->wep_icv_err_cnt[0]);
253 stats->wep_icv_error[1] =
254 le32_to_cpu(get_log->wep_icv_err_cnt[1]);
255 stats->wep_icv_error[2] =
256 le32_to_cpu(get_log->wep_icv_err_cnt[2]);
257 stats->wep_icv_error[3] =
258 le32_to_cpu(get_log->wep_icv_err_cnt[3]);
259 }
260
261 return 0;
262}
263
264/*
265 * This function handles the command response of set/get Tx rate
266 * configurations.
267 *
268 * Handling includes changing the header fields into CPU format
269 * and saving the following parameters in driver -
270 * - DSSS rate bitmap
271 * - OFDM rate bitmap
272 * - HT MCS rate bitmaps
273 *
274 * Based on the new rate bitmaps, the function re-evaluates if
275 * auto data rate has been activated. If not, it sends another
276 * query to the firmware to get the current Tx data rate and updates
277 * the driver value.
278 */
279static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
280 struct host_cmd_ds_command *resp,
281 void *data_buf)
282{
283 struct mwifiex_rate_cfg *ds_rate;
284 struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg;
285 struct mwifiex_rate_scope *rate_scope;
286 struct mwifiex_ie_types_header *head;
287 u16 tlv, tlv_buf_len;
288 u8 *tlv_buf;
289 u32 i;
290 int ret = 0;
291
292 tlv_buf = (u8 *) ((u8 *) rate_cfg) +
293 sizeof(struct host_cmd_ds_tx_rate_cfg);
294 tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
295
296 while (tlv_buf && tlv_buf_len > 0) {
297 tlv = (*tlv_buf);
298 tlv = tlv | (*(tlv_buf + 1) << 8);
299
300 switch (tlv) {
301 case TLV_TYPE_RATE_SCOPE:
302 rate_scope = (struct mwifiex_rate_scope *) tlv_buf;
303 priv->bitmap_rates[0] =
304 le16_to_cpu(rate_scope->hr_dsss_rate_bitmap);
305 priv->bitmap_rates[1] =
306 le16_to_cpu(rate_scope->ofdm_rate_bitmap);
307 for (i = 0;
308 i <
309 sizeof(rate_scope->ht_mcs_rate_bitmap) /
310 sizeof(u16); i++)
311 priv->bitmap_rates[2 + i] =
312 le16_to_cpu(rate_scope->
313 ht_mcs_rate_bitmap[i]);
314 break;
315 /* Add RATE_DROP tlv here */
316 }
317
318 head = (struct mwifiex_ie_types_header *) tlv_buf;
319 tlv_buf += le16_to_cpu(head->len) + sizeof(*head);
320 tlv_buf_len -= le16_to_cpu(head->len);
321 }
322
323 priv->is_data_rate_auto = mwifiex_is_rate_auto(priv);
324
325 if (priv->is_data_rate_auto)
326 priv->data_rate = 0;
327 else
328 ret = mwifiex_send_cmd_async(priv,
329 HostCmd_CMD_802_11_TX_RATE_QUERY,
330 HostCmd_ACT_GEN_GET, 0, NULL);
331
332 if (data_buf) {
333 ds_rate = (struct mwifiex_rate_cfg *) data_buf;
334 if (le16_to_cpu(rate_cfg->action) == HostCmd_ACT_GEN_GET) {
335 if (priv->is_data_rate_auto) {
336 ds_rate->is_rate_auto = 1;
337 } else {
338 ds_rate->rate = mwifiex_get_rate_index(priv->
339 bitmap_rates,
340 sizeof(priv->
341 bitmap_rates));
342 if (ds_rate->rate >=
343 MWIFIEX_RATE_BITMAP_OFDM0
344 && ds_rate->rate <=
345 MWIFIEX_RATE_BITMAP_OFDM7)
346 ds_rate->rate -=
347 (MWIFIEX_RATE_BITMAP_OFDM0 -
348 MWIFIEX_RATE_INDEX_OFDM0);
349 if (ds_rate->rate >=
350 MWIFIEX_RATE_BITMAP_MCS0
351 && ds_rate->rate <=
352 MWIFIEX_RATE_BITMAP_MCS127)
353 ds_rate->rate -=
354 (MWIFIEX_RATE_BITMAP_MCS0 -
355 MWIFIEX_RATE_INDEX_MCS0);
356 }
357 }
358 }
359
360 return ret;
361}
362
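/*
 * Illustrative sketch (not part of this patch): the response parsing in
 * mwifiex_ret_tx_rate_cfg() is a plain TLV walk -- read a 16-bit
 * little-endian type and length, act on the types you know, and skip the
 * rest by advancing past the value plus its header.  A minimal
 * stand-alone walker, assuming the same 2-byte type / 2-byte length
 * header; the callback is a hypothetical hook for illustration.
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static void walk_tlvs(const uint8_t *buf, size_t len,
		      void (*handle)(uint16_t type, const uint8_t *val,
				     uint16_t vlen))
{
	while (len >= 4) {                          /* need at least one header */
		uint16_t type = get_le16(buf);
		uint16_t vlen = get_le16(buf + 2);

		if ((size_t)vlen + 4 > len)         /* truncated TLV: stop */
			break;
		handle(type, buf + 4, vlen);
		buf += (size_t)vlen + 4;            /* skip value plus header */
		len -= (size_t)vlen + 4;
	}
}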
363/*
364 * This function handles the command response of get Tx power level.
365 *
366 * Handling includes saving the maximum and minimum Tx power levels
367 * in driver, as well as sending the values to user.
368 */
369static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
370{
371 int length, max_power = -1, min_power = -1;
372 struct mwifiex_types_power_group *pg_tlv_hdr;
373 struct mwifiex_power_group *pg;
374
375 if (data_buf) {
376 pg_tlv_hdr =
377 (struct mwifiex_types_power_group *) ((u8 *) data_buf
378 + sizeof(struct host_cmd_ds_txpwr_cfg));
379 pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr +
380 sizeof(struct mwifiex_types_power_group));
381 length = pg_tlv_hdr->length;
382 if (length > 0) {
383 max_power = pg->power_max;
384 min_power = pg->power_min;
385 length -= sizeof(struct mwifiex_power_group);
386 }
387 while (length) {
388 pg++;
389 if (max_power < pg->power_max)
390 max_power = pg->power_max;
391
392 if (min_power > pg->power_min)
393 min_power = pg->power_min;
394
395 length -= sizeof(struct mwifiex_power_group);
396 }
397 if (pg_tlv_hdr->length > 0) {
398 priv->min_tx_power_level = (u8) min_power;
399 priv->max_tx_power_level = (u8) max_power;
400 }
401 } else {
402 return -1;
403 }
404
405 return 0;
406}
407
408/*
409 * This function handles the command response of set/get Tx power
410 * configurations.
411 *
412 * Handling includes changing the header fields into CPU format
413 * and saving the current Tx power level in driver.
414 */
415static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
416 struct host_cmd_ds_command *resp,
417 void *data_buf)
418{
419 struct mwifiex_adapter *adapter = priv->adapter;
420 struct host_cmd_ds_txpwr_cfg *txp_cfg = &resp->params.txp_cfg;
421 struct mwifiex_types_power_group *pg_tlv_hdr;
422 struct mwifiex_power_group *pg;
423 u16 action = le16_to_cpu(txp_cfg->action);
424
425 switch (action) {
426 case HostCmd_ACT_GEN_GET:
427 {
428 pg_tlv_hdr =
429 (struct mwifiex_types_power_group *) ((u8 *)
430 txp_cfg +
431 sizeof
432 (struct
433 host_cmd_ds_txpwr_cfg));
434 pg = (struct mwifiex_power_group *) ((u8 *)
435 pg_tlv_hdr +
436 sizeof(struct
437 mwifiex_types_power_group));
438 if (adapter->hw_status ==
439 MWIFIEX_HW_STATUS_INITIALIZING)
440 mwifiex_get_power_level(priv, txp_cfg);
441 priv->tx_power_level = (u16) pg->power_min;
442 break;
443 }
444 case HostCmd_ACT_GEN_SET:
445 if (le32_to_cpu(txp_cfg->mode)) {
446 pg_tlv_hdr =
447 (struct mwifiex_types_power_group *) ((u8 *)
448 txp_cfg +
449 sizeof
450 (struct
451 host_cmd_ds_txpwr_cfg));
452 pg = (struct mwifiex_power_group *) ((u8 *) pg_tlv_hdr
453 +
454 sizeof(struct
455 mwifiex_types_power_group));
456 if (pg->power_max == pg->power_min)
457 priv->tx_power_level = (u16) pg->power_min;
458 }
459 break;
460 default:
461 dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n",
462 action);
463 return 0;
464 }
465 dev_dbg(adapter->dev,
466 "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
467 priv->tx_power_level, priv->max_tx_power_level,
468 priv->min_tx_power_level);
469
470 return 0;
471}
472
473/*
474 * This function handles the command response of set/get MAC address.
475 *
476 * Handling includes saving the MAC address in driver.
477 */
478static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv,
479 struct host_cmd_ds_command *resp)
480{
481 struct host_cmd_ds_802_11_mac_address *cmd_mac_addr =
482 &resp->params.mac_addr;
483
484 memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN);
485
486 dev_dbg(priv->adapter->dev,
487 "info: set mac address: %pM\n", priv->curr_addr);
488
489 return 0;
490}
491
492/*
493 * This function handles the command response of set/get MAC multicast
494 * address.
495 */
496static int mwifiex_ret_mac_multicast_adr(struct mwifiex_private *priv,
497 struct host_cmd_ds_command *resp)
498{
499 return 0;
500}
501
502/*
503 * This function handles the command response of get Tx rate query.
504 *
505 * Handling includes changing the header fields into CPU format
506 * and saving the Tx rate and HT information parameters in driver.
507 *
508 * Both rate configuration and current data rate can be retrieved
509 * with this request.
510 */
511static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
512 struct host_cmd_ds_command *resp)
513{
514 priv->tx_rate = resp->params.tx_rate.tx_rate;
515 priv->tx_htinfo = resp->params.tx_rate.ht_info;
516 if (!priv->is_data_rate_auto)
517 priv->data_rate =
518 mwifiex_index_to_data_rate(priv->tx_rate,
519 priv->tx_htinfo);
520
521 return 0;
522}
523
524/*
525 * This function handles the command response of a deauthenticate
526 * command.
527 *
528 * If the deauthenticated MAC matches the current BSS MAC, the connection
529 * state is reset.
530 */
531static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
532 struct host_cmd_ds_command *resp)
533{
534 struct mwifiex_adapter *adapter = priv->adapter;
535
536 adapter->dbg.num_cmd_deauth++;
537 if (!memcmp(resp->params.deauth.mac_addr,
538 &priv->curr_bss_params.bss_descriptor.mac_address,
539 sizeof(resp->params.deauth.mac_addr)))
540 mwifiex_reset_connect_state(priv);
541
542 return 0;
543}
544
545/*
546 * This function handles the command response of ad-hoc stop.
547 *
548 * The function resets the connection state in driver.
549 */
550static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
551 struct host_cmd_ds_command *resp)
552{
553 mwifiex_reset_connect_state(priv);
554 return 0;
555}
556
557/*
558 * This function handles the command response of set/get key material.
559 *
560 * Handling includes updating the driver parameters to reflect the
561 * changes.
562 */
563static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
564 struct host_cmd_ds_command *resp)
565{
566 struct host_cmd_ds_802_11_key_material *key =
567 &resp->params.key_material;
568
569 if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
570 if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
571 dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
572 priv->wpa_is_gtk_set = true;
573 priv->scan_block = false;
574 }
575 }
576
577 memset(priv->aes_key.key_param_set.key, 0,
578 sizeof(key->key_param_set.key));
579 priv->aes_key.key_param_set.key_len = key->key_param_set.key_len;
580 memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key,
581 le16_to_cpu(priv->aes_key.key_param_set.key_len));
582
583 return 0;
584}
585
586/*
587 * This function handles the command response of get 11d domain information.
588 */
589static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
590 struct host_cmd_ds_command *resp)
591{
592 struct host_cmd_ds_802_11d_domain_info_rsp *domain_info =
593 &resp->params.domain_info_resp;
594 struct mwifiex_ietypes_domain_param_set *domain = &domain_info->domain;
595 u16 action = le16_to_cpu(domain_info->action);
596 u8 no_of_triplet;
597
598 no_of_triplet = (u8) ((le16_to_cpu(domain->header.len) -
599 IEEE80211_COUNTRY_STRING_LEN) /
600 sizeof(struct ieee80211_country_ie_triplet));
601
602 dev_dbg(priv->adapter->dev, "info: 11D Domain Info Resp:"
603 " no_of_triplet=%d\n", no_of_triplet);
604
605 if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) {
606 dev_warn(priv->adapter->dev,
607 "11D: invalid number of triplets %d "
608 "returned!!\n", no_of_triplet);
609 return -1;
610 }
611
612 switch (action) {
613 case HostCmd_ACT_GEN_SET: /* Proc Set Action */
614 break;
615 case HostCmd_ACT_GEN_GET:
616 break;
617 default:
618 dev_err(priv->adapter->dev,
619 "11D: invalid action:%d\n", domain_info->action);
620 return -1;
621 }
622
623 return 0;
624}
625
626/*
627 * This function handles the command response of get RF channel.
628 *
629 * Handling includes changing the header fields into CPU format
630 * and saving the new channel in driver.
631 */
632static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
633 struct host_cmd_ds_command *resp,
634 void *data_buf)
635{
636 struct host_cmd_ds_802_11_rf_channel *rf_channel =
637 &resp->params.rf_channel;
638 u16 new_channel = le16_to_cpu(rf_channel->current_channel);
639
640 if (priv->curr_bss_params.bss_descriptor.channel != new_channel) {
641 dev_dbg(priv->adapter->dev, "cmd: Channel Switch: %d to %d\n",
642 priv->curr_bss_params.bss_descriptor.channel,
643 new_channel);
644 /* Update the channel again */
645 priv->curr_bss_params.bss_descriptor.channel = new_channel;
646 }
647 if (data_buf)
648 *((u16 *)data_buf) = new_channel;
649
650 return 0;
651}
652
653/*
654 * This function handles the command response of get extended version.
655 *
656 * Handling includes forming the extended version string and sending it
657 * to the application.
658 */
659static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
660 struct host_cmd_ds_command *resp,
661 void *data_buf)
662{
663 struct host_cmd_ds_version_ext *ver_ext = &resp->params.verext;
664 struct host_cmd_ds_version_ext *version_ext;
665
666 if (data_buf) {
667 version_ext = (struct host_cmd_ds_version_ext *)data_buf;
668 version_ext->version_str_sel = ver_ext->version_str_sel;
669 memcpy(version_ext->version_str, ver_ext->version_str,
670 sizeof(char) * 128);
671 memcpy(priv->version_str, ver_ext->version_str, 128);
672 }
673 return 0;
674}
675
676/*
677 * This function handles the command response of register access.
678 *
679 * The register value and offset are returned to the user. For EEPROM
680 * access, the byte count is also returned.
681 */
682static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
683 void *data_buf)
684{
685 struct mwifiex_ds_reg_rw *reg_rw;
686 struct mwifiex_ds_read_eeprom *eeprom;
687
688 if (data_buf) {
689 reg_rw = (struct mwifiex_ds_reg_rw *) data_buf;
690 eeprom = (struct mwifiex_ds_read_eeprom *) data_buf;
691 switch (type) {
692 case HostCmd_CMD_MAC_REG_ACCESS:
693 {
694 struct host_cmd_ds_mac_reg_access *reg;
695 reg = (struct host_cmd_ds_mac_reg_access *)
696 &resp->params.mac_reg;
697 reg_rw->offset = cpu_to_le32(
698 (u32) le16_to_cpu(reg->offset));
699 reg_rw->value = reg->value;
700 break;
701 }
702 case HostCmd_CMD_BBP_REG_ACCESS:
703 {
704 struct host_cmd_ds_bbp_reg_access *reg;
705 reg = (struct host_cmd_ds_bbp_reg_access *)
706 &resp->params.bbp_reg;
707 reg_rw->offset = cpu_to_le32(
708 (u32) le16_to_cpu(reg->offset));
709 reg_rw->value = cpu_to_le32((u32) reg->value);
710 break;
711 }
712
713 case HostCmd_CMD_RF_REG_ACCESS:
714 {
715 struct host_cmd_ds_rf_reg_access *reg;
716 reg = (struct host_cmd_ds_rf_reg_access *)
717 &resp->params.rf_reg;
718 reg_rw->offset = cpu_to_le32(
719 (u32) le16_to_cpu(reg->offset));
720 reg_rw->value = cpu_to_le32((u32) reg->value);
721 break;
722 }
723 case HostCmd_CMD_PMIC_REG_ACCESS:
724 {
725 struct host_cmd_ds_pmic_reg_access *reg;
726 reg = (struct host_cmd_ds_pmic_reg_access *)
727 &resp->params.pmic_reg;
728 reg_rw->offset = cpu_to_le32(
729 (u32) le16_to_cpu(reg->offset));
730 reg_rw->value = cpu_to_le32((u32) reg->value);
731 break;
732 }
733 case HostCmd_CMD_CAU_REG_ACCESS:
734 {
735 struct host_cmd_ds_rf_reg_access *reg;
736 reg = (struct host_cmd_ds_rf_reg_access *)
737 &resp->params.rf_reg;
738 reg_rw->offset = cpu_to_le32(
739 (u32) le16_to_cpu(reg->offset));
740 reg_rw->value = cpu_to_le32((u32) reg->value);
741 break;
742 }
743 case HostCmd_CMD_802_11_EEPROM_ACCESS:
744 {
745 struct host_cmd_ds_802_11_eeprom_access
746 *cmd_eeprom =
747 (struct host_cmd_ds_802_11_eeprom_access
748 *) &resp->params.eeprom;
749 pr_debug("info: EEPROM read len=%x\n",
750 cmd_eeprom->byte_count);
751 if (le16_to_cpu(eeprom->byte_count) <
752 le16_to_cpu(
753 cmd_eeprom->byte_count)) {
754 eeprom->byte_count = cpu_to_le16(0);
755 pr_debug("info: EEPROM read "
756 "length is too big\n");
757 return -1;
758 }
759 eeprom->offset = cmd_eeprom->offset;
760 eeprom->byte_count = cmd_eeprom->byte_count;
761 if (le16_to_cpu(eeprom->byte_count) > 0)
762 memcpy(&eeprom->value,
763 &cmd_eeprom->value,
764 le16_to_cpu(eeprom->byte_count));
765
766 break;
767 }
768 default:
769 return -1;
770 }
771 }
772 return 0;
773}
774
775/*
776 * This function handles the command response of get IBSS coalescing status.
777 *
778 * If the received BSSID is different than the current one, the current BSSID,
779 * beacon interval, ATIM window and ERP information are updated, along with
780 * changing the ad-hoc state accordingly.
781 */
782static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
783 struct host_cmd_ds_command *resp)
784{
785 struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
786 &(resp->params.ibss_coalescing);
787 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
788
789 if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
790 return 0;
791
792 dev_dbg(priv->adapter->dev,
793 "info: new BSSID %pM\n", ibss_coal_resp->bssid);
794
795 /* If the response carries a NULL BSSID, just return; no action needed */
796 if (!memcmp(ibss_coal_resp->bssid, zero_mac, ETH_ALEN)) {
797 dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
798 return 0;
799 }
800
801 /* If the BSSID differs, modify the current BSS parameters */
802 if (memcmp(priv->curr_bss_params.bss_descriptor.mac_address,
803 ibss_coal_resp->bssid, ETH_ALEN)) {
804 /* BSSID */
805 memcpy(priv->curr_bss_params.bss_descriptor.mac_address,
806 ibss_coal_resp->bssid, ETH_ALEN);
807
808 /* Beacon Interval */
809 priv->curr_bss_params.bss_descriptor.beacon_period
810 = le16_to_cpu(ibss_coal_resp->beacon_interval);
811
812 /* ERP Information */
813 priv->curr_bss_params.bss_descriptor.erp_flags =
814 (u8) le16_to_cpu(ibss_coal_resp->use_g_rate_protect);
815
816 priv->adhoc_state = ADHOC_COALESCED;
817 }
818
819 return 0;
820}
821
822/*
823 * This function handles the command responses.
824 *
825 * This is a generic function, which calls command specific
826 * response handlers based on the command ID.
827 */
828int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv,
829 u16 cmdresp_no, void *cmd_buf)
830{
831 int ret = 0;
832 struct mwifiex_adapter *adapter = priv->adapter;
833 struct host_cmd_ds_command *resp =
834 (struct host_cmd_ds_command *) cmd_buf;
835 void *data_buf = adapter->curr_cmd->data_buf;
836
837 /* If the command is not successful, cleanup and return failure */
838 if (resp->result != HostCmd_RESULT_OK) {
839 mwifiex_process_cmdresp_error(priv, resp);
840 return -1;
841 }
842 /* Command successful, handle response */
843 switch (cmdresp_no) {
844 case HostCmd_CMD_GET_HW_SPEC:
845 ret = mwifiex_ret_get_hw_spec(priv, resp);
846 break;
847 case HostCmd_CMD_MAC_CONTROL:
848 break;
849 case HostCmd_CMD_802_11_MAC_ADDRESS:
850 ret = mwifiex_ret_802_11_mac_address(priv, resp);
851 break;
852 case HostCmd_CMD_MAC_MULTICAST_ADR:
853 ret = mwifiex_ret_mac_multicast_adr(priv, resp);
854 break;
855 case HostCmd_CMD_TX_RATE_CFG:
856 ret = mwifiex_ret_tx_rate_cfg(priv, resp, data_buf);
857 break;
858 case HostCmd_CMD_802_11_SCAN:
859 ret = mwifiex_ret_802_11_scan(priv, resp);
860 adapter->curr_cmd->wait_q_enabled = false;
861 break;
862 case HostCmd_CMD_802_11_BG_SCAN_QUERY:
863 ret = mwifiex_ret_802_11_scan(priv, resp);
864 dev_dbg(adapter->dev,
865 "info: CMD_RESP: BG_SCAN result is ready!\n");
866 break;
867 case HostCmd_CMD_TXPWR_CFG:
868 ret = mwifiex_ret_tx_power_cfg(priv, resp, data_buf);
869 break;
870 case HostCmd_CMD_802_11_PS_MODE_ENH:
871 ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf);
872 break;
873 case HostCmd_CMD_802_11_HS_CFG_ENH:
874 ret = mwifiex_ret_802_11_hs_cfg(priv, resp);
875 break;
876 case HostCmd_CMD_802_11_ASSOCIATE:
877 ret = mwifiex_ret_802_11_associate(priv, resp);
878 break;
879 case HostCmd_CMD_802_11_DEAUTHENTICATE:
880 ret = mwifiex_ret_802_11_deauthenticate(priv, resp);
881 break;
882 case HostCmd_CMD_802_11_AD_HOC_START:
883 case HostCmd_CMD_802_11_AD_HOC_JOIN:
884 ret = mwifiex_ret_802_11_ad_hoc(priv, resp);
885 break;
886 case HostCmd_CMD_802_11_AD_HOC_STOP:
887 ret = mwifiex_ret_802_11_ad_hoc_stop(priv, resp);
888 break;
889 case HostCmd_CMD_802_11_GET_LOG:
890 ret = mwifiex_ret_get_log(priv, resp, data_buf);
891 break;
892 case HostCmd_CMD_RSSI_INFO:
893 ret = mwifiex_ret_802_11_rssi_info(priv, resp, data_buf);
894 break;
895 case HostCmd_CMD_802_11_SNMP_MIB:
896 ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf);
897 break;
898 case HostCmd_CMD_802_11_TX_RATE_QUERY:
899 ret = mwifiex_ret_802_11_tx_rate_query(priv, resp);
900 break;
901 case HostCmd_CMD_802_11_RF_CHANNEL:
902 ret = mwifiex_ret_802_11_rf_channel(priv, resp, data_buf);
903 break;
904 case HostCmd_CMD_VERSION_EXT:
905 ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
906 break;
907 case HostCmd_CMD_FUNC_INIT:
908 case HostCmd_CMD_FUNC_SHUTDOWN:
909 break;
910 case HostCmd_CMD_802_11_KEY_MATERIAL:
911 ret = mwifiex_ret_802_11_key_material(priv, resp);
912 break;
913 case HostCmd_CMD_802_11D_DOMAIN_INFO:
914 ret = mwifiex_ret_802_11d_domain_info(priv, resp);
915 break;
916 case HostCmd_CMD_11N_ADDBA_REQ:
917 ret = mwifiex_ret_11n_addba_req(priv, resp);
918 break;
919 case HostCmd_CMD_11N_DELBA:
920 ret = mwifiex_ret_11n_delba(priv, resp);
921 break;
922 case HostCmd_CMD_11N_ADDBA_RSP:
923 ret = mwifiex_ret_11n_addba_resp(priv, resp);
924 break;
925 case HostCmd_CMD_RECONFIGURE_TX_BUFF:
926 adapter->tx_buf_size = (u16) le16_to_cpu(resp->params.
927 tx_buf.buff_size);
928 adapter->tx_buf_size = (adapter->tx_buf_size /
929 MWIFIEX_SDIO_BLOCK_SIZE) *
930 MWIFIEX_SDIO_BLOCK_SIZE;
931 adapter->curr_tx_buf_size = adapter->tx_buf_size;
932 dev_dbg(adapter->dev,
933 "cmd: max_tx_buf_size=%d, tx_buf_size=%d\n",
934 adapter->max_tx_buf_size, adapter->tx_buf_size);
935
936 if (adapter->if_ops.update_mp_end_port)
937 adapter->if_ops.update_mp_end_port(adapter,
938 le16_to_cpu(resp->
939 params.
940 tx_buf.
941 mp_end_port));
942 break;
943 case HostCmd_CMD_AMSDU_AGGR_CTRL:
944 ret = mwifiex_ret_amsdu_aggr_ctrl(resp, data_buf);
945 break;
946 case HostCmd_CMD_WMM_GET_STATUS:
947 ret = mwifiex_ret_wmm_get_status(priv, resp);
948 break;
949 case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
950 ret = mwifiex_ret_ibss_coalescing_status(priv, resp);
951 break;
952 case HostCmd_CMD_MAC_REG_ACCESS:
953 case HostCmd_CMD_BBP_REG_ACCESS:
954 case HostCmd_CMD_RF_REG_ACCESS:
955 case HostCmd_CMD_PMIC_REG_ACCESS:
956 case HostCmd_CMD_CAU_REG_ACCESS:
957 case HostCmd_CMD_802_11_EEPROM_ACCESS:
958 ret = mwifiex_ret_reg_access(cmdresp_no, resp, data_buf);
959 break;
960 case HostCmd_CMD_SET_BSS_MODE:
961 break;
962 case HostCmd_CMD_11N_CFG:
963 ret = mwifiex_ret_11n_cfg(resp, data_buf);
964 break;
965 default:
966 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
967 resp->command);
968 break;
969 }
970
971 return ret;
972}
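/*
 * Illustrative sketch (not part of this patch): the RECONFIGURE_TX_BUFF
 * branch above rounds the firmware-reported buffer size down to a whole
 * number of SDIO blocks via integer divide-then-multiply.  The same idiom
 * stand-alone, with an assumed 256-byte block size; the real value comes
 * from the SDIO interface code.
 */
#include <stdio.h>

#define SDIO_BLOCK_SIZE 256u            /* assumed for illustration */

static unsigned int round_down_to_block(unsigned int bytes)
{
	return (bytes / SDIO_BLOCK_SIZE) * SDIO_BLOCK_SIZE;
}

int main(void)
{
	printf("%u -> %u\n", 2312u, round_down_to_block(2312u));   /* 2312 -> 2304 */
	return 0;
}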
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
new file mode 100644
index 000000000000..fc265cab0907
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -0,0 +1,406 @@
1/*
2 * Marvell Wireless LAN device driver: station event handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * This function resets the connection state.
30 *
31 * The function is invoked after receiving a disconnect event from firmware,
32 * and performs the following actions -
33 * - Sets media status to disconnected
34 * - Cleans up Tx and Rx packets
35 * - Resets SNR/NF/RSSI values in driver
36 * - Resets security configurations in driver
37 * - Enables auto data rate
38 * - Saves the previous SSID and BSSID so that they can
39 * be used for re-association, if required
40 * - Erases current SSID and BSSID information
41 * - Sends a disconnect event to upper layers/applications.
42 */
43void
44mwifiex_reset_connect_state(struct mwifiex_private *priv)
45{
46 struct mwifiex_adapter *adapter = priv->adapter;
47
48 if (!priv->media_connected)
49 return;
50
51 dev_dbg(adapter->dev, "info: handles disconnect event\n");
52
53 priv->media_connected = false;
54
55 priv->scan_block = false;
56
57 /* Free Tx and Rx packets, report disconnect to upper layer */
58 mwifiex_clean_txrx(priv);
59
60 /* Reset SNR/NF/RSSI values */
61 priv->data_rssi_last = 0;
62 priv->data_nf_last = 0;
63 priv->data_rssi_avg = 0;
64 priv->data_nf_avg = 0;
65 priv->bcn_rssi_last = 0;
66 priv->bcn_nf_last = 0;
67 priv->bcn_rssi_avg = 0;
68 priv->bcn_nf_avg = 0;
69 priv->rxpd_rate = 0;
70 priv->rxpd_htinfo = 0;
71 priv->sec_info.wpa_enabled = false;
72 priv->sec_info.wpa2_enabled = false;
73 priv->wpa_ie_len = 0;
74
75 priv->sec_info.wapi_enabled = false;
76 priv->wapi_ie_len = 0;
77 priv->sec_info.wapi_key_on = false;
78
79 priv->sec_info.encryption_mode = 0;
80
81 /* Enable auto data rate */
82 priv->is_data_rate_auto = true;
83 priv->data_rate = 0;
84
85 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
86 priv->adhoc_state = ADHOC_IDLE;
87 priv->adhoc_is_link_sensed = false;
88 }
89
90 /*
91 * Memorize the previous SSID and BSSID so
92 * they can be used for re-association
93 */
94
95 dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n",
96 priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
97
98 dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n",
99 priv->curr_bss_params.bss_descriptor.ssid.ssid,
100 priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
101
102 memcpy(&priv->prev_ssid,
103 &priv->curr_bss_params.bss_descriptor.ssid,
104 sizeof(struct mwifiex_802_11_ssid));
105
106 memcpy(priv->prev_bssid,
107 priv->curr_bss_params.bss_descriptor.mac_address, ETH_ALEN);
108
109 /* Need to erase the current SSID and BSSID info */
110 memset(&priv->curr_bss_params, 0x00, sizeof(priv->curr_bss_params));
111
112 adapter->tx_lock_flag = false;
113 adapter->pps_uapsd_mode = false;
114
115 if (adapter->num_cmd_timeout && adapter->curr_cmd)
116 return;
117 priv->media_connected = false;
118 if (!priv->disconnect) {
119 priv->disconnect = 1;
120 dev_dbg(adapter->dev, "info: successfully disconnected from"
121 " %pM: reason code %d\n", priv->cfg_bssid,
122 WLAN_REASON_DEAUTH_LEAVING);
123 cfg80211_disconnected(priv->netdev,
124 WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
125 GFP_KERNEL);
126 queue_work(priv->workqueue, &priv->cfg_workqueue);
127 }
128 if (!netif_queue_stopped(priv->netdev))
129 netif_stop_queue(priv->netdev);
130 if (netif_carrier_ok(priv->netdev))
131 netif_carrier_off(priv->netdev);
132 /* Reset wireless stats signal info */
133 priv->w_stats.qual.level = 0;
134 priv->w_stats.qual.noise = 0;
135}
136
137/*
138 * This function handles events generated by firmware.
139 *
140 * This is a generic function and handles all events.
141 *
142 * Event specific routines are called by this function based
143 * upon the generated event cause.
144 *
145 * For the following events, the function just forwards them to upper
146 * layers, optionally recording the change -
147 * - EVENT_LINK_SENSED
148 * - EVENT_MIC_ERR_UNICAST
149 * - EVENT_MIC_ERR_MULTICAST
150 * - EVENT_PORT_RELEASE
151 * - EVENT_RSSI_LOW
152 * - EVENT_SNR_LOW
153 * - EVENT_MAX_FAIL
154 * - EVENT_RSSI_HIGH
155 * - EVENT_SNR_HIGH
156 * - EVENT_DATA_RSSI_LOW
157 * - EVENT_DATA_SNR_LOW
158 * - EVENT_DATA_RSSI_HIGH
159 * - EVENT_DATA_SNR_HIGH
160 * - EVENT_LINK_QUALITY
161 * - EVENT_PRE_BEACON_LOST
162 * - EVENT_IBSS_COALESCED
163 * - EVENT_WEP_ICV_ERR
164 * - EVENT_BW_CHANGE
165 * - EVENT_HOSTWAKE_STAIE
166 *
167 * For the following events, no action is taken -
168 * - EVENT_MIB_CHANGED
169 * - EVENT_INIT_DONE
170 * - EVENT_DUMMY_HOST_WAKEUP_SIGNAL
171 *
172 * The rest of the supported events require driver handling -
173 * - EVENT_DEAUTHENTICATED
174 * - EVENT_DISASSOCIATED
175 * - EVENT_LINK_LOST
176 * - EVENT_PS_SLEEP
177 * - EVENT_PS_AWAKE
178 * - EVENT_DEEP_SLEEP_AWAKE
179 * - EVENT_HS_ACT_REQ
180 * - EVENT_ADHOC_BCN_LOST
181 * - EVENT_BG_SCAN_REPORT
182 * - EVENT_WMM_STATUS_CHANGE
183 * - EVENT_ADDBA
184 * - EVENT_DELBA
185 * - EVENT_BA_STREAM_TIEMOUT
186 * - EVENT_AMSDU_AGGR_CTRL
187 */
188int mwifiex_process_sta_event(struct mwifiex_private *priv)
189{
190 struct mwifiex_adapter *adapter = priv->adapter;
191 int ret = 0;
192 u32 eventcause = adapter->event_cause;
193
194 switch (eventcause) {
195 case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
196 dev_err(adapter->dev, "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL,"
197 " ignoring it\n");
198 break;
199 case EVENT_LINK_SENSED:
200 dev_dbg(adapter->dev, "event: LINK_SENSED\n");
201 if (!netif_carrier_ok(priv->netdev))
202 netif_carrier_on(priv->netdev);
203 if (netif_queue_stopped(priv->netdev))
204 netif_wake_queue(priv->netdev);
205 break;
206
207 case EVENT_DEAUTHENTICATED:
208 dev_dbg(adapter->dev, "event: Deauthenticated\n");
209 adapter->dbg.num_event_deauth++;
210 if (priv->media_connected)
211 mwifiex_reset_connect_state(priv);
212 break;
213
214 case EVENT_DISASSOCIATED:
215 dev_dbg(adapter->dev, "event: Disassociated\n");
216 adapter->dbg.num_event_disassoc++;
217 if (priv->media_connected)
218 mwifiex_reset_connect_state(priv);
219 break;
220
221 case EVENT_LINK_LOST:
222 dev_dbg(adapter->dev, "event: Link lost\n");
223 adapter->dbg.num_event_link_lost++;
224 if (priv->media_connected)
225 mwifiex_reset_connect_state(priv);
226 break;
227
228 case EVENT_PS_SLEEP:
229 dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
230
231 adapter->ps_state = PS_STATE_PRE_SLEEP;
232
233 mwifiex_check_ps_cond(adapter);
234 break;
235
236 case EVENT_PS_AWAKE:
237 dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
238 if (!adapter->pps_uapsd_mode &&
239 priv->media_connected &&
240 adapter->sleep_period.period) {
241 adapter->pps_uapsd_mode = true;
242 dev_dbg(adapter->dev,
243 "event: PPS/UAPSD mode activated\n");
244 }
245 adapter->tx_lock_flag = false;
246 if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
247 if (mwifiex_check_last_packet_indication(priv)) {
248 if (!adapter->data_sent) {
249 if (!mwifiex_send_null_packet(priv,
250 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET
251 |
252 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
253 adapter->ps_state =
254 PS_STATE_SLEEP;
255 return 0;
256 }
257 }
258 }
259 adapter->ps_state = PS_STATE_AWAKE;
260 adapter->pm_wakeup_card_req = false;
261 adapter->pm_wakeup_fw_try = false;
262
263 break;
264
265 case EVENT_DEEP_SLEEP_AWAKE:
266 adapter->if_ops.wakeup_complete(adapter);
267 dev_dbg(adapter->dev, "event: DS_AWAKE\n");
268 if (adapter->is_deep_sleep)
269 adapter->is_deep_sleep = false;
270 break;
271
272 case EVENT_HS_ACT_REQ:
273 dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
274 ret = mwifiex_send_cmd_async(priv,
275 HostCmd_CMD_802_11_HS_CFG_ENH,
276 0, 0, NULL);
277 break;
278
279 case EVENT_MIC_ERR_UNICAST:
280 dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
281 break;
282
283 case EVENT_MIC_ERR_MULTICAST:
284 dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
285 break;
286 case EVENT_MIB_CHANGED:
287 case EVENT_INIT_DONE:
288 break;
289
290 case EVENT_ADHOC_BCN_LOST:
291 dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
292 priv->adhoc_is_link_sensed = false;
293 mwifiex_clean_txrx(priv);
294 if (!netif_queue_stopped(priv->netdev))
295 netif_stop_queue(priv->netdev);
296 if (netif_carrier_ok(priv->netdev))
297 netif_carrier_off(priv->netdev);
298 break;
299
300 case EVENT_BG_SCAN_REPORT:
301 dev_dbg(adapter->dev, "event: BGS_REPORT\n");
302 /* Clear the previous scan result */
303 memset(adapter->scan_table, 0x00,
304 sizeof(struct mwifiex_bssdescriptor) * IW_MAX_AP);
305 adapter->num_in_scan_table = 0;
306 adapter->bcn_buf_end = adapter->bcn_buf;
307 ret = mwifiex_send_cmd_async(priv,
308 HostCmd_CMD_802_11_BG_SCAN_QUERY,
309 HostCmd_ACT_GEN_GET, 0, NULL);
310 break;
311
312 case EVENT_PORT_RELEASE:
313 dev_dbg(adapter->dev, "event: PORT RELEASE\n");
314 break;
315
316 case EVENT_WMM_STATUS_CHANGE:
317 dev_dbg(adapter->dev, "event: WMM status changed\n");
318 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_WMM_GET_STATUS,
319 0, 0, NULL);
320 break;
321
322 case EVENT_RSSI_LOW:
323 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
324 break;
325 case EVENT_SNR_LOW:
326 dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n");
327 break;
328 case EVENT_MAX_FAIL:
329 dev_dbg(adapter->dev, "event: MAX_FAIL\n");
330 break;
331 case EVENT_RSSI_HIGH:
332 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
333 break;
334 case EVENT_SNR_HIGH:
335 dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n");
336 break;
337 case EVENT_DATA_RSSI_LOW:
338 dev_dbg(adapter->dev, "event: Data RSSI_LOW\n");
339 break;
340 case EVENT_DATA_SNR_LOW:
341 dev_dbg(adapter->dev, "event: Data SNR_LOW\n");
342 break;
343 case EVENT_DATA_RSSI_HIGH:
344 dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n");
345 break;
346 case EVENT_DATA_SNR_HIGH:
347 dev_dbg(adapter->dev, "event: Data SNR_HIGH\n");
348 break;
349 case EVENT_LINK_QUALITY:
350 dev_dbg(adapter->dev, "event: Link Quality\n");
351 break;
352 case EVENT_PRE_BEACON_LOST:
353 dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n");
354 break;
355 case EVENT_IBSS_COALESCED:
356 dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
357 ret = mwifiex_send_cmd_async(priv,
358 HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
359 HostCmd_ACT_GEN_GET, 0, NULL);
360 break;
361 case EVENT_ADDBA:
362 dev_dbg(adapter->dev, "event: ADDBA Request\n");
363 mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
364 HostCmd_ACT_GEN_SET, 0,
365 adapter->event_body);
366 break;
367 case EVENT_DELBA:
368 dev_dbg(adapter->dev, "event: DELBA Request\n");
369 mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
370 break;
371 case EVENT_BA_STREAM_TIEMOUT:
372 dev_dbg(adapter->dev, "event: BA Stream timeout\n");
373 mwifiex_11n_ba_stream_timeout(priv,
374 (struct host_cmd_ds_11n_batimeout
375 *)
376 adapter->event_body);
377 break;
378 case EVENT_AMSDU_AGGR_CTRL:
379 dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n",
380 *(u16 *) adapter->event_body);
381 adapter->tx_buf_size =
382 min(adapter->curr_tx_buf_size,
383 le16_to_cpu(*(__le16 *) adapter->event_body));
384 dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
385 adapter->tx_buf_size);
386 break;
387
388 case EVENT_WEP_ICV_ERR:
389 dev_dbg(adapter->dev, "event: WEP ICV error\n");
390 break;
391
392 case EVENT_BW_CHANGE:
393 dev_dbg(adapter->dev, "event: BW Change\n");
394 break;
395
396 case EVENT_HOSTWAKE_STAIE:
397 dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
398 break;
399 default:
400 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
401 eventcause);
402 break;
403 }
404
405 return ret;
406}
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
new file mode 100644
index 000000000000..d05907d05039
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -0,0 +1,1593 @@
1/*
2 * Marvell Wireless LAN device driver: functions for station ioctl
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27#include "cfg80211.h"
28
29/*
30 * Copies the multicast address list from device to driver.
31 *
32 * This function does not validate the destination memory for
33 * size, and the calling function must ensure enough memory is
34 * available.
35 */
36int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
37 struct net_device *dev)
38{
39 int i = 0;
40 struct netdev_hw_addr *ha;
41
42 netdev_for_each_mc_addr(ha, dev)
43 memcpy(&mlist->mac_list[i++], ha->addr, ETH_ALEN);
44
45 return i;
46}
47
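/*
 * Illustrative sketch (not part of this patch): because the copy helper
 * above trusts the caller to size mlist->mac_list, a typical caller counts
 * the addresses first and rejects lists that exceed its fixed bound.  A
 * stand-alone analogue with a plain array of 6-byte addresses; the names
 * and the upper bound are hypothetical.
 */
#include <stdint.h>
#include <string.h>

#define MAC_LEN 6

struct mcast_list {
	size_t  count;
	uint8_t addr[32][MAC_LEN];     /* assumed fixed upper bound */
};

static int copy_mcast(struct mcast_list *dst,
		      const uint8_t (*src)[MAC_LEN], size_t n)
{
	size_t i;

	if (n > sizeof(dst->addr) / sizeof(dst->addr[0]))
		return -1;             /* caller exceeded the fixed bound */
	for (i = 0; i < n; i++)
		memcpy(dst->addr[i], src[i], MAC_LEN);
	dst->count = n;
	return 0;
}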
48/*
49 * Wait queue completion handler.
50 *
51 * This function waits on a cmd wait queue. It also cancels the pending
52 * request after waking up, in case of errors.
53 */
54int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
55{
56 bool cancel_flag = false;
57 int status = adapter->cmd_wait_q.status;
58
59 dev_dbg(adapter->dev, "cmd pending\n");
60 atomic_inc(&adapter->cmd_pending);
61
62 /* Status pending, wake up main process */
63 queue_work(adapter->workqueue, &adapter->main_work);
64
65 /* Wait for completion */
66 wait_event_interruptible(adapter->cmd_wait_q.wait,
67 adapter->cmd_wait_q.condition);
68 if (!adapter->cmd_wait_q.condition)
69 cancel_flag = true;
70
71 if (cancel_flag) {
72 mwifiex_cancel_pending_ioctl(adapter);
73 dev_dbg(adapter->dev, "cmd cancel\n");
74 }
75 adapter->cmd_wait_q.status = 0;
76
77 return status;
78}
79
80/*
81 * This function prepares the correct firmware command and
82 * issues it to set the multicast list.
83 *
84 * This function can be used to enable promiscuous mode, or enable all
85 * multicast packets, or to enable selective multicast.
86 */
87int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
88 struct mwifiex_multicast_list *mcast_list)
89{
90 int ret = 0;
91 u16 old_pkt_filter;
92
93 old_pkt_filter = priv->curr_pkt_filter;
94
95 if (mcast_list->mode == MWIFIEX_PROMISC_MODE) {
96 dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n");
97 priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
98 priv->curr_pkt_filter &=
99 ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
100 } else {
101 /* Multicast */
102 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
103 if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
104 dev_dbg(priv->adapter->dev,
105 "info: Enabling All Multicast!\n");
106 priv->curr_pkt_filter |=
107 HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
108 } else {
109 priv->curr_pkt_filter &=
110 ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
111 if (mcast_list->num_multicast_addr) {
112 dev_dbg(priv->adapter->dev,
113 "info: Set multicast list=%d\n",
114 mcast_list->num_multicast_addr);
115 /* Set multicast addresses to firmware */
116 if (old_pkt_filter == priv->curr_pkt_filter) {
117 /* Send request to firmware */
118 ret = mwifiex_send_cmd_async(priv,
119 HostCmd_CMD_MAC_MULTICAST_ADR,
120 HostCmd_ACT_GEN_SET, 0,
121 mcast_list);
122 } else {
123 /* Send request to firmware */
124 ret = mwifiex_send_cmd_async(priv,
125 HostCmd_CMD_MAC_MULTICAST_ADR,
126 HostCmd_ACT_GEN_SET, 0,
127 mcast_list);
128 }
129 }
130 }
131 }
132 dev_dbg(priv->adapter->dev,
133 "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
134 old_pkt_filter, priv->curr_pkt_filter);
135 if (old_pkt_filter != priv->curr_pkt_filter) {
136 ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
137 HostCmd_ACT_GEN_SET,
138 0, &priv->curr_pkt_filter);
139 }
140
141 return ret;
142}
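/*
 * Editor's illustrative sketch -- not part of the original patch: how a
 * caller might package a net_device's multicast list and hand it to
 * mwifiex_request_set_multicast_list().  The MWIFIEX_MULTICAST_MODE value
 * and the mcast_list fields are taken from the code above; the example_*
 * wrapper name is hypothetical.
 */
static int example_sync_mcast_list(struct mwifiex_private *priv,
				   struct net_device *dev)
{
	struct mwifiex_multicast_list mcast_list;

	memset(&mcast_list, 0, sizeof(mcast_list));
	mcast_list.mode = MWIFIEX_MULTICAST_MODE;
	mcast_list.num_multicast_addr =
		mwifiex_copy_mcast_addr(&mcast_list, dev);

	return mwifiex_request_set_multicast_list(priv, &mcast_list);
}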
143
144/*
145 * In Ad-Hoc mode, the IBSS is created if it is not found in the scan list.
146 * In both Ad-Hoc and infrastructure mode, a deauthentication is performed
147 * first.
148 */
149int mwifiex_bss_start(struct mwifiex_private *priv,
150 struct mwifiex_ssid_bssid *ssid_bssid)
151{
152 int ret;
153 struct mwifiex_adapter *adapter = priv->adapter;
154 s32 i = -1;
155
156 priv->scan_block = false;
157 if (!ssid_bssid)
158 return -1;
159
160 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
161 /* Infra mode */
162 ret = mwifiex_deauthenticate(priv, NULL);
163 if (ret)
164 return ret;
165
166 /* Search for the requested SSID in the scan table */
167 if (ssid_bssid->ssid.ssid_len)
168 i = mwifiex_find_ssid_in_list(priv, &ssid_bssid->ssid,
169 NULL, NL80211_IFTYPE_STATION);
170 else
171 i = mwifiex_find_bssid_in_list(priv,
172 (u8 *) &ssid_bssid->bssid,
173 NL80211_IFTYPE_STATION);
174 if (i < 0)
175 return -1;
176
177 dev_dbg(adapter->dev,
178 "info: SSID found in scan list ... associating...\n");
179
180 /* Clear any past association response stored for
181 * application retrieval */
182 priv->assoc_rsp_size = 0;
183 ret = mwifiex_associate(priv, &adapter->scan_table[i]);
184 if (ret)
185 return ret;
186 } else {
187 /* Adhoc mode */
188 /* If the requested SSID matches current SSID, return */
189 if (ssid_bssid->ssid.ssid_len &&
190 (!mwifiex_ssid_cmp
191 (&priv->curr_bss_params.bss_descriptor.ssid,
192 &ssid_bssid->ssid)))
193 return 0;
194
195 /* Exit Adhoc mode first */
196 dev_dbg(adapter->dev, "info: Sending Adhoc Stop\n");
197 ret = mwifiex_deauthenticate(priv, NULL);
198 if (ret)
199 return ret;
200
201 priv->adhoc_is_link_sensed = false;
202
203 /* Search for the requested network in the scan table */
204 if (ssid_bssid->ssid.ssid_len)
205 i = mwifiex_find_ssid_in_list(priv,
206 &ssid_bssid->ssid, NULL,
207 NL80211_IFTYPE_ADHOC);
208 else
209 i = mwifiex_find_bssid_in_list(priv,
210 (u8 *)&ssid_bssid->bssid,
211 NL80211_IFTYPE_ADHOC);
212
213 if (i >= 0) {
214 dev_dbg(adapter->dev, "info: network found in scan"
215 " list. Joining...\n");
216 ret = mwifiex_adhoc_join(priv, &adapter->scan_table[i]);
217 if (ret)
218 return ret;
219 } else {
220 dev_dbg(adapter->dev, "info: Network not found in "
221 "the list, creating adhoc with ssid = %s\n",
222 ssid_bssid->ssid.ssid);
223 ret = mwifiex_adhoc_start(priv, &ssid_bssid->ssid);
224 if (ret)
225 return ret;
226 }
227 }
228
229 return ret;
230}
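/*
 * Editor's sketch, not in the original patch: starting or joining a BSS by
 * SSID.  The field names come from mwifiex_bss_start() above; the example_*
 * wrapper is hypothetical and assumes ssid_len fits the driver's fixed-size
 * SSID buffer (length validation is omitted for brevity).
 */
static int example_connect_by_ssid(struct mwifiex_private *priv,
				   const u8 *ssid, size_t ssid_len)
{
	struct mwifiex_ssid_bssid ssid_bssid;

	memset(&ssid_bssid, 0, sizeof(ssid_bssid));
	ssid_bssid.ssid.ssid_len = ssid_len;
	memcpy(ssid_bssid.ssid.ssid, ssid, ssid_len);

	/* Deauthenticates first, then associates or starts/joins an IBSS */
	return mwifiex_bss_start(priv, &ssid_bssid);
}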
231
232/*
233 * IOCTL request handler to set host sleep configuration.
234 *
235 * This function prepares the correct firmware command and
236 * issues it.
237 */
238int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
239 int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg)
240
241{
242 struct mwifiex_adapter *adapter = priv->adapter;
243 int status = 0;
244 u32 prev_cond = 0;
245
246 if (!hs_cfg)
247 return -ENOMEM;
248
249 switch (action) {
250 case HostCmd_ACT_GEN_SET:
251 if (adapter->pps_uapsd_mode) {
252 dev_dbg(adapter->dev, "info: Host Sleep IOCTL"
253 " is blocked in UAPSD/PPS mode\n");
254 status = -1;
255 break;
256 }
257 if (hs_cfg->is_invoke_hostcmd) {
258 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) {
259 if (!adapter->is_hs_configured)
260 /* Already cancelled */
261 break;
262 /* Save previous condition */
263 prev_cond = le32_to_cpu(adapter->hs_cfg
264 .conditions);
265 adapter->hs_cfg.conditions =
266 cpu_to_le32(hs_cfg->conditions);
267 } else if (hs_cfg->conditions) {
268 adapter->hs_cfg.conditions =
269 cpu_to_le32(hs_cfg->conditions);
270 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
271 if (hs_cfg->gap)
272 adapter->hs_cfg.gap = (u8)hs_cfg->gap;
273 } else if (adapter->hs_cfg.conditions ==
274 cpu_to_le32(
275 HOST_SLEEP_CFG_CANCEL)) {
276 /* Return failure if no parameters for HS
277 enable */
278 status = -1;
279 break;
280 }
281 if (cmd_type == MWIFIEX_SYNC_CMD)
282 status = mwifiex_send_cmd_sync(priv,
283 HostCmd_CMD_802_11_HS_CFG_ENH,
284 HostCmd_ACT_GEN_SET, 0,
285 &adapter->hs_cfg);
286 else
287 status = mwifiex_send_cmd_async(priv,
288 HostCmd_CMD_802_11_HS_CFG_ENH,
289 HostCmd_ACT_GEN_SET, 0,
290 &adapter->hs_cfg);
291 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL)
292 /* Restore previous condition */
293 adapter->hs_cfg.conditions =
294 cpu_to_le32(prev_cond);
295 } else {
296 adapter->hs_cfg.conditions =
297 cpu_to_le32(hs_cfg->conditions);
298 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
299 adapter->hs_cfg.gap = (u8)hs_cfg->gap;
300 }
301 break;
302 case HostCmd_ACT_GEN_GET:
303 hs_cfg->conditions = le32_to_cpu(adapter->hs_cfg.conditions);
304 hs_cfg->gpio = adapter->hs_cfg.gpio;
305 hs_cfg->gap = adapter->hs_cfg.gap;
306 break;
307 default:
308 status = -1;
309 break;
310 }
311
312 return status;
313}
314
315/*
316 * Sends IOCTL request to cancel the existing Host Sleep configuration.
317 *
318 * This function allocates the IOCTL request buffer, fills it
319 * with requisite parameters and calls the IOCTL handler.
320 */
321int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type)
322{
323 struct mwifiex_ds_hs_cfg hscfg;
324
325 hscfg.conditions = HOST_SLEEP_CFG_CANCEL;
326 hscfg.is_invoke_hostcmd = true;
327
328 return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
329 cmd_type, &hscfg);
330}
331EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
332
333/*
334 * Sends IOCTL request to enable Host Sleep using the current Host Sleep
335 * configuration.
336 *
337 * This function fills the request structure with the requisite parameters,
338 * calls the IOCTL handler and waits until Host Sleep is activated.
338 */
339int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
340{
341 struct mwifiex_ds_hs_cfg hscfg;
342
343 if (adapter->hs_activated) {
344		dev_dbg(adapter->dev, "cmd: HS already activated\n");
345 return true;
346 }
347
348 adapter->hs_activate_wait_q_woken = false;
349
350 memset(&hscfg, 0, sizeof(struct mwifiex_hs_config_param));
351 hscfg.is_invoke_hostcmd = true;
352
353 if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
354 MWIFIEX_BSS_ROLE_STA),
355 HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
356 &hscfg)) {
357 dev_err(adapter->dev, "IOCTL request HS enable failed\n");
358 return false;
359 }
360
361 wait_event_interruptible(adapter->hs_activate_wait_q,
362 adapter->hs_activate_wait_q_woken);
363
364 return true;
365}
366EXPORT_SYMBOL_GPL(mwifiex_enable_hs);
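/*
 * Editor's sketch (not part of the patch): programming a Host Sleep wake
 * condition synchronously via mwifiex_set_hs_params().  The condition value
 * 0x1 is a placeholder; the real wake-condition bitmasks live in the driver
 * headers.  The example_* wrapper is hypothetical.
 */
static int example_configure_hs(struct mwifiex_private *priv)
{
	struct mwifiex_ds_hs_cfg hscfg;

	memset(&hscfg, 0, sizeof(hscfg));
	hscfg.conditions = 0x1;		/* placeholder wake condition */
	hscfg.gpio = 0;
	hscfg.gap = 0;
	hscfg.is_invoke_hostcmd = true;

	return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
				     MWIFIEX_SYNC_CMD, &hscfg);
}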
367
368/*
369 * IOCTL request handler to get BSS information.
370 *
371 * This function collates the information from different driver structures
372 * to send to the user.
373 */
374int mwifiex_get_bss_info(struct mwifiex_private *priv,
375 struct mwifiex_bss_info *info)
376{
377 struct mwifiex_adapter *adapter = priv->adapter;
378 struct mwifiex_bssdescriptor *bss_desc;
379 s32 tbl_idx;
380
381 if (!info)
382 return -1;
383
384 bss_desc = &priv->curr_bss_params.bss_descriptor;
385
386 info->bss_mode = priv->bss_mode;
387
388 memcpy(&info->ssid, &bss_desc->ssid,
389 sizeof(struct mwifiex_802_11_ssid));
390
391 memcpy(&info->bssid, &bss_desc->mac_address, ETH_ALEN);
392
393 info->bss_chan = bss_desc->channel;
394
395 info->region_code = adapter->region_code;
396
397 /* Scan table index if connected */
398 info->scan_table_idx = 0;
399 if (priv->media_connected) {
400 tbl_idx =
401 mwifiex_find_ssid_in_list(priv, &bss_desc->ssid,
402 bss_desc->mac_address,
403 priv->bss_mode);
404 if (tbl_idx >= 0)
405 info->scan_table_idx = tbl_idx;
406 }
407
408 info->media_connected = priv->media_connected;
409
410 info->max_power_level = priv->max_tx_power_level;
411 info->min_power_level = priv->min_tx_power_level;
412
413 info->adhoc_state = priv->adhoc_state;
414
415 info->bcn_nf_last = priv->bcn_nf_last;
416
417 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
418 info->wep_status = true;
419 else
420 info->wep_status = false;
421
422 info->is_hs_configured = adapter->is_hs_configured;
423 info->is_deep_sleep = adapter->is_deep_sleep;
424
425 return 0;
426}
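/*
 * Editor's sketch: dumping a few of the fields collated by
 * mwifiex_get_bss_info().  Purely illustrative; not in the original patch,
 * and the example_* wrapper is hypothetical.
 */
static void example_dump_bss_info(struct mwifiex_private *priv)
{
	struct mwifiex_bss_info info;

	if (mwifiex_get_bss_info(priv, &info))
		return;

	dev_dbg(priv->adapter->dev, "connected=%d bssid=%pM channel=%d\n",
		(int) info.media_connected, info.bssid, (int) info.bss_chan);
}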
427
428/*
429 * The function sets band configurations.
430 *
431 * It performs extra checks to make sure the Ad-Hoc
432 * band and channel are compatible; otherwise it returns an error.
433 *
434 */
435int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv,
436 struct mwifiex_ds_band_cfg *radio_cfg)
437{
438 struct mwifiex_adapter *adapter = priv->adapter;
439 u8 infra_band, adhoc_band;
440 u32 adhoc_channel;
441
442 infra_band = (u8) radio_cfg->config_bands;
443 adhoc_band = (u8) radio_cfg->adhoc_start_band;
444 adhoc_channel = radio_cfg->adhoc_channel;
445
446 /* SET Infra band */
447 if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands)
448 return -1;
449
450 adapter->config_bands = infra_band;
451
452 /* SET Ad-hoc Band */
453 if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands)
454 return -1;
455
456 if (adhoc_band)
457 adapter->adhoc_start_band = adhoc_band;
458 adapter->chan_offset = (u8) radio_cfg->sec_chan_offset;
459 /*
460	 * If no adhoc_channel is supplied, verify that the existing adhoc
461	 * channel is compatible with the new adhoc_band
462 */
463 if (!adhoc_channel) {
464 if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
465 (priv, adapter->adhoc_start_band,
466 priv->adhoc_channel)) {
467 /* Pass back the default channel */
468 radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
469 if ((adapter->adhoc_start_band & BAND_A)
470 || (adapter->adhoc_start_band & BAND_AN))
471 radio_cfg->adhoc_channel =
472 DEFAULT_AD_HOC_CHANNEL_A;
473 }
474	} else {	/* Return an error if the adhoc_band and
475			   adhoc_channel combination is invalid */
476 if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
477 (priv, adapter->adhoc_start_band, (u16) adhoc_channel))
478 return -1;
479 priv->adhoc_channel = (u8) adhoc_channel;
480 }
481 if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN))
482 adapter->adhoc_11n_enabled = true;
483 else
484 adapter->adhoc_11n_enabled = false;
485
486 return 0;
487}
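/*
 * Editor's sketch: requesting 2.4 GHz operation (B/G/GN) through
 * mwifiex_set_radio_band_cfg().  The band macros appear in the function
 * above; the structure layout beyond these fields, and the example_*
 * wrapper, are assumptions.
 */
static int example_set_bg_band(struct mwifiex_private *priv)
{
	struct mwifiex_ds_band_cfg radio_cfg;

	memset(&radio_cfg, 0, sizeof(radio_cfg));
	radio_cfg.config_bands = BAND_B | BAND_G | BAND_GN;
	radio_cfg.adhoc_start_band = BAND_B | BAND_G;
	radio_cfg.adhoc_channel = 0;	/* 0: validate the existing channel */

	return mwifiex_set_radio_band_cfg(priv, &radio_cfg);
}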
488
489/*
490 * IOCTL request handler to set/get active channel.
491 *
492 * This function performs validity checking on channel/frequency
493 * compatibility and returns failure if not valid.
494 */
495int mwifiex_bss_set_channel(struct mwifiex_private *priv,
496 struct mwifiex_chan_freq_power *chan)
497{
498 struct mwifiex_adapter *adapter = priv->adapter;
499 struct mwifiex_chan_freq_power *cfp = NULL;
500
501 if (!chan)
502 return -1;
503
504 if (!chan->channel && !chan->freq)
505 return -1;
506 if (adapter->adhoc_start_band & BAND_AN)
507 adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
508 else if (adapter->adhoc_start_band & BAND_A)
509 adapter->adhoc_start_band = BAND_G | BAND_B;
510 if (chan->channel) {
511 if (chan->channel <= MAX_CHANNEL_BAND_BG)
512 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211
513 (priv, 0, (u16) chan->channel);
514 if (!cfp) {
515 cfp = mwifiex_get_cfp_by_band_and_channel_from_cfg80211
516 (priv, BAND_A, (u16) chan->channel);
517 if (cfp) {
518 if (adapter->adhoc_11n_enabled)
519 adapter->adhoc_start_band = BAND_A
520 | BAND_AN;
521 else
522 adapter->adhoc_start_band = BAND_A;
523 }
524 }
525 } else {
526 if (chan->freq <= MAX_FREQUENCY_BAND_BG)
527 cfp = mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
528 priv, 0, chan->freq);
529 if (!cfp) {
530 cfp = mwifiex_get_cfp_by_band_and_freq_from_cfg80211
531 (priv, BAND_A, chan->freq);
532 if (cfp) {
533 if (adapter->adhoc_11n_enabled)
534 adapter->adhoc_start_band = BAND_A
535 | BAND_AN;
536 else
537 adapter->adhoc_start_band = BAND_A;
538 }
539 }
540 }
541 if (!cfp || !cfp->channel) {
542 dev_err(adapter->dev, "invalid channel/freq\n");
543 return -1;
544 }
545 priv->adhoc_channel = (u8) cfp->channel;
546 chan->channel = cfp->channel;
547 chan->freq = cfp->freq;
548
549 return 0;
550}
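/*
 * Editor's sketch: selecting an ad-hoc channel by frequency with
 * mwifiex_bss_set_channel().  On success the function fills in the matching
 * channel number, as shown above.  The example_* wrapper is hypothetical.
 */
static int example_set_channel_by_freq(struct mwifiex_private *priv, u32 freq)
{
	struct mwifiex_chan_freq_power chan;

	memset(&chan, 0, sizeof(chan));
	chan.freq = freq;		/* e.g. 2437 for channel 6 */

	return mwifiex_bss_set_channel(priv, &chan);
}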
551
552/*
553 * IOCTL request handler to set/get Ad-Hoc channel.
554 *
555 * This function prepares the correct firmware command and
556 * issues it to set or get the ad-hoc channel.
557 */
558static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
559 u16 action, u16 *channel)
560{
561 if (action == HostCmd_ACT_GEN_GET) {
562 if (!priv->media_connected) {
563 *channel = priv->adhoc_channel;
564 return 0;
565 }
566 } else {
567 priv->adhoc_channel = (u8) *channel;
568 }
569
570 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_RF_CHANNEL,
571 action, 0, channel);
572}
573
574/*
575 * IOCTL request handler to find a particular BSS.
576 *
577 * The BSS can be searched with either a BSSID or a SSID. If none of
578 * these are provided, just the best BSS (best RSSI) is returned.
579 */
580int mwifiex_bss_ioctl_find_bss(struct mwifiex_private *priv,
581 struct mwifiex_ssid_bssid *ssid_bssid)
582{
583 struct mwifiex_adapter *adapter = priv->adapter;
584 struct mwifiex_bssdescriptor *bss_desc;
585 u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
586 u8 mac[ETH_ALEN];
587 int i = 0;
588
589 if (memcmp(ssid_bssid->bssid, zero_mac, sizeof(zero_mac))) {
590 i = mwifiex_find_bssid_in_list(priv,
591 (u8 *) ssid_bssid->bssid,
592 priv->bss_mode);
593 if (i < 0) {
594 memcpy(mac, ssid_bssid->bssid, sizeof(mac));
595 dev_err(adapter->dev, "cannot find bssid %pM\n", mac);
596 return -1;
597 }
598 bss_desc = &adapter->scan_table[i];
599 memcpy(&ssid_bssid->ssid, &bss_desc->ssid,
600 sizeof(struct mwifiex_802_11_ssid));
601 } else if (ssid_bssid->ssid.ssid_len) {
602 i = mwifiex_find_ssid_in_list(priv, &ssid_bssid->ssid, NULL,
603 priv->bss_mode);
604 if (i < 0) {
605 dev_err(adapter->dev, "cannot find ssid %s\n",
606 ssid_bssid->ssid.ssid);
607 return -1;
608 }
609 bss_desc = &adapter->scan_table[i];
610 memcpy(ssid_bssid->bssid, bss_desc->mac_address, ETH_ALEN);
611 } else {
612 return mwifiex_find_best_network(priv, ssid_bssid);
613 }
614
615 return 0;
616}
617
618/*
619 * IOCTL request handler to change Ad-Hoc channel.
620 *
621 * This function allocates the IOCTL request buffer, fills it
622 * with requisite parameters and calls the IOCTL handler.
623 *
624 * The function follows the following steps to perform the change -
625 * - Get current IBSS information
626 * - Get current channel
627 * - If no change is required, return
628 * - If not connected, change channel and return
629 * - If connected,
630 * - Disconnect
631 * - Change channel
632 * - Perform specific SSID scan with same SSID
633 * - Start/Join the IBSS
634 */
635int
636mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, int channel)
637{
638 int ret;
639 struct mwifiex_bss_info bss_info;
640 struct mwifiex_ssid_bssid ssid_bssid;
641 u16 curr_chan = 0;
642
643 memset(&bss_info, 0, sizeof(bss_info));
644
645 /* Get BSS information */
646 if (mwifiex_get_bss_info(priv, &bss_info))
647 return -1;
648
649 /* Get current channel */
650 ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_GET,
651 &curr_chan);
652
653 if (curr_chan == channel) {
654 ret = 0;
655 goto done;
656 }
657 dev_dbg(priv->adapter->dev, "cmd: updating channel from %d to %d\n",
658 curr_chan, channel);
659
660 if (!bss_info.media_connected) {
661 ret = 0;
662 goto done;
663 }
664
665	/* Disconnect */
666	memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
667 ret = mwifiex_deauthenticate(priv, ssid_bssid.bssid);
668
669 ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_SET,
670 (u16 *) &channel);
671
672 /* Do specific SSID scanning */
673 if (mwifiex_request_scan(priv, &bss_info.ssid)) {
674 ret = -1;
675 goto done;
676 }
677 /* Start/Join Adhoc network */
678 memset(&ssid_bssid, 0, sizeof(struct mwifiex_ssid_bssid));
679 memcpy(&ssid_bssid.ssid, &bss_info.ssid,
680 sizeof(struct mwifiex_802_11_ssid));
681
682 ret = mwifiex_bss_start(priv, &ssid_bssid);
683done:
684 return ret;
685}
686
687/*
688 * IOCTL request handler to get rate.
689 *
690 * This function prepares the correct firmware command and
691 * issues it to get the current rate if it is connected,
692 * otherwise, the function returns the lowest supported rate
693 * for the band.
694 */
695static int mwifiex_rate_ioctl_get_rate_value(struct mwifiex_private *priv,
696 struct mwifiex_rate_cfg *rate_cfg)
697{
698 struct mwifiex_adapter *adapter = priv->adapter;
699
700 rate_cfg->is_rate_auto = priv->is_data_rate_auto;
701 if (!priv->media_connected) {
702 switch (adapter->config_bands) {
703 case BAND_B:
704 /* Return the lowest supported rate for B band */
705 rate_cfg->rate = supported_rates_b[0] & 0x7f;
706 break;
707 case BAND_G:
708 case BAND_G | BAND_GN:
709 /* Return the lowest supported rate for G band */
710 rate_cfg->rate = supported_rates_g[0] & 0x7f;
711 break;
712 case BAND_B | BAND_G:
713 case BAND_A | BAND_B | BAND_G:
714 case BAND_A | BAND_B:
715 case BAND_A | BAND_B | BAND_G | BAND_AN | BAND_GN:
716 case BAND_B | BAND_G | BAND_GN:
717 /* Return the lowest supported rate for BG band */
718 rate_cfg->rate = supported_rates_bg[0] & 0x7f;
719 break;
720 case BAND_A:
721 case BAND_A | BAND_G:
722 case BAND_A | BAND_G | BAND_AN | BAND_GN:
723 case BAND_A | BAND_AN:
724 /* Return the lowest supported rate for A band */
725 rate_cfg->rate = supported_rates_a[0] & 0x7f;
726 break;
727 case BAND_GN:
728 /* Return the lowest supported rate for N band */
729 rate_cfg->rate = supported_rates_n[0] & 0x7f;
730 break;
731 default:
732 dev_warn(adapter->dev, "invalid band %#x\n",
733 adapter->config_bands);
734 break;
735 }
736 } else {
737 return mwifiex_send_cmd_sync(priv,
738 HostCmd_CMD_802_11_TX_RATE_QUERY,
739 HostCmd_ACT_GEN_GET, 0, NULL);
740 }
741
742 return 0;
743}
744
745/*
746 * IOCTL request handler to set rate.
747 *
748 * This function prepares the correct firmware command and
749 * issues it to set the current rate.
750 *
751 * The function also performs validation checking on the supplied value.
752 */
753static int mwifiex_rate_ioctl_set_rate_value(struct mwifiex_private *priv,
754 struct mwifiex_rate_cfg *rate_cfg)
755{
756 u8 rates[MWIFIEX_SUPPORTED_RATES];
757 u8 *rate;
758 int rate_index, ret;
759 u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
760 u32 i;
761 struct mwifiex_adapter *adapter = priv->adapter;
762
763 if (rate_cfg->is_rate_auto) {
764 memset(bitmap_rates, 0, sizeof(bitmap_rates));
765 /* Support all HR/DSSS rates */
766 bitmap_rates[0] = 0x000F;
767 /* Support all OFDM rates */
768 bitmap_rates[1] = 0x00FF;
769 /* Support all HT-MCSs rate */
770 for (i = 0; i < ARRAY_SIZE(priv->bitmap_rates) - 3; i++)
771 bitmap_rates[i + 2] = 0xFFFF;
772 bitmap_rates[9] = 0x3FFF;
773 } else {
774 memset(rates, 0, sizeof(rates));
775 mwifiex_get_active_data_rates(priv, rates);
776 rate = rates;
777 for (i = 0; (rate[i] && i < MWIFIEX_SUPPORTED_RATES); i++) {
778 dev_dbg(adapter->dev, "info: rate=%#x wanted=%#x\n",
779 rate[i], rate_cfg->rate);
780 if ((rate[i] & 0x7f) == (rate_cfg->rate & 0x7f))
781 break;
782 }
783 if (!rate[i] || (i == MWIFIEX_SUPPORTED_RATES)) {
784 dev_err(adapter->dev, "fixed data rate %#x is out "
785 "of range\n", rate_cfg->rate);
786 return -1;
787 }
788 memset(bitmap_rates, 0, sizeof(bitmap_rates));
789
790 rate_index = mwifiex_data_rate_to_index(rate_cfg->rate);
791
792 /* Only allow b/g rates to be set */
793 if (rate_index >= MWIFIEX_RATE_INDEX_HRDSSS0 &&
794 rate_index <= MWIFIEX_RATE_INDEX_HRDSSS3) {
795 bitmap_rates[0] = 1 << rate_index;
796 } else {
797 rate_index -= 1; /* There is a 0x00 in the table */
798 if (rate_index >= MWIFIEX_RATE_INDEX_OFDM0 &&
799 rate_index <= MWIFIEX_RATE_INDEX_OFDM7)
800 bitmap_rates[1] = 1 << (rate_index -
801 MWIFIEX_RATE_INDEX_OFDM0);
802 }
803 }
804
805 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
806 HostCmd_ACT_GEN_SET, 0, bitmap_rates);
807
808 return ret;
809}
810
811/*
812 * IOCTL request handler to set/get rate.
813 *
814 * This function can be used to set/get either the rate value or the
815 * rate index.
816 */
817static int mwifiex_rate_ioctl_cfg(struct mwifiex_private *priv,
818 struct mwifiex_rate_cfg *rate_cfg)
819{
820 int status;
821
822 if (!rate_cfg)
823 return -1;
824
825 if (rate_cfg->action == HostCmd_ACT_GEN_GET)
826 status = mwifiex_rate_ioctl_get_rate_value(priv, rate_cfg);
827 else
828 status = mwifiex_rate_ioctl_set_rate_value(priv, rate_cfg);
829
830 return status;
831}
832
833/*
834 * Sends IOCTL request to get the data rate.
835 *
836 * This function allocates the IOCTL request buffer, fills it
837 * with requisite parameters and calls the IOCTL handler.
838 */
839int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
840 struct mwifiex_rate_cfg *rate)
841{
842 int ret;
843
844 memset(rate, 0, sizeof(struct mwifiex_rate_cfg));
845 rate->action = HostCmd_ACT_GEN_GET;
846 ret = mwifiex_rate_ioctl_cfg(priv, rate);
847
848 if (!ret) {
849 if (rate && rate->is_rate_auto)
850 rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
851 priv->tx_htinfo);
852 else if (rate)
853 rate->rate = priv->data_rate;
854 } else {
855 ret = -1;
856 }
857
858 return ret;
859}
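/*
 * Editor's sketch: querying the current data rate.  rate.rate is filled
 * either from the firmware query or with the lowest supported rate, as
 * described above.  Field types are assumed; the example_* wrapper is
 * hypothetical.
 */
static void example_log_data_rate(struct mwifiex_private *priv)
{
	struct mwifiex_rate_cfg rate;

	if (!mwifiex_drv_get_data_rate(priv, &rate))
		dev_dbg(priv->adapter->dev, "data rate: %u, auto: %s\n",
			rate.rate, rate.is_rate_auto ? "yes" : "no");
}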
860
861/*
862 * IOCTL request handler to set tx power configuration.
863 *
864 * This function prepares the correct firmware command and
865 * issues it.
866 *
867 * For non-auto power mode, all the following power groups are set -
868 * - Modulation class HR/DSSS
869 * - Modulation class OFDM
870 * - Modulation class HTBW20
871 * - Modulation class HTBW40
872 */
873int mwifiex_set_tx_power(struct mwifiex_private *priv,
874 struct mwifiex_power_cfg *power_cfg)
875{
876 int ret;
877 struct host_cmd_ds_txpwr_cfg *txp_cfg;
878 struct mwifiex_types_power_group *pg_tlv;
879 struct mwifiex_power_group *pg;
880 u8 *buf;
881 u16 dbm = 0;
882
883 if (!power_cfg->is_power_auto) {
884 dbm = (u16) power_cfg->power_level;
885 if ((dbm < priv->min_tx_power_level) ||
886 (dbm > priv->max_tx_power_level)) {
887 dev_err(priv->adapter->dev, "txpower value %d dBm"
888 " is out of range (%d dBm-%d dBm)\n",
889 dbm, priv->min_tx_power_level,
890 priv->max_tx_power_level);
891 return -1;
892 }
893 }
894 buf = kzalloc(MWIFIEX_SIZE_OF_CMD_BUFFER, GFP_KERNEL);
895 if (!buf) {
896 dev_err(priv->adapter->dev, "%s: failed to alloc cmd buffer\n",
897 __func__);
898 return -ENOMEM;
899 }
900
901 txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
902 txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
903 if (!power_cfg->is_power_auto) {
904 txp_cfg->mode = cpu_to_le32(1);
905 pg_tlv = (struct mwifiex_types_power_group *) (buf +
906 sizeof(struct host_cmd_ds_txpwr_cfg));
907 pg_tlv->type = TLV_TYPE_POWER_GROUP;
908 pg_tlv->length = 4 * sizeof(struct mwifiex_power_group);
909 pg = (struct mwifiex_power_group *) (buf +
910 sizeof(struct host_cmd_ds_txpwr_cfg) +
911 sizeof(struct mwifiex_types_power_group));
912 /* Power group for modulation class HR/DSSS */
913 pg->first_rate_code = 0x00;
914 pg->last_rate_code = 0x03;
915 pg->modulation_class = MOD_CLASS_HR_DSSS;
916 pg->power_step = 0;
917 pg->power_min = (s8) dbm;
918 pg->power_max = (s8) dbm;
919 pg++;
920 /* Power group for modulation class OFDM */
921 pg->first_rate_code = 0x00;
922 pg->last_rate_code = 0x07;
923 pg->modulation_class = MOD_CLASS_OFDM;
924 pg->power_step = 0;
925 pg->power_min = (s8) dbm;
926 pg->power_max = (s8) dbm;
927 pg++;
928 /* Power group for modulation class HTBW20 */
929 pg->first_rate_code = 0x00;
930 pg->last_rate_code = 0x20;
931 pg->modulation_class = MOD_CLASS_HT;
932 pg->power_step = 0;
933 pg->power_min = (s8) dbm;
934 pg->power_max = (s8) dbm;
935 pg->ht_bandwidth = HT_BW_20;
936 pg++;
937 /* Power group for modulation class HTBW40 */
938 pg->first_rate_code = 0x00;
939 pg->last_rate_code = 0x20;
940 pg->modulation_class = MOD_CLASS_HT;
941 pg->power_step = 0;
942 pg->power_min = (s8) dbm;
943 pg->power_max = (s8) dbm;
944 pg->ht_bandwidth = HT_BW_40;
945 }
946 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TXPWR_CFG,
947 HostCmd_ACT_GEN_SET, 0, buf);
948
949 kfree(buf);
950 return ret;
951}
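/*
 * Editor's sketch: fixing the TX power at a given dBm level via
 * mwifiex_set_tx_power().  Range checking against the firmware limits is
 * done inside the function above.  The example_* wrapper is hypothetical.
 */
static int example_set_fixed_tx_power(struct mwifiex_private *priv, u16 dbm)
{
	struct mwifiex_power_cfg power_cfg;

	memset(&power_cfg, 0, sizeof(power_cfg));
	power_cfg.is_power_auto = 0;
	power_cfg.power_level = dbm;

	return mwifiex_set_tx_power(priv, &power_cfg);
}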
952
953/*
954 * IOCTL request handler to set power save mode.
955 *
956 * This function prepares the correct firmware command and
957 * issues it.
958 */
959int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
960{
961 int ret;
962 struct mwifiex_adapter *adapter = priv->adapter;
963 u16 sub_cmd;
964
965 if (*ps_mode)
966 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
967 else
968 adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
969 sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS;
970 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
971 sub_cmd, BITMAP_STA_PS, NULL);
972 if ((!ret) && (sub_cmd == DIS_AUTO_PS))
973 ret = mwifiex_send_cmd_async(priv,
974 HostCmd_CMD_802_11_PS_MODE_ENH, GET_PS,
975 0, NULL);
976
977 return ret;
978}
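/*
 * Editor's sketch: enabling IEEE power save.  A non-zero *ps_mode selects
 * PSP mode, zero selects CAM, as implemented above.  The example_* wrapper
 * is hypothetical.
 */
static int example_enable_power_save(struct mwifiex_private *priv)
{
	u32 ps_mode = 1;

	return mwifiex_drv_set_power(priv, &ps_mode);
}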
979
980/*
981 * IOCTL request handler to set/reset WPA IE.
982 *
983 * The supplied WPA IE is treated as an opaque buffer. Only the first field
984 * is checked to determine the WPA version. If the buffer length is zero, the
985 * existing WPA IE is reset.
986 */
987static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
988 u8 *ie_data_ptr, u16 ie_len)
989{
990 if (ie_len) {
991 if (ie_len > sizeof(priv->wpa_ie)) {
992 dev_err(priv->adapter->dev,
993 "failed to copy WPA IE, too big\n");
994 return -1;
995 }
996 memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
997 priv->wpa_ie_len = (u8) ie_len;
998 dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
999 priv->wpa_ie_len, priv->wpa_ie[0]);
1000
1001 if (priv->wpa_ie[0] == WLAN_EID_WPA) {
1002 priv->sec_info.wpa_enabled = true;
1003 } else if (priv->wpa_ie[0] == WLAN_EID_RSN) {
1004 priv->sec_info.wpa2_enabled = true;
1005 } else {
1006 priv->sec_info.wpa_enabled = false;
1007 priv->sec_info.wpa2_enabled = false;
1008 }
1009 } else {
1010 memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
1011 priv->wpa_ie_len = 0;
1012 dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n",
1013 priv->wpa_ie_len, priv->wpa_ie[0]);
1014 priv->sec_info.wpa_enabled = false;
1015 priv->sec_info.wpa2_enabled = false;
1016 }
1017
1018 return 0;
1019}
1020
1021/*
1022 * IOCTL request handler to set/reset WAPI IE.
1023 *
1024 * The supplied WAPI IE is treated as an opaque buffer. Only the first field
1025 * is checked to internally enable WAPI. If the buffer length is zero, the
1026 * existing WAPI IE is reset.
1027 */
1028static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
1029 u8 *ie_data_ptr, u16 ie_len)
1030{
1031 if (ie_len) {
1032 if (ie_len > sizeof(priv->wapi_ie)) {
1033 dev_dbg(priv->adapter->dev,
1034 "info: failed to copy WAPI IE, too big\n");
1035 return -1;
1036 }
1037 memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
1038 priv->wapi_ie_len = ie_len;
1039 dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
1040 priv->wapi_ie_len, priv->wapi_ie[0]);
1041
1042 if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
1043 priv->sec_info.wapi_enabled = true;
1044 } else {
1045 memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie));
1046 priv->wapi_ie_len = ie_len;
1047 dev_dbg(priv->adapter->dev,
1048 "info: Reset wapi_ie_len=%d IE=%#x\n",
1049 priv->wapi_ie_len, priv->wapi_ie[0]);
1050 priv->sec_info.wapi_enabled = false;
1051 }
1052 return 0;
1053}
1054
1055/*
1056 * IOCTL request handler to set WAPI key.
1057 *
1058 * This function prepares the correct firmware command and
1059 * issues it.
1060 */
1061static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
1062 struct mwifiex_ds_encrypt_key *encrypt_key)
1063{
1064
1065 return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
1066 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
1067 encrypt_key);
1068}
1069
1070/*
1071 * IOCTL request handler to set WEP network key.
1072 *
1073 * This function prepares the correct firmware command and
1074 * issues it, after validation checks.
1075 */
1076static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
1077 struct mwifiex_ds_encrypt_key *encrypt_key)
1078{
1079 int ret;
1080 struct mwifiex_wep_key *wep_key;
1081 int index;
1082
1083 if (priv->wep_key_curr_index >= NUM_WEP_KEYS)
1084 priv->wep_key_curr_index = 0;
1085 wep_key = &priv->wep_key[priv->wep_key_curr_index];
1086 index = encrypt_key->key_index;
1087 if (encrypt_key->key_disable) {
1088 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_DISABLED;
1089 } else if (!encrypt_key->key_len) {
1090 /* Copy the required key as the current key */
1091 wep_key = &priv->wep_key[index];
1092 if (!wep_key->key_length) {
1093 dev_err(priv->adapter->dev,
1094 "key not set, so cannot enable it\n");
1095 return -1;
1096 }
1097 priv->wep_key_curr_index = (u16) index;
1098 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_ENABLED;
1099 } else {
1100 wep_key = &priv->wep_key[index];
1101 memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
1102 /* Copy the key in the driver */
1103 memcpy(wep_key->key_material,
1104 encrypt_key->key_material,
1105 encrypt_key->key_len);
1106 wep_key->key_index = index;
1107 wep_key->key_length = encrypt_key->key_len;
1108 priv->sec_info.wep_status = MWIFIEX_802_11_WEP_ENABLED;
1109 }
1110 if (wep_key->key_length) {
1111 /* Send request to firmware */
1112 ret = mwifiex_send_cmd_async(priv,
1113 HostCmd_CMD_802_11_KEY_MATERIAL,
1114 HostCmd_ACT_GEN_SET, 0, NULL);
1115 if (ret)
1116 return ret;
1117 }
1118 if (priv->sec_info.wep_status == MWIFIEX_802_11_WEP_ENABLED)
1119 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
1120 else
1121 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
1122
1123 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
1124 HostCmd_ACT_GEN_SET, 0,
1125 &priv->curr_pkt_filter);
1126
1127 return ret;
1128}
1129
1130/*
1131 * IOCTL request handler to set WPA key.
1132 *
1133 * This function prepares the correct firmware command and
1134 * issues it, after validation checks.
1135 *
1136 * Current driver only supports key length of up to 32 bytes.
1137 *
1138 * This function can also be used to disable a currently set key.
1139 */
1140static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
1141 struct mwifiex_ds_encrypt_key *encrypt_key)
1142{
1143 int ret;
1144 u8 remove_key = false;
1145 struct host_cmd_ds_802_11_key_material *ibss_key;
1146
1147 /* Current driver only supports key length of up to 32 bytes */
1148 if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) {
1149 dev_err(priv->adapter->dev, "key length too long\n");
1150 return -1;
1151 }
1152
1153 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
1154 /*
1155 * IBSS/WPA-None uses only one key (Group) for both receiving
1156 * and sending unicast and multicast packets.
1157 */
1158 /* Send the key as PTK to firmware */
1159 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
1160 ret = mwifiex_send_cmd_async(priv,
1161 HostCmd_CMD_802_11_KEY_MATERIAL,
1162 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
1163 encrypt_key);
1164 if (ret)
1165 return ret;
1166
1167 ibss_key = &priv->aes_key;
1168 memset(ibss_key, 0,
1169 sizeof(struct host_cmd_ds_802_11_key_material));
1170 /* Copy the key in the driver */
1171 memcpy(ibss_key->key_param_set.key, encrypt_key->key_material,
1172 encrypt_key->key_len);
1173 memcpy(&ibss_key->key_param_set.key_len, &encrypt_key->key_len,
1174 sizeof(ibss_key->key_param_set.key_len));
1175 ibss_key->key_param_set.key_type_id
1176 = cpu_to_le16(KEY_TYPE_ID_TKIP);
1177 ibss_key->key_param_set.key_info = cpu_to_le16(KEY_ENABLED);
1178
1179 /* Send the key as GTK to firmware */
1180 encrypt_key->key_index = ~MWIFIEX_KEY_INDEX_UNICAST;
1181 }
1182
1183 if (!encrypt_key->key_index)
1184 encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
1185
1186 if (remove_key)
1187 ret = mwifiex_send_cmd_sync(priv,
1188 HostCmd_CMD_802_11_KEY_MATERIAL,
1189 HostCmd_ACT_GEN_SET, !(KEY_INFO_ENABLED),
1190 encrypt_key);
1191 else
1192 ret = mwifiex_send_cmd_sync(priv,
1193 HostCmd_CMD_802_11_KEY_MATERIAL,
1194 HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
1195 encrypt_key);
1196
1197 return ret;
1198}
1199
1200/*
1201 * IOCTL request handler to set/get network keys.
1202 *
1203 * This is a generic key handling function which supports WEP, WPA
1204 * and WAPI.
1205 */
1206static int
1207mwifiex_sec_ioctl_encrypt_key(struct mwifiex_private *priv,
1208 struct mwifiex_ds_encrypt_key *encrypt_key)
1209{
1210 int status;
1211
1212 if (encrypt_key->is_wapi_key)
1213 status = mwifiex_sec_ioctl_set_wapi_key(priv, encrypt_key);
1214 else if (encrypt_key->key_len > WLAN_KEY_LEN_WEP104)
1215 status = mwifiex_sec_ioctl_set_wpa_key(priv, encrypt_key);
1216 else
1217 status = mwifiex_sec_ioctl_set_wep_key(priv, encrypt_key);
1218 return status;
1219}
1220
1221/*
1222 * This function returns the driver version.
1223 */
1224int
1225mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
1226 int max_len)
1227{
1228 union {
1229 u32 l;
1230 u8 c[4];
1231 } ver;
1232 char fw_ver[32];
1233
1234 ver.l = adapter->fw_release_number;
1235 sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]);
1236
1237 snprintf(version, max_len, driver_version, fw_ver);
1238
1239 dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version);
1240
1241 return 0;
1242}
1243
1244/*
1245 * Sends IOCTL request to get signal information.
1246 *
1247 * This function allocates the IOCTL request buffer, fills it
1248 * with requisite parameters and calls the IOCTL handler.
1249 */
1250int mwifiex_get_signal_info(struct mwifiex_private *priv,
1251 struct mwifiex_ds_get_signal *signal)
1252{
1253 int status;
1254
1255 signal->selector = ALL_RSSI_INFO_MASK;
1256
1257 /* Signal info can be obtained only if connected */
1258 if (!priv->media_connected) {
1259 dev_dbg(priv->adapter->dev,
1260			"info: Cannot get signal in disconnected state\n");
1261 return -1;
1262 }
1263
1264 status = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
1265 HostCmd_ACT_GEN_GET, 0, signal);
1266
1267 if (!status) {
1268 if (signal->selector & BCN_RSSI_AVG_MASK)
1269 priv->w_stats.qual.level = signal->bcn_rssi_avg;
1270 if (signal->selector & BCN_NF_AVG_MASK)
1271 priv->w_stats.qual.noise = signal->bcn_nf_avg;
1272 }
1273
1274 return status;
1275}
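/*
 * Editor's sketch: polling RSSI/noise after association.  The selector and
 * result fields are those used in mwifiex_get_signal_info() above; field
 * types are assumed and the example_* wrapper is hypothetical.
 */
static void example_log_signal(struct mwifiex_private *priv)
{
	struct mwifiex_ds_get_signal signal;

	memset(&signal, 0, sizeof(signal));
	if (!mwifiex_get_signal_info(priv, &signal))
		dev_dbg(priv->adapter->dev, "rssi=%d nf=%d\n",
			(int) signal.bcn_rssi_avg, (int) signal.bcn_nf_avg);
}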
1276
1277/*
1278 * Sends IOCTL request to set encoding parameters.
1279 *
1280 * This function allocates the IOCTL request buffer, fills it
1281 * with requisite parameters and calls the IOCTL handler.
1282 */
1283int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
1284 int key_len, u8 key_index, int disable)
1285{
1286 struct mwifiex_ds_encrypt_key encrypt_key;
1287
1288 memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
1289 encrypt_key.key_len = key_len;
1290 if (!disable) {
1291 encrypt_key.key_index = key_index;
1292 if (key_len)
1293 memcpy(encrypt_key.key_material, key, key_len);
1294 } else {
1295 encrypt_key.key_disable = true;
1296 }
1297
1298 return mwifiex_sec_ioctl_encrypt_key(priv, &encrypt_key);
1299}
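/*
 * Editor's sketch (not part of the patch): installing a 104-bit WEP key at
 * index 0 through mwifiex_set_encode().  The key material is a placeholder
 * and the example_* wrapper is hypothetical.
 */
static int example_set_wep104(struct mwifiex_private *priv)
{
	static const u8 key[13] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
				    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d };

	return mwifiex_set_encode(priv, key, sizeof(key), 0, 0);
}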
1300
1301/*
1302 * Sends IOCTL request to get extended version.
1303 *
1304 * This function allocates the IOCTL request buffer, fills it
1305 * with requisite parameters and calls the IOCTL handler.
1306 */
1307int
1308mwifiex_get_ver_ext(struct mwifiex_private *priv)
1309{
1310 struct mwifiex_ver_ext ver_ext;
1311
1312	memset(&ver_ext, 0, sizeof(struct mwifiex_ver_ext));
1313 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT,
1314 HostCmd_ACT_GEN_GET, 0, &ver_ext))
1315 return -1;
1316
1317 return 0;
1318}
1319
1320/*
1321 * Sends IOCTL request to get statistics information.
1322 *
1323 * This function allocates the IOCTL request buffer, fills it
1324 * with requisite parameters and calls the IOCTL handler.
1325 */
1326int
1327mwifiex_get_stats_info(struct mwifiex_private *priv,
1328 struct mwifiex_ds_get_stats *log)
1329{
1330 int ret;
1331
1332 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
1333 HostCmd_ACT_GEN_GET, 0, log);
1334
1335 if (!ret) {
1336 priv->w_stats.discard.fragment = log->fcs_error;
1337 priv->w_stats.discard.retries = log->retry;
1338 priv->w_stats.discard.misc = log->ack_failure;
1339 }
1340
1341 return ret;
1342}
1343
1344/*
1345 * IOCTL request handler to read/write register.
1346 *
1347 * This function prepares the correct firmware command and
1348 * issues it.
1349 *
1350 * Access to the following registers are supported -
1351 * - MAC
1352 * - BBP
1353 * - RF
1354 * - PMIC
1355 * - CAU
1356 */
1357static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
1358 struct mwifiex_ds_reg_rw *reg_rw,
1359 u16 action)
1360{
1361 u16 cmd_no;
1362
1363 switch (le32_to_cpu(reg_rw->type)) {
1364 case MWIFIEX_REG_MAC:
1365 cmd_no = HostCmd_CMD_MAC_REG_ACCESS;
1366 break;
1367 case MWIFIEX_REG_BBP:
1368 cmd_no = HostCmd_CMD_BBP_REG_ACCESS;
1369 break;
1370 case MWIFIEX_REG_RF:
1371 cmd_no = HostCmd_CMD_RF_REG_ACCESS;
1372 break;
1373 case MWIFIEX_REG_PMIC:
1374 cmd_no = HostCmd_CMD_PMIC_REG_ACCESS;
1375 break;
1376 case MWIFIEX_REG_CAU:
1377 cmd_no = HostCmd_CMD_CAU_REG_ACCESS;
1378 break;
1379 default:
1380 return -1;
1381 }
1382
1383 return mwifiex_send_cmd_sync(priv, cmd_no, action, 0, reg_rw);
1384
1385}
1386
1387/*
1388 * Sends IOCTL request to write to a register.
1389 *
1390 * This function allocates the IOCTL request buffer, fills it
1391 * with requisite parameters and calls the IOCTL handler.
1392 */
1393int
1394mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type,
1395 u32 reg_offset, u32 reg_value)
1396{
1397 struct mwifiex_ds_reg_rw reg_rw;
1398
1399 reg_rw.type = cpu_to_le32(reg_type);
1400 reg_rw.offset = cpu_to_le32(reg_offset);
1401 reg_rw.value = cpu_to_le32(reg_value);
1402
1403 return mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_SET);
1404}
1405
1406/*
1407 * Sends IOCTL request to read from a register.
1408 *
1409 * This function allocates the IOCTL request buffer, fills it
1410 * with requisite parameters and calls the IOCTL handler.
1411 */
1412int
1413mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type,
1414 u32 reg_offset, u32 *value)
1415{
1416 int ret;
1417 struct mwifiex_ds_reg_rw reg_rw;
1418
1419 reg_rw.type = cpu_to_le32(reg_type);
1420 reg_rw.offset = cpu_to_le32(reg_offset);
1421 ret = mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_GET);
1422
1423 if (ret)
1424 goto done;
1425
1426 *value = le32_to_cpu(reg_rw.value);
1427
1428done:
1429 return ret;
1430}
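/*
 * Editor's sketch: reading a MAC register and writing the value back using
 * the helpers above.  The register offset 0x40 is a placeholder and the
 * example_* wrapper is hypothetical.
 */
static void example_reg_access(struct mwifiex_private *priv)
{
	u32 value = 0;

	if (!mwifiex_reg_read(priv, MWIFIEX_REG_MAC, 0x40, &value))
		mwifiex_reg_write(priv, MWIFIEX_REG_MAC, 0x40, value);
}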
1431
1432/*
1433 * Sends IOCTL request to read from EEPROM.
1434 *
1435 * This function allocates the IOCTL request buffer, fills it
1436 * with requisite parameters and calls the IOCTL handler.
1437 */
1438int
1439mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
1440 u8 *value)
1441{
1442 int ret;
1443 struct mwifiex_ds_read_eeprom rd_eeprom;
1444
1445 rd_eeprom.offset = cpu_to_le16((u16) offset);
1446 rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
1447
1448 /* Send request to firmware */
1449 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
1450 HostCmd_ACT_GEN_GET, 0, &rd_eeprom);
1451
1452 if (!ret)
1453 memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA);
1454 return ret;
1455}
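/*
 * Editor's sketch: reading the start of the EEPROM.  The destination buffer
 * must hold MAX_EEPROM_DATA bytes, because the helper above always copies
 * that many on success.  The example_* wrapper is hypothetical.
 */
static int example_read_eeprom(struct mwifiex_private *priv)
{
	u8 buf[MAX_EEPROM_DATA];

	return mwifiex_eeprom_read(priv, 0, sizeof(buf), buf);
}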
1456
1457/*
1458 * This function sets a generic IE. In addition to generic IEs, it can
1459 * also handle WPA, WPA2 and WAPI IEs.
1460 */
1461static int
1462mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
1463 u16 ie_len)
1464{
1465 int ret = 0;
1466 struct ieee_types_vendor_header *pvendor_ie;
1467 const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };
1468 const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 };
1469
1470 /* If the passed length is zero, reset the buffer */
1471 if (!ie_len) {
1472 priv->gen_ie_buf_len = 0;
1473 priv->wps.session_enable = false;
1474
1475 return 0;
1476 } else if (!ie_data_ptr) {
1477 return -1;
1478 }
1479 pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
1480 /* Test to see if it is a WPA IE, if not, then it is a gen IE */
1481 if (((pvendor_ie->element_id == WLAN_EID_WPA)
1482 && (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui))))
1483 || (pvendor_ie->element_id == WLAN_EID_RSN)) {
1484
1485 /* IE is a WPA/WPA2 IE so call set_wpa function */
1486 ret = mwifiex_set_wpa_ie_helper(priv, ie_data_ptr, ie_len);
1487 priv->wps.session_enable = false;
1488
1489 return ret;
1490 } else if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) {
1491 /* IE is a WAPI IE so call set_wapi function */
1492 ret = mwifiex_set_wapi_ie(priv, ie_data_ptr, ie_len);
1493
1494 return ret;
1495 }
1496 /*
1497 * Verify that the passed length is not larger than the
1498 * available space remaining in the buffer
1499 */
1500 if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) {
1501
1502 /* Test to see if it is a WPS IE, if so, enable
1503 * wps session flag
1504 */
1505 pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
1506 if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC)
1507 && (!memcmp(pvendor_ie->oui, wps_oui,
1508 sizeof(wps_oui)))) {
1509 priv->wps.session_enable = true;
1510 dev_dbg(priv->adapter->dev,
1511 "info: WPS Session Enabled.\n");
1512 }
1513
1514 /* Append the passed data to the end of the
1515		   gen_ie_buf buffer */
1516 memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr,
1517 ie_len);
1518 /* Increment the stored buffer length by the
1519 size passed */
1520 priv->gen_ie_buf_len += ie_len;
1521 } else {
1522 /* Passed data does not fit in the remaining
1523 buffer space */
1524 ret = -1;
1525 }
1526
1527 /* Return 0, or -1 for error case */
1528 return ret;
1529}
1530
1531/*
1532 * IOCTL request handler to set/get generic IE.
1533 *
1534 * In addition to various generic IEs, this function can also be
1535 * used to set the ARP filter.
1536 */
1537static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
1538 struct mwifiex_ds_misc_gen_ie *gen_ie,
1539 u16 action)
1540{
1541 struct mwifiex_adapter *adapter = priv->adapter;
1542
1543 switch (gen_ie->type) {
1544 case MWIFIEX_IE_TYPE_GEN_IE:
1545 if (action == HostCmd_ACT_GEN_GET) {
1546 gen_ie->len = priv->wpa_ie_len;
1547 memcpy(gen_ie->ie_data, priv->wpa_ie, gen_ie->len);
1548 } else {
1549 mwifiex_set_gen_ie_helper(priv, gen_ie->ie_data,
1550 (u16) gen_ie->len);
1551 }
1552 break;
1553 case MWIFIEX_IE_TYPE_ARP_FILTER:
1554 memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter));
1555 if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) {
1556 adapter->arp_filter_size = 0;
1557 dev_err(adapter->dev, "invalid ARP filter size\n");
1558 return -1;
1559 } else {
1560 memcpy(adapter->arp_filter, gen_ie->ie_data,
1561 gen_ie->len);
1562 adapter->arp_filter_size = gen_ie->len;
1563 }
1564 break;
1565 default:
1566 dev_err(adapter->dev, "invalid IE type\n");
1567 return -1;
1568 }
1569 return 0;
1570}
1571
1572/*
1573 * Sends IOCTL request to set a generic IE.
1574 *
1575 * This function allocates the IOCTL request buffer, fills it
1576 * with requisite parameters and calls the IOCTL handler.
1577 */
1578int
1579mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
1580{
1581 struct mwifiex_ds_misc_gen_ie gen_ie;
1582
1583 if (ie_len > IW_CUSTOM_MAX)
1584 return -EFAULT;
1585
1586 gen_ie.type = MWIFIEX_IE_TYPE_GEN_IE;
1587 gen_ie.len = ie_len;
1588 memcpy(gen_ie.ie_data, ie, ie_len);
1589 if (mwifiex_misc_ioctl_gen_ie(priv, &gen_ie, HostCmd_ACT_GEN_SET))
1590 return -EFAULT;
1591
1592 return 0;
1593}
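/*
 * Editor's sketch: passing a raw information element to the driver through
 * mwifiex_set_gen_ie().  The element here is a vendor-specific IE with a
 * placeholder OUI and payload; the example_* wrapper is hypothetical.
 */
static int example_set_vendor_ie(struct mwifiex_private *priv)
{
	u8 ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x04, 0x00, 0x11, 0x22, 0x33 };

	return mwifiex_set_gen_ie(priv, ie, sizeof(ie));
}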
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
new file mode 100644
index 000000000000..1fdddece7479
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -0,0 +1,200 @@
1/*
2 * Marvell Wireless LAN device driver: station RX data handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "11n_aggr.h"
26#include "11n_rxreorder.h"
27
28/*
29 * This function processes the received packet and forwards it
30 * to kernel/upper layer.
31 *
32 * This function parses through the received packet and determines
33 * if it is a debug packet or normal packet.
34 *
35 * For non-debug packets, the function chops off unnecessary leading
36 * header bytes, reconstructs the packet as an ethernet frame or
37 * 802.2/llc/snap frame as required, and sends it to kernel/upper layer.
38 *
39 * The completion callback is called after processing in complete.
40 */
41int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
42 struct sk_buff *skb)
43{
44 int ret;
45 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
46 struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
47 struct rx_packet_hdr *rx_pkt_hdr;
48 struct rxpd *local_rx_pd;
49 int hdr_chop;
50 struct ethhdr *eth_hdr;
51 u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
52
53 local_rx_pd = (struct rxpd *) (skb->data);
54
55 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
56 local_rx_pd->rx_pkt_offset);
57
58 if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
59 rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
60 /*
 61		 * Replace the 802.3 header and rfc1042 header (llc/snap) with an
62 * EthernetII header, keep the src/dst and snap_type
63 * (ethertype).
64 * The firmware only passes up SNAP frames converting
65 * all RX Data from 802.11 to 802.2/LLC/SNAP frames.
66 * To create the Ethernet II, just move the src, dst address
67 * right before the snap_type.
68 */
69 eth_hdr = (struct ethhdr *)
70 ((u8 *) &rx_pkt_hdr->eth803_hdr
71 + sizeof(rx_pkt_hdr->eth803_hdr) +
72 sizeof(rx_pkt_hdr->rfc1042_hdr)
73 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
74 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
75 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
76
77 memcpy(eth_hdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
78 sizeof(eth_hdr->h_source));
79 memcpy(eth_hdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
80 sizeof(eth_hdr->h_dest));
81
82 /* Chop off the rxpd + the excess memory from the 802.2/llc/snap
83 header that was removed. */
84 hdr_chop = (u8 *) eth_hdr - (u8 *) local_rx_pd;
85 } else {
86 /* Chop off the rxpd */
87 hdr_chop = (u8 *) &rx_pkt_hdr->eth803_hdr -
88 (u8 *) local_rx_pd;
89 }
90
 91	/* Chop off the leading header bytes so the buffer points to the start of
92 either the reconstructed EthII frame or the 802.2/llc/snap frame */
93 skb_pull(skb, hdr_chop);
94
95 priv->rxpd_rate = local_rx_pd->rx_rate;
96
97 priv->rxpd_htinfo = local_rx_pd->ht_info;
98
99 ret = mwifiex_recv_packet(adapter, skb);
100 if (ret == -1)
101 dev_err(adapter->dev, "recv packet failed\n");
102
103 return ret;
104}
105
106/*
107 * This function processes the received buffer.
108 *
109 * The function looks into the RxPD and performs sanity tests on the
110 * received buffer to ensure it is a valid packet, before processing it
111 * further. If the packet is determined to be aggregated, it is
112 * de-aggregated accordingly. Non-unicast packets are sent directly to
113 * the kernel/upper layers. Unicast packets are handed over to the
114 * Rx reordering routine if 11n is enabled.
115 *
116 * The completion callback is called after processing is complete.
117 */
118int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
119 struct sk_buff *skb)
120{
121 int ret = 0;
122 struct rxpd *local_rx_pd;
123 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
124 struct rx_packet_hdr *rx_pkt_hdr;
125 u8 ta[ETH_ALEN];
126 u16 rx_pkt_type;
127 struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
128
129 local_rx_pd = (struct rxpd *) (skb->data);
130 rx_pkt_type = local_rx_pd->rx_pkt_type;
131
132 rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
133 local_rx_pd->rx_pkt_offset);
134
135 if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
136 (u16) skb->len) {
137 dev_err(adapter->dev, "wrong rx packet: len=%d,"
138 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
139 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
140 priv->stats.rx_dropped++;
141 dev_kfree_skb_any(skb);
142 return ret;
143 }
144
145 if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
146 struct sk_buff_head list;
147 struct sk_buff *rx_skb;
148
149 __skb_queue_head_init(&list);
150
151 skb_pull(skb, local_rx_pd->rx_pkt_offset);
152 skb_trim(skb, local_rx_pd->rx_pkt_length);
153
154 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
155 priv->wdev->iftype, 0, false);
156
157 while (!skb_queue_empty(&list)) {
158 rx_skb = __skb_dequeue(&list);
159 ret = mwifiex_recv_packet(adapter, rx_skb);
160 if (ret == -1)
161 dev_err(adapter->dev, "Rx of A-MSDU failed");
162 }
163 return 0;
164 }
165
166 /*
167	 * If the packet is not a unicast packet, send it directly to the
168	 * OS without passing it through RX reordering.
169 */
170 if (!IS_11N_ENABLED(priv) ||
171 memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
172 mwifiex_process_rx_packet(adapter, skb);
173 return ret;
174 }
175
176 if (mwifiex_queuing_ra_based(priv)) {
177 memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
178 } else {
179 if (rx_pkt_type != PKT_TYPE_BAR)
180 priv->rx_seq[local_rx_pd->priority] =
181 local_rx_pd->seq_num;
182 memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
183 ETH_ALEN);
184 }
185
186 /* Reorder and send to OS */
187 ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
188 local_rx_pd->priority, ta,
189 (u8) local_rx_pd->rx_pkt_type,
190 (void *) skb);
191
192 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
193 if (priv && (ret == -1))
194 priv->stats.rx_dropped++;
195
196 dev_kfree_skb_any(skb);
197 }
198
199 return ret;
200}
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
new file mode 100644
index 000000000000..fa6221bc9104
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -0,0 +1,198 @@
1/*
2 * Marvell Wireless LAN device driver: station TX data handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26
27/*
28 * This function fills the TxPD for tx packets.
29 *
30 * The Tx buffer received by this function should already have the
31 * header space allocated for TxPD.
32 *
33 * This function inserts the TxPD in between interface header and actual
34 * data and adjusts the buffer pointers accordingly.
35 *
36 * The following TxPD fields are set by this function, as required -
37 * - BSS number
38 * - Tx packet length and offset
39 * - Priority
40 * - Packet delay
41 * - Priority specific Tx control
42 * - Flags
43 */
44void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
45 struct sk_buff *skb)
46{
47 struct mwifiex_adapter *adapter = priv->adapter;
48 struct txpd *local_tx_pd;
49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
50
51 if (!skb->len) {
52 dev_err(adapter->dev, "Tx: bad packet length: %d\n",
53 skb->len);
54 tx_info->status_code = -1;
55 return skb->data;
56 }
57
58 BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN));
59 skb_push(skb, sizeof(*local_tx_pd));
60
61 local_tx_pd = (struct txpd *) skb->data;
62 memset(local_tx_pd, 0, sizeof(struct txpd));
63 local_tx_pd->bss_num = priv->bss_num;
64 local_tx_pd->bss_type = priv->bss_type;
65 local_tx_pd->tx_pkt_length = cpu_to_le16((u16) (skb->len -
66 sizeof(struct txpd)));
67
68 local_tx_pd->priority = (u8) skb->priority;
69 local_tx_pd->pkt_delay_2ms =
70 mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
71
72 if (local_tx_pd->priority <
73 ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
74 /*
 75		 * Set the priority-specific tx_control field; a value of 0 will
 76		 * cause the default value to be used later in this function.
77 */
78 local_tx_pd->tx_control =
79 cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[local_tx_pd->
80 priority]);
81
82 if (adapter->pps_uapsd_mode) {
83 if (mwifiex_check_last_packet_indication(priv)) {
84 adapter->tx_lock_flag = true;
85 local_tx_pd->flags =
86 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
87 }
88 }
89
90 /* Offset of actual data */
91 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
92
93 /* make space for INTF_HEADER_LEN */
94 skb_push(skb, INTF_HEADER_LEN);
95
96 if (!local_tx_pd->tx_control)
97 /* TxCtrl set by user or default */
98 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
99
100 return skb->data;
101}
102
103/*
104 * This function tells firmware to send a NULL data packet.
105 *
106 * The function creates a NULL data packet with TxPD and sends to the
107 * firmware for transmission, with highest priority setting.
108 */
109int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
110{
111 struct mwifiex_adapter *adapter = priv->adapter;
112 struct txpd *local_tx_pd;
113/* sizeof(struct txpd) + Interface specific header */
114#define NULL_PACKET_HDR 64
115 u32 data_len = NULL_PACKET_HDR;
116 struct sk_buff *skb;
117 int ret;
118 struct mwifiex_txinfo *tx_info = NULL;
119
120 if (adapter->surprise_removed)
121 return -1;
122
123 if (!priv->media_connected)
124 return -1;
125
126 if (adapter->data_sent)
127 return -1;
128
129 skb = dev_alloc_skb(data_len);
130 if (!skb)
131 return -1;
132
133 tx_info = MWIFIEX_SKB_TXCB(skb);
134 tx_info->bss_index = priv->bss_index;
135 skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
136 skb_push(skb, sizeof(struct txpd));
137
138 local_tx_pd = (struct txpd *) skb->data;
139 local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
140 local_tx_pd->flags = flags;
141 local_tx_pd->priority = WMM_HIGHEST_PRIORITY;
142 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
143 local_tx_pd->bss_num = priv->bss_num;
144 local_tx_pd->bss_type = priv->bss_type;
145
146 skb_push(skb, INTF_HEADER_LEN);
147
148 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
149 skb->data, skb->len, NULL);
150 switch (ret) {
151 case -EBUSY:
152 adapter->data_sent = true;
153		/* Fall through to failure handling */
154 case -1:
155 dev_kfree_skb_any(skb);
156 dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
157 __func__, ret);
158 adapter->dbg.num_tx_host_to_card_failure++;
159 break;
160 case 0:
161 dev_kfree_skb_any(skb);
162 dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
163 __func__);
164 adapter->tx_lock_flag = true;
165 break;
166 case -EINPROGRESS:
167 break;
168 default:
169 break;
170 }
171
172 return ret;
173}
174
175/*
176 * This function checks if we need to send last packet indication.
177 */
178u8
179mwifiex_check_last_packet_indication(struct mwifiex_private *priv)
180{
181 struct mwifiex_adapter *adapter = priv->adapter;
182 u8 ret = false;
183
184 if (!adapter->sleep_period.period)
185 return ret;
186 if (mwifiex_wmm_lists_empty(adapter))
187 ret = true;
188
189 if (ret && !adapter->cmd_sent && !adapter->curr_cmd
190 && !is_command_pending(adapter)) {
191 adapter->delay_null_pkt = false;
192 ret = true;
193 } else {
194 ret = false;
195 adapter->delay_null_pkt = true;
196 }
197 return ret;
198}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
new file mode 100644
index 000000000000..210120889dfe
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -0,0 +1,200 @@
1/*
2 * Marvell Wireless LAN device driver: generic TX/RX data handling
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26
27/*
28 * This function processes the received buffer.
29 *
 30 * The main responsibility of this function is to parse the RxPD to
 31 * identify the correct interface this packet is headed for and to
 32 * forward it to the associated handling function, where the
 33 * packet will be further processed and sent to the kernel/upper layer
 34 * if required.
35 */
36int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
37 struct sk_buff *skb)
38{
39 struct mwifiex_private *priv =
40 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
41 struct rxpd *local_rx_pd;
42 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
43
44 local_rx_pd = (struct rxpd *) (skb->data);
45 /* Get the BSS number from rxpd, get corresponding priv */
46 priv = mwifiex_get_priv_by_id(adapter, local_rx_pd->bss_num &
47 BSS_NUM_MASK, local_rx_pd->bss_type);
48 if (!priv)
49 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
50
51 rx_info->bss_index = priv->bss_index;
52
53 return mwifiex_process_sta_rx_packet(adapter, skb);
54}
55EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
56
57/*
58 * This function sends a packet to device.
59 *
 60 * It processes the packet to add the TxPD, checks the transmit conditions
 61 * and sends the processed packet to the firmware for transmission.
62 *
63 * On successful completion, the function calls the completion callback
64 * and logs the time.
65 */
66int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
67 struct mwifiex_tx_param *tx_param)
68{
69 int ret = -1;
70 struct mwifiex_adapter *adapter = priv->adapter;
71 u8 *head_ptr;
72 struct txpd *local_tx_pd = NULL;
73
74 head_ptr = (u8 *) mwifiex_process_sta_txpd(priv, skb);
75 if (head_ptr) {
76 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
77 local_tx_pd =
78 (struct txpd *) (head_ptr + INTF_HEADER_LEN);
79
80 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
81 skb->data, skb->len, tx_param);
82 }
83
84 switch (ret) {
85 case -EBUSY:
86 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
87 (adapter->pps_uapsd_mode) &&
88 (adapter->tx_lock_flag)) {
89 priv->adapter->tx_lock_flag = false;
90 local_tx_pd->flags = 0;
91 }
92 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
93 break;
94 case -1:
95 adapter->data_sent = false;
96 dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
97 ret);
98 adapter->dbg.num_tx_host_to_card_failure++;
99 mwifiex_write_data_complete(adapter, skb, ret);
100 break;
101 case -EINPROGRESS:
102 adapter->data_sent = false;
103 break;
104 case 0:
105 mwifiex_write_data_complete(adapter, skb, ret);
106 break;
107 default:
108 break;
109 }
110
111 return ret;
112}
113
114/*
115 * Packet send completion callback handler.
116 *
117 * It either frees the buffer directly or forwards it to another
118 * completion callback which checks conditions, updates statistics,
119 * wakes up stalled traffic queue if required, and then frees the buffer.
120 */
121int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
122 struct sk_buff *skb, int status)
123{
124 struct mwifiex_private *priv, *tpriv;
125 struct mwifiex_txinfo *tx_info;
126 int i;
127
128 if (!skb)
129 return 0;
130
131 tx_info = MWIFIEX_SKB_TXCB(skb);
132 priv = mwifiex_bss_index_to_priv(adapter, tx_info->bss_index);
133 if (!priv)
134 goto done;
135
136 priv->netdev->trans_start = jiffies;
137 if (!status) {
138 priv->stats.tx_packets++;
139 priv->stats.tx_bytes += skb->len;
140 } else {
141 priv->stats.tx_errors++;
142 }
143 atomic_dec(&adapter->tx_pending);
144
145 for (i = 0; i < adapter->priv_num; i++) {
146
147 tpriv = adapter->priv[i];
148
149 if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
150 && (tpriv->media_connected)) {
151 if (netif_queue_stopped(tpriv->netdev))
152 netif_wake_queue(tpriv->netdev);
153 }
154 }
155done:
156 dev_kfree_skb_any(skb);
157
158 return 0;
159}
160
161/*
162 * Packet receive completion callback handler.
163 *
 164 * This function updates the statistics and, if the packet has a
 165 * parent buffer, drops the parent's use count, freeing the parent
 166 * once the count reaches zero; otherwise the packet itself is freed.
167 */
168int mwifiex_recv_packet_complete(struct mwifiex_adapter *adapter,
169 struct sk_buff *skb, int status)
170{
171 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
172 struct mwifiex_rxinfo *rx_info_parent;
173 struct mwifiex_private *priv;
174 struct sk_buff *skb_parent;
175 unsigned long flags;
176
177 priv = adapter->priv[rx_info->bss_index];
178
179 if (priv && (status == -1))
180 priv->stats.rx_dropped++;
181
182 if (rx_info->parent) {
183 skb_parent = rx_info->parent;
184 rx_info_parent = MWIFIEX_SKB_RXCB(skb_parent);
185
186 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
187 --rx_info_parent->use_count;
188
189 if (!rx_info_parent->use_count) {
190 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
191 dev_kfree_skb_any(skb_parent);
192 } else {
193 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
194 }
195 } else {
196 dev_kfree_skb_any(skb);
197 }
198
199 return 0;
200}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
new file mode 100644
index 000000000000..d41291529bc0
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -0,0 +1,202 @@
1/*
2 * Marvell Wireless LAN device driver: utility functions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28/*
29 * Firmware initialization complete callback handler.
30 *
31 * This function wakes up the function waiting on the init
32 * wait queue for the firmware initialization to complete.
33 */
34int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
35{
36
37 adapter->init_wait_q_woken = true;
38 wake_up_interruptible(&adapter->init_wait_q);
39 return 0;
40}
41
42/*
43 * Firmware shutdown complete callback handler.
44 *
45 * This function sets the hardware status to not ready and wakes up
46 * the function waiting on the init wait queue for the firmware
47 * shutdown to complete.
48 */
49int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter)
50{
51 adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY;
52 adapter->init_wait_q_woken = true;
53 wake_up_interruptible(&adapter->init_wait_q);
54 return 0;
55}
56
57/*
58 * This function sends init/shutdown command
59 * to firmware.
60 */
61int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
62 u32 func_init_shutdown)
63{
64 u16 cmd;
65
66 if (func_init_shutdown == MWIFIEX_FUNC_INIT) {
67 cmd = HostCmd_CMD_FUNC_INIT;
68 } else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) {
69 cmd = HostCmd_CMD_FUNC_SHUTDOWN;
70 } else {
71 dev_err(priv->adapter->dev, "unsupported parameter\n");
72 return -1;
73 }
74
75 return mwifiex_send_cmd_sync(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL);
76}
77EXPORT_SYMBOL_GPL(mwifiex_init_shutdown_fw);
78
79/*
80 * IOCTL request handler to set/get debug information.
81 *
82 * This function collates/sets the information from/to different driver
83 * structures.
84 */
85int mwifiex_get_debug_info(struct mwifiex_private *priv,
86 struct mwifiex_debug_info *info)
87{
88 struct mwifiex_adapter *adapter = priv->adapter;
89
90 if (info) {
91 memcpy(info->packets_out,
92 priv->wmm.packets_out,
93 sizeof(priv->wmm.packets_out));
94 info->max_tx_buf_size = (u32) adapter->max_tx_buf_size;
95 info->tx_buf_size = (u32) adapter->tx_buf_size;
96 info->rx_tbl_num = mwifiex_get_rx_reorder_tbl(
97 priv, info->rx_tbl);
98 info->tx_tbl_num = mwifiex_get_tx_ba_stream_tbl(
99 priv, info->tx_tbl);
100 info->ps_mode = adapter->ps_mode;
101 info->ps_state = adapter->ps_state;
102 info->is_deep_sleep = adapter->is_deep_sleep;
103 info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
104 info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
105 info->is_hs_configured = adapter->is_hs_configured;
106 info->hs_activated = adapter->hs_activated;
107 info->num_cmd_host_to_card_failure
108 = adapter->dbg.num_cmd_host_to_card_failure;
109 info->num_cmd_sleep_cfm_host_to_card_failure
110 = adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure;
111 info->num_tx_host_to_card_failure
112 = adapter->dbg.num_tx_host_to_card_failure;
113 info->num_event_deauth = adapter->dbg.num_event_deauth;
114 info->num_event_disassoc = adapter->dbg.num_event_disassoc;
115 info->num_event_link_lost = adapter->dbg.num_event_link_lost;
116 info->num_cmd_deauth = adapter->dbg.num_cmd_deauth;
117 info->num_cmd_assoc_success =
118 adapter->dbg.num_cmd_assoc_success;
119 info->num_cmd_assoc_failure =
120 adapter->dbg.num_cmd_assoc_failure;
121 info->num_tx_timeout = adapter->dbg.num_tx_timeout;
122 info->num_cmd_timeout = adapter->dbg.num_cmd_timeout;
123 info->timeout_cmd_id = adapter->dbg.timeout_cmd_id;
124 info->timeout_cmd_act = adapter->dbg.timeout_cmd_act;
125 memcpy(info->last_cmd_id, adapter->dbg.last_cmd_id,
126 sizeof(adapter->dbg.last_cmd_id));
127 memcpy(info->last_cmd_act, adapter->dbg.last_cmd_act,
128 sizeof(adapter->dbg.last_cmd_act));
129 info->last_cmd_index = adapter->dbg.last_cmd_index;
130 memcpy(info->last_cmd_resp_id, adapter->dbg.last_cmd_resp_id,
131 sizeof(adapter->dbg.last_cmd_resp_id));
132 info->last_cmd_resp_index = adapter->dbg.last_cmd_resp_index;
133 memcpy(info->last_event, adapter->dbg.last_event,
134 sizeof(adapter->dbg.last_event));
135 info->last_event_index = adapter->dbg.last_event_index;
136 info->data_sent = adapter->data_sent;
137 info->cmd_sent = adapter->cmd_sent;
138 info->cmd_resp_received = adapter->cmd_resp_received;
139 }
140
141 return 0;
142}
143
144/*
145 * This function processes the received packet before sending it to the
146 * kernel.
147 *
 148 * It looks up the owning interface from the receive control block,
 149 * sets up the SKB fields (device, protocol, checksum status), updates
 150 * the receive statistics and then hands the SKB to the network stack
 151 * via netif_rx()/netif_rx_ni().
152 */
153int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
154{
155 struct mwifiex_rxinfo *rx_info;
156 struct mwifiex_private *priv;
157
158 if (!skb)
159 return -1;
160
161 rx_info = MWIFIEX_SKB_RXCB(skb);
162 priv = mwifiex_bss_index_to_priv(adapter, rx_info->bss_index);
163 if (!priv)
164 return -1;
165
166 skb->dev = priv->netdev;
167 skb->protocol = eth_type_trans(skb, priv->netdev);
168 skb->ip_summed = CHECKSUM_NONE;
169 priv->stats.rx_bytes += skb->len;
170 priv->stats.rx_packets++;
171 if (in_interrupt())
172 netif_rx(skb);
173 else
174 netif_rx_ni(skb);
175
176 return 0;
177}
178
179/*
180 * IOCTL completion callback handler.
181 *
182 * This function is called when a pending IOCTL is completed.
183 *
 184 * It decrements the pending command count, marks the wait condition
 185 * as satisfied and wakes up the waiting thread, unless the command
 186 * has already timed out.
187 */
188int mwifiex_complete_cmd(struct mwifiex_adapter *adapter)
189{
190 atomic_dec(&adapter->cmd_pending);
191 dev_dbg(adapter->dev, "cmd completed: status=%d\n",
192 adapter->cmd_wait_q.status);
193
194 adapter->cmd_wait_q.condition = true;
195
196 if (adapter->cmd_wait_q.status == -ETIMEDOUT)
197 dev_err(adapter->dev, "cmd timeout\n");
198 else
199 wake_up_interruptible(&adapter->cmd_wait_q.wait);
200
201 return 0;
202}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
new file mode 100644
index 000000000000..9506afc6c0e4
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -0,0 +1,32 @@
1/*
2 * Marvell Wireless LAN device driver: utility functions
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_UTIL_H_
21#define _MWIFIEX_UTIL_H_
22
23static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
24{
25 return (struct mwifiex_rxinfo *)skb->cb;
26}
27
28static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
29{
30 return (struct mwifiex_txinfo *)skb->cb;
31}
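/*
 * Illustrative sketch, not part of this header: both control-block
 * structures above are overlaid on skb->cb, which is 48 bytes, so a
 * compile-time guard placed in any always-built translation unit could
 * catch accidental growth (BUILD_BUG_ON is assumed from <linux/kernel.h>):
 *
 *	BUILD_BUG_ON(sizeof(struct mwifiex_rxinfo) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 *	BUILD_BUG_ON(sizeof(struct mwifiex_txinfo) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */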
32#endif /* !_MWIFIEX_UTIL_H_ */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
new file mode 100644
index 000000000000..faa09e32902e
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -0,0 +1,1231 @@
1/*
2 * Marvell Wireless LAN device driver: WMM
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "decl.h"
21#include "ioctl.h"
22#include "util.h"
23#include "fw.h"
24#include "main.h"
25#include "wmm.h"
26#include "11n.h"
27
28
29/* Maximum value FW can accept for driver delay in packet transmission */
30#define DRV_PKT_DELAY_TO_FW_MAX 512
31
32
33#define WMM_QUEUED_PACKET_LOWER_LIMIT 180
34
35#define WMM_QUEUED_PACKET_UPPER_LIMIT 200
36
37/* Offset for TOS field in the IP header */
38#define IPTOS_OFFSET 5
39
40/* WMM information IE */
41static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
42 0x00, 0x50, 0xf2, 0x02,
43 0x00, 0x01, 0x00
44};
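/*
 * For reference (informational note, not functional code): the bytes above
 * decode as a WMM Information element -- element ID 221 (vendor specific),
 * length 7, OUI 00:50:f2, OUI type 2 (WMM), OUI subtype 0 (information
 * element), version 1, QoS info 0.  mwifiex_wmm_process_association_req()
 * below copies everything after the first two bytes into the association
 * request TLV.
 */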
45
46static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
47 WMM_AC_BK,
48 WMM_AC_VI,
49 WMM_AC_VO
50};
51
52static u8 tos_to_tid[] = {
53 /* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
54 0x01, /* 0 1 0 AC_BK */
55 0x02, /* 0 0 0 AC_BK */
56 0x00, /* 0 0 1 AC_BE */
57 0x03, /* 0 1 1 AC_BE */
58 0x04, /* 1 0 0 AC_VI */
59 0x05, /* 1 0 1 AC_VI */
60 0x06, /* 1 1 0 AC_VO */
61 0x07 /* 1 1 1 AC_VO */
62};
63
64/*
 65 * This table inverts the tos_to_tid mapping to get a priority value
 66 * which is in sequential order and can be compared.
 67 * Use it to compare the priorities of two different TIDs.
68 */
69static u8 tos_to_tid_inv[] = {
70 0x02, /* from tos_to_tid[2] = 0 */
71 0x00, /* from tos_to_tid[0] = 1 */
72 0x01, /* from tos_to_tid[1] = 2 */
73 0x03,
74 0x04,
75 0x05,
76 0x06,
77 0x07};
78
79static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
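/*
 * Worked example (illustrative, using the default tables above): to compare
 * two TIDs, compare their tos_to_tid_inv[] values -- e.g. tos_to_tid_inv[0]
 * is 2 while tos_to_tid_inv[1] is 0, so TID 0 (AC_BE) outranks TID 1
 * (AC_BK), matching the BK < BE < VI < VO ordering used elsewhere in this
 * file.
 */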
80
81/*
82 * This function debug prints the priority parameters for a WMM AC.
83 */
84static void
85mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
86{
87 const char *ac_str[] = { "BK", "BE", "VI", "VO" };
88
89 pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
90 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
91 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
92 & MWIFIEX_ACI) >> 5]],
93 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
94 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
95 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
96 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
97 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
98 le16_to_cpu(ac_param->tx_op_limit));
99}
100
101/*
102 * This function allocates a route address list.
103 *
104 * The function also initializes the list with the provided RA.
105 */
106static struct mwifiex_ra_list_tbl *
107mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
108{
109 struct mwifiex_ra_list_tbl *ra_list;
110
111 ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
112
113 if (!ra_list) {
114 dev_err(adapter->dev, "%s: failed to alloc ra_list\n",
115 __func__);
116 return NULL;
117 }
118 INIT_LIST_HEAD(&ra_list->list);
119 skb_queue_head_init(&ra_list->skb_head);
120
121 memcpy(ra_list->ra, ra, ETH_ALEN);
122
123 ra_list->total_pkts_size = 0;
124
125 dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
126
127 return ra_list;
128}
129
130/*
131 * This function allocates and adds a RA list for all TIDs
132 * with the given RA.
133 */
134void
135mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
136{
137 int i;
138 struct mwifiex_ra_list_tbl *ra_list;
139 struct mwifiex_adapter *adapter = priv->adapter;
140
141 for (i = 0; i < MAX_NUM_TID; ++i) {
142 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
143 dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
144
145 if (!ra_list)
146 break;
147
148 if (!mwifiex_queuing_ra_based(priv))
149 ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
150 else
151 ra_list->is_11n_enabled = false;
152
153 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
154 ra_list, ra_list->is_11n_enabled);
155
156 list_add_tail(&ra_list->list,
157 &priv->wmm.tid_tbl_ptr[i].ra_list);
158
159 if (!priv->wmm.tid_tbl_ptr[i].ra_list_curr)
160 priv->wmm.tid_tbl_ptr[i].ra_list_curr = ra_list;
161 }
162}
163
164/*
165 * This function sets the WMM queue priorities to their default values.
166 */
167static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
168{
169 /* Default queue priorities: VO->VI->BE->BK */
170 priv->wmm.queue_priority[0] = WMM_AC_VO;
171 priv->wmm.queue_priority[1] = WMM_AC_VI;
172 priv->wmm.queue_priority[2] = WMM_AC_BE;
173 priv->wmm.queue_priority[3] = WMM_AC_BK;
174}
175
176/*
 177 * This function maps ACs to TIDs.
178 */
179static void
180mwifiex_wmm_queue_priorities_tid(u8 queue_priority[])
181{
182 int i;
183
184 for (i = 0; i < 4; ++i) {
185 tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
186 tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
187 }
188}
189
190/*
191 * This function initializes WMM priority queues.
192 */
193void
194mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
195 struct ieee_types_wmm_parameter *wmm_ie)
196{
197 u16 cw_min, avg_back_off, tmp[4];
198 u32 i, j, num_ac;
199 u8 ac_idx;
200
201 if (!wmm_ie || !priv->wmm_enabled) {
202 /* WMM is not enabled, just set the defaults and return */
203 mwifiex_wmm_default_queue_priorities(priv);
204 return;
205 }
206
207 dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
208 "qos_info Parameter Set Count=%d, Reserved=%#x\n",
209 wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
210 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
211 wmm_ie->reserved);
212
213 for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
214 cw_min = (1 << (wmm_ie->ac_params[num_ac].ecw_bitmap &
215 MWIFIEX_ECW_MIN)) - 1;
216 avg_back_off = (cw_min >> 1) +
217 (wmm_ie->ac_params[num_ac].aci_aifsn_bitmap &
218 MWIFIEX_AIFSN);
219
220 ac_idx = wmm_aci_to_qidx_map[(wmm_ie->ac_params[num_ac].
221 aci_aifsn_bitmap &
222 MWIFIEX_ACI) >> 5];
223 priv->wmm.queue_priority[ac_idx] = ac_idx;
224 tmp[ac_idx] = avg_back_off;
225
226 dev_dbg(priv->adapter->dev, "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
227 (1 << ((wmm_ie->ac_params[num_ac].ecw_bitmap &
228 MWIFIEX_ECW_MAX) >> 4)) - 1,
229 cw_min, avg_back_off);
230 mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
231 }
232
233 /* Bubble sort */
234 for (i = 0; i < num_ac; i++) {
235 for (j = 1; j < num_ac - i; j++) {
236 if (tmp[j - 1] > tmp[j]) {
237 swap(tmp[j - 1], tmp[j]);
238 swap(priv->wmm.queue_priority[j - 1],
239 priv->wmm.queue_priority[j]);
240 } else if (tmp[j - 1] == tmp[j]) {
241 if (priv->wmm.queue_priority[j - 1]
242 < priv->wmm.queue_priority[j])
243 swap(priv->wmm.queue_priority[j - 1],
244 priv->wmm.queue_priority[j]);
245 }
246 }
247 }
248
249 mwifiex_wmm_queue_priorities_tid(priv->wmm.queue_priority);
250}
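/*
 * Worked example (illustrative figures only, assuming typical EDCA values
 * rather than any particular AP): for AC_VO with ECWmin = 2 and AIFSN = 2,
 * cw_min = (1 << 2) - 1 = 3 and avg_back_off = (3 >> 1) + 2 = 3; for AC_BE
 * with ECWmin = 4 and AIFSN = 3, cw_min = 15 and avg_back_off = (15 >> 1) +
 * 3 = 10.  The sort above then places AC_VO (smaller average back-off)
 * ahead of AC_BE in queue_priority[].
 */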
251
252/*
253 * This function evaluates whether or not an AC is to be downgraded.
254 *
 255 * In case the AC is not enabled, the highest AC that is enabled and
 256 * does not require admission control is returned.
257 */
258static enum mwifiex_wmm_ac_e
259mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
260 enum mwifiex_wmm_ac_e eval_ac)
261{
262 int down_ac;
263 enum mwifiex_wmm_ac_e ret_ac;
264 struct mwifiex_wmm_ac_status *ac_status;
265
266 ac_status = &priv->wmm.ac_status[eval_ac];
267
268 if (!ac_status->disabled)
 269		/* Okay to use this AC, it's enabled */
270 return eval_ac;
271
272 /* Setup a default return value of the lowest priority */
273 ret_ac = WMM_AC_BK;
274
275 /*
276 * Find the highest AC that is enabled and does not require
277 * admission control. The spec disallows downgrading to an AC,
278 * which is enabled due to a completed admission control.
279 * Unadmitted traffic is not to be sent on an AC with admitted
280 * traffic.
281 */
282 for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
283 ac_status = &priv->wmm.ac_status[down_ac];
284
285 if (!ac_status->disabled && !ac_status->flow_required)
286 /* AC is enabled and does not require admission
287 control */
288 ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
289 }
290
291 return ret_ac;
292}
293
294/*
295 * This function downgrades WMM priority queue.
296 */
297void
298mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
299{
300 int ac_val;
301
302 dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
303 "BK(0), BE(1), VI(2), VO(3)\n");
304
305 if (!priv->wmm_enabled) {
306 /* WMM is not enabled, default priorities */
307 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
308 priv->wmm.ac_down_graded_vals[ac_val] =
309 (enum mwifiex_wmm_ac_e) ac_val;
310 } else {
311 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
312 priv->wmm.ac_down_graded_vals[ac_val]
313 = mwifiex_wmm_eval_downgrade_ac(priv,
314 (enum mwifiex_wmm_ac_e) ac_val);
315 dev_dbg(priv->adapter->dev, "info: WMM: AC PRIO %d maps to %d\n",
316 ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
317 }
318 }
319}
320
321/*
 322 * This function converts the IP TOS field to a WMM AC
 323 * queue assignment.
324 */
325static enum mwifiex_wmm_ac_e
326mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
327{
328 /* Map of TOS UP values to WMM AC */
329 const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
330 WMM_AC_BK,
331 WMM_AC_BK,
332 WMM_AC_BE,
333 WMM_AC_VI,
334 WMM_AC_VI,
335 WMM_AC_VO,
336 WMM_AC_VO
337 };
338
339 if (tos >= ARRAY_SIZE(tos_to_ac))
340 return WMM_AC_BE;
341
342 return tos_to_ac[tos];
343}
344
345/*
346 * This function evaluates a given TID and downgrades it to a lower
347 * TID if the WMM Parameter IE received from the AP indicates that the
 348 * corresponding AC is disabled (due to the call admission control (ACM)
 349 * bit). Mapping of TID to AC is taken care of internally.
350 */
351static u8
352mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
353{
354 enum mwifiex_wmm_ac_e ac, ac_down;
355 u8 new_tid;
356
357 ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
358 ac_down = priv->wmm.ac_down_graded_vals[ac];
359
360 /* Send the index to tid array, picking from the array will be
 361	 * taken care of by the dequeuing function
362 */
363 new_tid = ac_to_tid[ac_down][tid % 2];
364
365 return new_tid;
366}
367
368/*
369 * This function initializes the WMM state information and the
370 * WMM data path queues.
371 */
372void
373mwifiex_wmm_init(struct mwifiex_adapter *adapter)
374{
375 int i, j;
376 struct mwifiex_private *priv;
377
378 for (j = 0; j < adapter->priv_num; ++j) {
379 priv = adapter->priv[j];
380 if (!priv)
381 continue;
382
383 for (i = 0; i < MAX_NUM_TID; ++i) {
384 priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
385 priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
386 priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
387 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
388 }
389
390 priv->aggr_prio_tbl[6].amsdu
391 = priv->aggr_prio_tbl[6].ampdu_ap
392 = priv->aggr_prio_tbl[6].ampdu_user
393 = BA_STREAM_NOT_ALLOWED;
394
395 priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
396 = priv->aggr_prio_tbl[7].ampdu_user
397 = BA_STREAM_NOT_ALLOWED;
398
399 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
400 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
401 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
402 }
403}
404
405/*
406 * This function checks if WMM Tx queue is empty.
407 */
408int
409mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
410{
411 int i, j;
412 struct mwifiex_private *priv;
413
414 for (j = 0; j < adapter->priv_num; ++j) {
415 priv = adapter->priv[j];
416 if (priv) {
417 for (i = 0; i < MAX_NUM_TID; i++)
418 if (!mwifiex_wmm_is_ra_list_empty(
419 &priv->wmm.tid_tbl_ptr[i].ra_list))
420 return false;
421 }
422 }
423
424 return true;
425}
426
427/*
428 * This function deletes all packets in an RA list node.
429 *
 430 * The packet send completion callback handler is called with
 431 * status failure for each packet after it is dequeued, to ensure proper
432 * cleanup. The RA list node itself is freed at the end.
433 */
434static void
435mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
436 struct mwifiex_ra_list_tbl *ra_list)
437{
438 struct mwifiex_adapter *adapter = priv->adapter;
439 struct sk_buff *skb, *tmp;
440
441 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
442 mwifiex_write_data_complete(adapter, skb, -1);
443}
444
445/*
446 * This function deletes all packets in an RA list.
447 *
 448 * Each node in the RA list is freed individually first, and then
449 * the RA list itself is freed.
450 */
451static void
452mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
453 struct list_head *ra_list_head)
454{
455 struct mwifiex_ra_list_tbl *ra_list;
456
457 list_for_each_entry(ra_list, ra_list_head, list)
458 mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
459}
460
461/*
462 * This function deletes all packets in all RA lists.
463 */
464static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
465{
466 int i;
467
468 for (i = 0; i < MAX_NUM_TID; i++)
469 mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
470 ra_list);
471}
472
473/*
474 * This function deletes all route addresses from all RA lists.
475 */
476static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
477{
478 struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
479 int i;
480
481 for (i = 0; i < MAX_NUM_TID; ++i) {
482 dev_dbg(priv->adapter->dev,
483 "info: ra_list: freeing buf for tid %d\n", i);
484 list_for_each_entry_safe(ra_list, tmp_node,
485 &priv->wmm.tid_tbl_ptr[i].ra_list, list) {
486 list_del(&ra_list->list);
487 kfree(ra_list);
488 }
489
490 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
491
492 priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
493 }
494}
495
496/*
497 * This function cleans up the Tx and Rx queues.
498 *
499 * Cleanup includes -
500 * - All packets in RA lists
501 * - All entries in Rx reorder table
502 * - All entries in Tx BA stream table
503 * - MPA buffer (if required)
504 * - All RA lists
505 */
506void
507mwifiex_clean_txrx(struct mwifiex_private *priv)
508{
509 unsigned long flags;
510
511 mwifiex_11n_cleanup_reorder_tbl(priv);
512 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
513
514 mwifiex_wmm_cleanup_queues(priv);
515 mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
516
517 if (priv->adapter->if_ops.cleanup_mpa_buf)
518 priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);
519
520 mwifiex_wmm_delete_all_ralist(priv);
521 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
522
523 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
524}
525
526/*
527 * This function retrieves a particular RA list node, matching with the
528 * given TID and RA address.
529 */
530static struct mwifiex_ra_list_tbl *
531mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
532 u8 *ra_addr)
533{
534 struct mwifiex_ra_list_tbl *ra_list;
535
536 list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
537 list) {
538 if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
539 return ra_list;
540 }
541
542 return NULL;
543}
544
545/*
546 * This function retrieves an RA list node for a given TID and
547 * RA address pair.
548 *
549 * If no such node is found, a new node is added first and then
550 * retrieved.
551 */
552static struct mwifiex_ra_list_tbl *
553mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
554{
555 struct mwifiex_ra_list_tbl *ra_list;
556
557 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
558 if (ra_list)
559 return ra_list;
560 mwifiex_ralist_add(priv, ra_addr);
561
562 return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
563}
564
565/*
566 * This function checks if a particular RA list node exists in a given TID
567 * table index.
568 */
569int
570mwifiex_is_ralist_valid(struct mwifiex_private *priv,
571 struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
572{
573 struct mwifiex_ra_list_tbl *rlist;
574
575 list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
576 list) {
577 if (rlist == ra_list)
578 return true;
579 }
580
581 return false;
582}
583
584/*
585 * This function adds a packet to WMM queue.
586 *
587 * In disconnected state the packet is immediately dropped and the
588 * packet send completion callback is called with status failure.
589 *
590 * Otherwise, the correct RA list node is located and the packet
591 * is queued at the list tail.
592 */
593void
594mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
595 struct sk_buff *skb)
596{
597 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
598 struct mwifiex_private *priv = adapter->priv[tx_info->bss_index];
599 u32 tid;
600 struct mwifiex_ra_list_tbl *ra_list;
601 u8 ra[ETH_ALEN], tid_down;
602 unsigned long flags;
603
604 if (!priv->media_connected) {
605 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
606 mwifiex_write_data_complete(adapter, skb, -1);
607 return;
608 }
609
610 tid = skb->priority;
611
612 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
613
614 tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
615
 616	/* In infrastructure mode the list has already been created during
 617	   association, so there is no need to call get_queue_raptr; there is
 618	   only one raptr per TID in that case */
619 if (!mwifiex_queuing_ra_based(priv)) {
620 if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
621 ra_list = list_first_entry(
622 &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
623 struct mwifiex_ra_list_tbl, list);
624 else
625 ra_list = NULL;
626 } else {
627 memcpy(ra, skb->data, ETH_ALEN);
628 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
629 }
630
631 if (!ra_list) {
632 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
633 mwifiex_write_data_complete(adapter, skb, -1);
634 return;
635 }
636
637 skb_queue_tail(&ra_list->skb_head, skb);
638
639 ra_list->total_pkts_size += skb->len;
640
641 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
642}
643
644/*
645 * This function processes the get WMM status command response from firmware.
646 *
647 * The response may contain multiple TLVs -
648 * - AC Queue status TLVs
649 * - Current WMM Parameter IE TLV
650 * - Admission Control action frame TLVs
651 *
652 * This function parses the TLVs and then calls further specific functions
 653 * to process any changes in the queue priorities or state.
654 */
655int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
656 const struct host_cmd_ds_command *resp)
657{
658 u8 *curr = (u8 *) &resp->params.get_wmm_status;
659 uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
660 int valid = true;
661
662 struct mwifiex_ie_types_data *tlv_hdr;
663 struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
664 struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
665 struct mwifiex_wmm_ac_status *ac_status;
666
667 dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
668 resp_len);
669
670 while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
671 tlv_hdr = (struct mwifiex_ie_types_data *) curr;
672 tlv_len = le16_to_cpu(tlv_hdr->header.len);
673
674 switch (le16_to_cpu(tlv_hdr->header.type)) {
675 case TLV_TYPE_WMMQSTATUS:
676 tlv_wmm_qstatus =
677 (struct mwifiex_ie_types_wmm_queue_status *)
678 tlv_hdr;
679 dev_dbg(priv->adapter->dev,
680 "info: CMD_RESP: WMM_GET_STATUS:"
681 " QSTATUS TLV: %d, %d, %d\n",
682 tlv_wmm_qstatus->queue_index,
683 tlv_wmm_qstatus->flow_required,
684 tlv_wmm_qstatus->disabled);
685
686 ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
687 queue_index];
688 ac_status->disabled = tlv_wmm_qstatus->disabled;
689 ac_status->flow_required =
690 tlv_wmm_qstatus->flow_required;
691 ac_status->flow_created = tlv_wmm_qstatus->flow_created;
692 break;
693
694 case WLAN_EID_VENDOR_SPECIFIC:
695 /*
 696			 * Point the regular IEEE IE two bytes into the Marvell IE
 697			 * and set up the IEEE IE type and length byte fields
698 */
699
700 wmm_param_ie =
701 (struct ieee_types_wmm_parameter *) (curr +
702 2);
703 wmm_param_ie->vend_hdr.len = (u8) tlv_len;
704 wmm_param_ie->vend_hdr.element_id =
705 WLAN_EID_VENDOR_SPECIFIC;
706
707 dev_dbg(priv->adapter->dev,
708 "info: CMD_RESP: WMM_GET_STATUS:"
709 " WMM Parameter Set Count: %d\n",
710 wmm_param_ie->qos_info_bitmap &
711 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
712
713 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
714 wmm_ie, wmm_param_ie,
715 wmm_param_ie->vend_hdr.len + 2);
716
717 break;
718
719 default:
720 valid = false;
721 break;
722 }
723
724 curr += (tlv_len + sizeof(tlv_hdr->header));
725 resp_len -= (tlv_len + sizeof(tlv_hdr->header));
726 }
727
728 mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
729 mwifiex_wmm_setup_ac_downgrade(priv);
730
731 return 0;
732}
733
734/*
735 * Callback handler from the command module to allow insertion of a WMM TLV.
736 *
737 * If the BSS we are associating to supports WMM, this function adds the
738 * required WMM Information IE to the association request command buffer in
739 * the form of a Marvell extended IEEE IE.
740 */
741u32
742mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
743 u8 **assoc_buf,
744 struct ieee_types_wmm_parameter *wmm_ie,
745 struct ieee80211_ht_cap *ht_cap)
746{
747 struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
748 u32 ret_len = 0;
749
750 /* Null checks */
751 if (!assoc_buf)
752 return 0;
753 if (!(*assoc_buf))
754 return 0;
755
756 if (!wmm_ie)
757 return 0;
758
759 dev_dbg(priv->adapter->dev, "info: WMM: process assoc req:"
760 "bss->wmmIe=0x%x\n",
761 wmm_ie->vend_hdr.element_id);
762
763 if ((priv->wmm_required
764 || (ht_cap && (priv->adapter->config_bands & BAND_GN
765 || priv->adapter->config_bands & BAND_AN))
766 )
767 && wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
768 wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
769 wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
770 wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
771 memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
772 le16_to_cpu(wmm_tlv->header.len));
773 if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
774 memcpy((u8 *) (wmm_tlv->wmm_ie
775 + le16_to_cpu(wmm_tlv->header.len)
776 - sizeof(priv->wmm_qosinfo)),
777 &priv->wmm_qosinfo,
778 sizeof(priv->wmm_qosinfo));
779
780 ret_len = sizeof(wmm_tlv->header)
781 + le16_to_cpu(wmm_tlv->header.len);
782
783 *assoc_buf += ret_len;
784 }
785
786 return ret_len;
787}
788
789/*
790 * This function computes the time delay in the driver queues for a
791 * given packet.
792 *
793 * When the packet is received at the OS/Driver interface, the current
794 * time is set in the packet structure. The difference between the present
795 * time and that received time is computed in this function and limited
796 * based on pre-compiled limits in the driver.
797 */
798u8
799mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
800 const struct sk_buff *skb)
801{
802 u8 ret_val;
803 struct timeval out_tstamp, in_tstamp;
804 u32 queue_delay;
805
806 do_gettimeofday(&out_tstamp);
807 in_tstamp = ktime_to_timeval(skb->tstamp);
808
809 queue_delay = (out_tstamp.tv_sec - in_tstamp.tv_sec) * 1000;
810 queue_delay += (out_tstamp.tv_usec - in_tstamp.tv_usec) / 1000;
811
812 /*
813 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
814 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
815 *
816 * Pass max value if queue_delay is beyond the uint8 range
817 */
818 ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
819
820 dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
821 " %d ms sent to FW\n", queue_delay, ret_val);
822
823 return ret_val;
824}
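/*
 * Worked example (illustrative numbers): a packet queued 37 ms ago is
 * reported as min(37, drv_pkt_delay_max) >> 1 = 18, i.e. 36 ms in the
 * 2 ms units the firmware expects; assuming drv_pkt_delay_max allows it,
 * delays of 510 ms or more saturate at the maximum encodable value of 255.
 */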
825
826/*
827 * This function retrieves the highest priority RA list table pointer.
828 */
829static struct mwifiex_ra_list_tbl *
830mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
831 struct mwifiex_private **priv, int *tid)
832{
833 struct mwifiex_private *priv_tmp;
834 struct mwifiex_ra_list_tbl *ptr, *head;
835 struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
836 struct mwifiex_tid_tbl *tid_ptr;
837 int is_list_empty;
838 unsigned long flags;
839 int i, j;
840
841 for (j = adapter->priv_num - 1; j >= 0; --j) {
842 spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
843 flags);
844 is_list_empty = list_empty(&adapter->bss_prio_tbl[j]
845 .bss_prio_head);
846 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
847 flags);
848 if (is_list_empty)
849 continue;
850
851 if (adapter->bss_prio_tbl[j].bss_prio_cur ==
852 (struct mwifiex_bss_prio_node *)
853 &adapter->bss_prio_tbl[j].bss_prio_head) {
854 bssprio_node =
855 list_first_entry(&adapter->bss_prio_tbl[j]
856 .bss_prio_head,
857 struct mwifiex_bss_prio_node,
858 list);
859 bssprio_head = bssprio_node;
860 } else {
861 bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
862 bssprio_head = bssprio_node;
863 }
864
865 do {
866 priv_tmp = bssprio_node->priv;
867
868 for (i = HIGH_PRIO_TID; i >= LOW_PRIO_TID; --i) {
869
870 tid_ptr = &(priv_tmp)->wmm.
871 tid_tbl_ptr[tos_to_tid[i]];
872
873 spin_lock_irqsave(&tid_ptr->tid_tbl_lock,
874 flags);
875 is_list_empty =
876 list_empty(&adapter->bss_prio_tbl[j]
877 .bss_prio_head);
878 spin_unlock_irqrestore(&tid_ptr->tid_tbl_lock,
879 flags);
880 if (is_list_empty)
881 continue;
882
883 /*
 884			 * Always start from the RA we transmitted to
 885			 * last time; this way the RAs are picked in
 886			 * round-robin fashion.
887 */
888 ptr = list_first_entry(
889 &tid_ptr->ra_list_curr->list,
890 struct mwifiex_ra_list_tbl,
891 list);
892
893 head = ptr;
894 if (ptr == (struct mwifiex_ra_list_tbl *)
895 &tid_ptr->ra_list) {
896 /* Get next ra */
897 ptr = list_first_entry(&ptr->list,
898 struct mwifiex_ra_list_tbl, list);
899 head = ptr;
900 }
901
902 do {
903 is_list_empty =
904 skb_queue_empty(&ptr->skb_head);
905 if (!is_list_empty) {
906 *priv = priv_tmp;
907 *tid = tos_to_tid[i];
908 return ptr;
909 }
910 /* Get next ra */
911 ptr = list_first_entry(&ptr->list,
912 struct mwifiex_ra_list_tbl,
913 list);
914 if (ptr ==
915 (struct mwifiex_ra_list_tbl *)
916 &tid_ptr->ra_list)
917 ptr = list_first_entry(
918 &ptr->list,
919 struct mwifiex_ra_list_tbl,
920 list);
921 } while (ptr != head);
922 }
923
924 /* Get next bss priority node */
925 bssprio_node = list_first_entry(&bssprio_node->list,
926 struct mwifiex_bss_prio_node,
927 list);
928
929 if (bssprio_node ==
930 (struct mwifiex_bss_prio_node *)
931 &adapter->bss_prio_tbl[j].bss_prio_head)
932 /* Get next bss priority node */
933 bssprio_node = list_first_entry(
934 &bssprio_node->list,
935 struct mwifiex_bss_prio_node,
936 list);
937 } while (bssprio_node != bssprio_head);
938 }
939 return NULL;
940}
941
942/*
943 * This function gets the number of packets in the Tx queue of a
944 * particular RA list.
945 */
946static int
947mwifiex_num_pkts_in_txq(struct mwifiex_private *priv,
948 struct mwifiex_ra_list_tbl *ptr, int max_buf_size)
949{
950 int count = 0, total_size = 0;
951 struct sk_buff *skb, *tmp;
952
953 skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
954 total_size += skb->len;
955 if (total_size < max_buf_size)
956 ++count;
957 else
958 break;
959 }
960
961 return count;
962}
963
964/*
965 * This function sends a single packet to firmware for transmission.
966 */
967static void
968mwifiex_send_single_packet(struct mwifiex_private *priv,
969 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
970 unsigned long ra_list_flags)
971 __releases(&priv->wmm.ra_list_spinlock)
972{
973 struct sk_buff *skb, *skb_next;
974 struct mwifiex_tx_param tx_param;
975 struct mwifiex_adapter *adapter = priv->adapter;
976 struct mwifiex_txinfo *tx_info;
977
978 if (skb_queue_empty(&ptr->skb_head)) {
979 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
980 ra_list_flags);
981 dev_dbg(adapter->dev, "data: nothing to send\n");
982 return;
983 }
984
985 skb = skb_dequeue(&ptr->skb_head);
986
987 tx_info = MWIFIEX_SKB_TXCB(skb);
988 dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
989
990 ptr->total_pkts_size -= skb->len;
991
992 if (!skb_queue_empty(&ptr->skb_head))
993 skb_next = skb_peek(&ptr->skb_head);
994 else
995 skb_next = NULL;
996
997 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
998
999 tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
1000 sizeof(struct txpd) : 0);
1001
1002 if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
1003 /* Queue the packet back at the head */
1004 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1005
1006 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1007 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1008 ra_list_flags);
1009 mwifiex_write_data_complete(adapter, skb, -1);
1010 return;
1011 }
1012
1013 skb_queue_tail(&ptr->skb_head, skb);
1014
1015 ptr->total_pkts_size += skb->len;
1016 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1017 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1018 ra_list_flags);
1019 } else {
1020 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1021 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1022 priv->wmm.packets_out[ptr_index]++;
1023 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1024 }
1025 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1026 list_first_entry(
1027 &adapter->bss_prio_tbl[priv->bss_priority]
1028 .bss_prio_cur->list,
1029 struct mwifiex_bss_prio_node,
1030 list);
1031 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1032 ra_list_flags);
1033 }
1034}
1035
1036/*
1037 * This function checks if the first packet in the given RA list
1038 * is already processed or not.
1039 */
1040static int
1041mwifiex_is_ptr_processed(struct mwifiex_private *priv,
1042 struct mwifiex_ra_list_tbl *ptr)
1043{
1044 struct sk_buff *skb;
1045 struct mwifiex_txinfo *tx_info;
1046
1047 if (skb_queue_empty(&ptr->skb_head))
1048 return false;
1049
1050 skb = skb_peek(&ptr->skb_head);
1051
1052 tx_info = MWIFIEX_SKB_TXCB(skb);
1053 if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
1054 return true;
1055
1056 return false;
1057}
1058
1059/*
1060 * This function sends a single processed packet to firmware for
1061 * transmission.
1062 */
1063static void
1064mwifiex_send_processed_packet(struct mwifiex_private *priv,
1065 struct mwifiex_ra_list_tbl *ptr, int ptr_index,
1066 unsigned long ra_list_flags)
1067 __releases(&priv->wmm.ra_list_spinlock)
1068{
1069 struct mwifiex_tx_param tx_param;
1070 struct mwifiex_adapter *adapter = priv->adapter;
1071 int ret = -1;
1072 struct sk_buff *skb, *skb_next;
1073 struct mwifiex_txinfo *tx_info;
1074
1075 if (skb_queue_empty(&ptr->skb_head)) {
1076 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1077 ra_list_flags);
1078 return;
1079 }
1080
1081 skb = skb_dequeue(&ptr->skb_head);
1082
1083 if (!skb_queue_empty(&ptr->skb_head))
1084 skb_next = skb_peek(&ptr->skb_head);
1085 else
1086 skb_next = NULL;
1087
1088 tx_info = MWIFIEX_SKB_TXCB(skb);
1089
1090 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1091 tx_param.next_pkt_len =
1092 ((skb_next) ? skb_next->len +
1093 sizeof(struct txpd) : 0);
1094 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1095 skb->data, skb->len, &tx_param);
1096 switch (ret) {
1097 case -EBUSY:
1098 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
1099 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1100
1101 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1102 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1103 ra_list_flags);
1104 mwifiex_write_data_complete(adapter, skb, -1);
1105 return;
1106 }
1107
1108 skb_queue_tail(&ptr->skb_head, skb);
1109
1110 tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1111 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1112 ra_list_flags);
1113 break;
1114 case -1:
1115 adapter->data_sent = false;
1116 dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
1117 adapter->dbg.num_tx_host_to_card_failure++;
1118 mwifiex_write_data_complete(adapter, skb, ret);
1119 break;
1120 case -EINPROGRESS:
1121 adapter->data_sent = false;
1122 default:
1123 break;
1124 }
1125 if (ret != -EBUSY) {
1126 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
1127 if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1128 priv->wmm.packets_out[ptr_index]++;
1129 priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
1130 }
1131 adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
1132 list_first_entry(
1133 &adapter->bss_prio_tbl[priv->bss_priority]
1134 .bss_prio_cur->list,
1135 struct mwifiex_bss_prio_node,
1136 list);
1137 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1138 ra_list_flags);
1139 }
1140}
1141
1142/*
1143 * This function dequeues a packet from the highest priority list
1144 * and transmits it.
1145 */
1146static int
1147mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1148{
1149 struct mwifiex_ra_list_tbl *ptr;
1150 struct mwifiex_private *priv = NULL;
1151 int ptr_index = 0;
1152 u8 ra[ETH_ALEN];
1153 int tid_del = 0, tid = 0;
1154 unsigned long flags;
1155
1156 ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
1157 if (!ptr)
1158 return -1;
1159
1160 tid = mwifiex_get_tid(ptr);
1161
1162 dev_dbg(adapter->dev, "data: tid=%d\n", tid);
1163
1164 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
1165 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1166 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
1167 return -1;
1168 }
1169
1170 if (mwifiex_is_ptr_processed(priv, ptr)) {
1171 mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
 1172		/* ra_list_spinlock has been released in
1173 mwifiex_send_processed_packet() */
1174 return 0;
1175 }
1176
1177 if (!ptr->is_11n_enabled || mwifiex_is_ba_stream_setup(priv, ptr, tid)
1178 || ((priv->sec_info.wpa_enabled
1179 || priv->sec_info.wpa2_enabled) && !priv->wpa_is_gtk_set)
1180 ) {
1181 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
 1182		/* ra_list_spinlock has been released in
1183 mwifiex_send_single_packet() */
1184 } else {
1185 if (mwifiex_is_ampdu_allowed(priv, tid)) {
1186 if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1187 mwifiex_11n_create_tx_ba_stream_tbl(priv,
1188 ptr->ra, tid,
1189 BA_STREAM_SETUP_INPROGRESS);
1190 mwifiex_send_addba(priv, tid, ptr->ra);
1191 } else if (mwifiex_find_stream_to_delete
1192 (priv, tid, &tid_del, ra)) {
1193 mwifiex_11n_create_tx_ba_stream_tbl(priv,
1194 ptr->ra, tid,
1195 BA_STREAM_SETUP_INPROGRESS);
1196 mwifiex_send_delba(priv, tid_del, ra, 1);
1197 }
1198 }
 1199/* Minimum number of packets queued before attempting AMSDU aggregation */
1200#define MIN_NUM_AMSDU 2
1201 if (mwifiex_is_amsdu_allowed(priv, tid) &&
1202 (mwifiex_num_pkts_in_txq(priv, ptr, adapter->tx_buf_size) >=
1203 MIN_NUM_AMSDU))
1204 mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
1205 ptr_index, flags);
 1206			/* ra_list_spinlock has been released in
1207 mwifiex_11n_aggregate_pkt() */
1208 else
1209 mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
 1210			/* ra_list_spinlock has been released in
1211 mwifiex_send_single_packet() */
1212 }
1213 return 0;
1214}
1215
1216/*
1217 * This function transmits the highest priority packet awaiting in the
1218 * WMM Queues.
1219 */
1220void
1221mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1222{
1223 do {
1224 /* Check if busy */
1225 if (adapter->data_sent || adapter->tx_lock_flag)
1226 break;
1227
1228 if (mwifiex_dequeue_tx_packet(adapter))
1229 break;
1230 } while (true);
1231}
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
new file mode 100644
index 000000000000..fcea1f68792f
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -0,0 +1,110 @@
1/*
2 * Marvell Wireless LAN device driver: WMM
3 *
4 * Copyright (C) 2011, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_WMM_H_
21#define _MWIFIEX_WMM_H_
22
23enum ieee_types_wmm_aciaifsn_bitmasks {
24 MWIFIEX_AIFSN = (BIT(0) | BIT(1) | BIT(2) | BIT(3)),
25 MWIFIEX_ACM = BIT(4),
26 MWIFIEX_ACI = (BIT(5) | BIT(6)),
27};
28
29enum ieee_types_wmm_ecw_bitmasks {
30 MWIFIEX_ECW_MIN = (BIT(0) | BIT(1) | BIT(2) | BIT(3)),
31 MWIFIEX_ECW_MAX = (BIT(4) | BIT(5) | BIT(6) | BIT(7)),
32};
33
34/*
35 * This function retrieves the TID of the given RA list.
36 */
37static inline int
38mwifiex_get_tid(struct mwifiex_ra_list_tbl *ptr)
39{
40 struct sk_buff *skb;
41
42 if (skb_queue_empty(&ptr->skb_head))
43 return 0;
44
45 skb = skb_peek(&ptr->skb_head);
46
47 return skb->priority;
48}
49
50/*
51 * This function gets the length of a list.
52 */
53static inline int
54mwifiex_wmm_list_len(struct list_head *head)
55{
56 struct list_head *pos;
57 int count = 0;
58
59 list_for_each(pos, head)
60 ++count;
61
62 return count;
63}
64
65/*
 66 * This function checks whether an RA list is empty.
67 */
68static inline u8
69mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
70{
71 struct mwifiex_ra_list_tbl *ra_list;
72 int is_list_empty;
73
74 list_for_each_entry(ra_list, ra_list_hhead, list) {
75 is_list_empty = skb_queue_empty(&ra_list->skb_head);
76 if (!is_list_empty)
77 return false;
78 }
79
80 return true;
81}
82
83void mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
84 struct sk_buff *skb);
85void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
86
87int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
88void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
89int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
90 struct mwifiex_ra_list_tbl *ra_list, int tid);
91
92u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
93 const struct sk_buff *skb);
94void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
95
96extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
97 u8 **assoc_buf,
98 struct ieee_types_wmm_parameter
99 *wmmie,
100 struct ieee80211_ht_cap
101 *htcap);
102
103void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
104 struct ieee_types_wmm_parameter
105 *wmm_ie);
106void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
107extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
108 const struct host_cmd_ds_command *resp);
109
110#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index c1ceb4b23971..32261189bcef 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -63,6 +63,7 @@ MODULE_PARM_DESC(ap_mode_default,
 #define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL	0x00000c38
 #define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK	0x00000c3c
 #define MWL8K_A2H_INT_DUMMY			(1 << 20)
+#define MWL8K_A2H_INT_BA_WATCHDOG		(1 << 14)
 #define MWL8K_A2H_INT_CHNL_SWITCHED		(1 << 11)
 #define MWL8K_A2H_INT_QUEUE_EMPTY		(1 << 10)
 #define MWL8K_A2H_INT_RADAR_DETECT		(1 << 7)
@@ -73,6 +74,14 @@ MODULE_PARM_DESC(ap_mode_default,
 #define MWL8K_A2H_INT_RX_READY			(1 << 1)
 #define MWL8K_A2H_INT_TX_DONE			(1 << 0)
 
+/* HW micro second timer register
+ * located at offset 0xA600. This
+ * will be used to timestamp tx
+ * packets.
+ */
+
+#define MWL8K_HW_TIMER_REGISTER			0x0000a600
+
 #define MWL8K_A2H_EVENTS	(MWL8K_A2H_INT_DUMMY | \
 				 MWL8K_A2H_INT_CHNL_SWITCHED | \
 				 MWL8K_A2H_INT_QUEUE_EMPTY | \
@@ -82,10 +91,14 @@ MODULE_PARM_DESC(ap_mode_default,
 				 MWL8K_A2H_INT_MAC_EVENT | \
 				 MWL8K_A2H_INT_OPC_DONE | \
 				 MWL8K_A2H_INT_RX_READY | \
-				 MWL8K_A2H_INT_TX_DONE)
+				 MWL8K_A2H_INT_TX_DONE | \
+				 MWL8K_A2H_INT_BA_WATCHDOG)
 
 #define MWL8K_RX_QUEUES		1
-#define MWL8K_TX_QUEUES		4
+#define MWL8K_TX_WMM_QUEUES	4
+#define MWL8K_MAX_AMPDU_QUEUES	8
+#define MWL8K_MAX_TX_QUEUES	(MWL8K_TX_WMM_QUEUES + MWL8K_MAX_AMPDU_QUEUES)
+#define mwl8k_tx_queues(priv)	(MWL8K_TX_WMM_QUEUES + (priv)->num_ampdu_queues)
 
 struct rxd_ops {
 	int rxd_size;
@@ -134,6 +147,21 @@ struct mwl8k_tx_queue {
 	struct sk_buff **skb;
 };
 
+enum {
+	AMPDU_NO_STREAM,
+	AMPDU_STREAM_NEW,
+	AMPDU_STREAM_IN_PROGRESS,
+	AMPDU_STREAM_ACTIVE,
+};
+
+struct mwl8k_ampdu_stream {
+	struct ieee80211_sta *sta;
+	u8 tid;
+	u8 state;
+	u8 idx;
+	u8 txq_idx; /* index of this stream in priv->txq */
+};
+
 struct mwl8k_priv {
 	struct ieee80211_hw *hw;
 	struct pci_dev *pdev;
@@ -160,6 +188,12 @@ struct mwl8k_priv {
 	u32 ap_macids_supported;
 	u32 sta_macids_supported;
 
+	/* Ampdu stream information */
+	u8 num_ampdu_queues;
+	spinlock_t stream_lock;
+	struct mwl8k_ampdu_stream ampdu[MWL8K_MAX_AMPDU_QUEUES];
+	struct work_struct watchdog_ba_handle;
+
 	/* firmware access */
 	struct mutex fw_mutex;
 	struct task_struct *fw_mutex_owner;
@@ -191,7 +225,8 @@ struct mwl8k_priv {
 	int pending_tx_pkts;
 
 	struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
-	struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
+	struct mwl8k_tx_queue txq[MWL8K_MAX_TX_QUEUES];
+	u32 txq_offset[MWL8K_MAX_TX_QUEUES];
 
 	bool radio_on;
 	bool radio_short_preamble;
@@ -224,7 +259,7 @@ struct mwl8k_priv {
 	 * preserve the queue configurations so they can be restored if/when
 	 * the firmware image is swapped.
 	 */
-	struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_QUEUES];
+	struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES];
 
 	/* async firmware loading state */
 	unsigned fw_state;
@@ -262,9 +297,17 @@ struct mwl8k_vif {
 #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
 #define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
 
+struct tx_traffic_info {
+	u32 start_time;
+	u32 pkts;
+};
+
+#define MWL8K_MAX_TID 8
 struct mwl8k_sta {
 	/* Index into station database. Returned by UPDATE_STADB. */
 	u8 peer_id;
+	u8 is_ampdu_allowed;
+	struct tx_traffic_info tx_stats[MWL8K_MAX_TID];
 };
 #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
 
@@ -352,10 +395,12 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
352#define MWL8K_CMD_ENABLE_SNIFFER 0x0150 395#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
353#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */ 396#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
354#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 397#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
398#define MWL8K_CMD_GET_WATCHDOG_BITMAP 0x0205
355#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */ 399#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
356#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ 400#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
357#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */ 401#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
358#define MWL8K_CMD_UPDATE_STADB 0x1123 402#define MWL8K_CMD_UPDATE_STADB 0x1123
403#define MWL8K_CMD_BASTREAM 0x1125
359 404
360static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) 405static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
361{ 406{
@@ -395,6 +440,8 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
395 MWL8K_CMDNAME(SET_NEW_STN); 440 MWL8K_CMDNAME(SET_NEW_STN);
396 MWL8K_CMDNAME(UPDATE_ENCRYPTION); 441 MWL8K_CMDNAME(UPDATE_ENCRYPTION);
397 MWL8K_CMDNAME(UPDATE_STADB); 442 MWL8K_CMDNAME(UPDATE_STADB);
443 MWL8K_CMDNAME(BASTREAM);
444 MWL8K_CMDNAME(GET_WATCHDOG_BITMAP);
398 default: 445 default:
399 snprintf(buf, bufsize, "0x%x", cmd); 446 snprintf(buf, bufsize, "0x%x", cmd);
400 } 447 }
@@ -669,7 +716,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
669 "helper image\n", pci_name(priv->pdev)); 716 "helper image\n", pci_name(priv->pdev));
670 return rc; 717 return rc;
671 } 718 }
672 msleep(5); 719 msleep(20);
673 720
674 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size); 721 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
675 } else { 722 } else {
@@ -734,8 +781,11 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
734 skb_pull(skb, sizeof(*tr) - hdrlen); 781 skb_pull(skb, sizeof(*tr) - hdrlen);
735} 782}
736 783
784#define REDUCED_TX_HEADROOM 8
785
737static void 786static void
738mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad) 787mwl8k_add_dma_header(struct mwl8k_priv *priv, struct sk_buff *skb,
788 int head_pad, int tail_pad)
739{ 789{
740 struct ieee80211_hdr *wh; 790 struct ieee80211_hdr *wh;
741 int hdrlen; 791 int hdrlen;
@@ -751,7 +801,23 @@ mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
751 wh = (struct ieee80211_hdr *)skb->data; 801 wh = (struct ieee80211_hdr *)skb->data;
752 802
753 hdrlen = ieee80211_hdrlen(wh->frame_control); 803 hdrlen = ieee80211_hdrlen(wh->frame_control);
754 reqd_hdrlen = sizeof(*tr); 804
805 /*
806 * Check if skb_resize is required because of
807 * tx_headroom adjustment.
808 */
809 if (priv->ap_fw && (hdrlen < (sizeof(struct ieee80211_cts)
810 + REDUCED_TX_HEADROOM))) {
811 if (pskb_expand_head(skb, REDUCED_TX_HEADROOM, 0, GFP_ATOMIC)) {
812
813 wiphy_err(priv->hw->wiphy,
814 "Failed to reallocate TX buffer\n");
815 return;
816 }
817 skb->truesize += REDUCED_TX_HEADROOM;
818 }
819
820 reqd_hdrlen = sizeof(*tr) + head_pad;
755 821
756 if (hdrlen != reqd_hdrlen) 822 if (hdrlen != reqd_hdrlen)
757 skb_push(skb, reqd_hdrlen - hdrlen); 823 skb_push(skb, reqd_hdrlen - hdrlen);
@@ -773,12 +839,14 @@ mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
773 tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad); 839 tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
774} 840}
775 841
776static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb) 842static void mwl8k_encapsulate_tx_frame(struct mwl8k_priv *priv,
843 struct sk_buff *skb)
777{ 844{
778 struct ieee80211_hdr *wh; 845 struct ieee80211_hdr *wh;
779 struct ieee80211_tx_info *tx_info; 846 struct ieee80211_tx_info *tx_info;
780 struct ieee80211_key_conf *key_conf; 847 struct ieee80211_key_conf *key_conf;
781 int data_pad; 848 int data_pad;
849 int head_pad = 0;
782 850
783 wh = (struct ieee80211_hdr *)skb->data; 851 wh = (struct ieee80211_hdr *)skb->data;
784 852
@@ -790,9 +858,7 @@ static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
790 858
791 /* 859 /*
792 * Make sure the packet header is in the DMA header format (4-address 860 * Make sure the packet header is in the DMA header format (4-address
793 * without QoS), the necessary crypto padding between the header and the 861 * without QoS), and add head & tail padding when HW crypto is enabled.
794 * payload has already been provided by mac80211, but it doesn't add tail
795 * padding when HW crypto is enabled.
796 * 862 *
797 * We have the following trailer padding requirements: 863 * We have the following trailer padding requirements:
798 * - WEP: 4 trailer bytes (ICV) 864 * - WEP: 4 trailer bytes (ICV)
@@ -801,6 +867,7 @@ static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
801 */ 867 */
802 data_pad = 0; 868 data_pad = 0;
803 if (key_conf != NULL) { 869 if (key_conf != NULL) {
870 head_pad = key_conf->iv_len;
804 switch (key_conf->cipher) { 871 switch (key_conf->cipher) {
805 case WLAN_CIPHER_SUITE_WEP40: 872 case WLAN_CIPHER_SUITE_WEP40:
806 case WLAN_CIPHER_SUITE_WEP104: 873 case WLAN_CIPHER_SUITE_WEP104:
@@ -814,7 +881,7 @@ static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
814 break; 881 break;
815 } 882 }
816 } 883 }
817 mwl8k_add_dma_header(skb, data_pad); 884 mwl8k_add_dma_header(priv, skb, head_pad, data_pad);
818} 885}
819 886
820/* 887/*
@@ -1127,6 +1194,9 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
1127 struct mwl8k_rx_queue *rxq = priv->rxq + index; 1194 struct mwl8k_rx_queue *rxq = priv->rxq + index;
1128 int i; 1195 int i;
1129 1196
1197 if (rxq->rxd == NULL)
1198 return;
1199
1130 for (i = 0; i < MWL8K_RX_DESCS; i++) { 1200 for (i = 0; i < MWL8K_RX_DESCS; i++) {
1131 if (rxq->buf[i].skb != NULL) { 1201 if (rxq->buf[i].skb != NULL) {
1132 pci_unmap_single(priv->pdev, 1202 pci_unmap_single(priv->pdev,
@@ -1319,7 +1389,7 @@ struct mwl8k_tx_desc {
1319 __le16 pkt_len; 1389 __le16 pkt_len;
1320 __u8 dest_MAC_addr[ETH_ALEN]; 1390 __u8 dest_MAC_addr[ETH_ALEN];
1321 __le32 next_txd_phys_addr; 1391 __le32 next_txd_phys_addr;
1322 __le32 reserved; 1392 __le32 timestamp;
1323 __le16 rate_info; 1393 __le16 rate_info;
1324 __u8 peer_id; 1394 __u8 peer_id;
1325 __u8 tx_frag_cnt; 1395 __u8 tx_frag_cnt;
@@ -1383,7 +1453,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
1383 struct mwl8k_priv *priv = hw->priv; 1453 struct mwl8k_priv *priv = hw->priv;
1384 int i; 1454 int i;
1385 1455
1386 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 1456 for (i = 0; i < mwl8k_tx_queues(priv); i++) {
1387 struct mwl8k_tx_queue *txq = priv->txq + i; 1457 struct mwl8k_tx_queue *txq = priv->txq + i;
1388 int fw_owned = 0; 1458 int fw_owned = 0;
1389 int drv_owned = 0; 1459 int drv_owned = 0;
@@ -1452,9 +1522,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1452 1522
1453 if (timeout) { 1523 if (timeout) {
1454 WARN_ON(priv->pending_tx_pkts); 1524 WARN_ON(priv->pending_tx_pkts);
1455 if (retry) { 1525 if (retry)
1456 wiphy_notice(hw->wiphy, "tx rings drained\n"); 1526 wiphy_notice(hw->wiphy, "tx rings drained\n");
1457 }
1458 break; 1527 break;
1459 } 1528 }
1460 1529
@@ -1484,6 +1553,41 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1484 MWL8K_TXD_STATUS_OK_RETRY | \ 1553 MWL8K_TXD_STATUS_OK_RETRY | \
1485 MWL8K_TXD_STATUS_OK_MORE_RETRY)) 1554 MWL8K_TXD_STATUS_OK_MORE_RETRY))
1486 1555
1556static int mwl8k_tid_queue_mapping(u8 tid)
1557{
1558 BUG_ON(tid > 7);
1559
1560 switch (tid) {
1561 case 0:
1562 case 3:
1563 return IEEE80211_AC_BE;
1564 break;
1565 case 1:
1566 case 2:
1567 return IEEE80211_AC_BK;
1568 break;
1569 case 4:
1570 case 5:
1571 return IEEE80211_AC_VI;
1572 break;
1573 case 6:
1574 case 7:
1575 return IEEE80211_AC_VO;
1576 break;
1577 default:
1578 return -1;
1579 break;
1580 }
1581}
1582
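Editor's sketch (not part of the patch) of how the mapping above is driven from a BlockAck action frame, condensing what the xmit path in this patch does; mwl8k_addba_req_queue() is a hypothetical name.

/* Pull the TID out of an ADDBA request's capability field and pick the
 * matching WMM access-category queue for it.
 */
static int mwl8k_addba_req_queue(const struct ieee80211_mgmt *mgmt)
{
	u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
	u8 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;

	return mwl8k_tid_queue_mapping(tid);
}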
1583/* The firmware will fill in the rate information
1584 * for each packet that gets queued in the hardware
1585 * and these macros will interpret that info.
1586 */
1587
1588#define RI_FORMAT(a) (a & 0x0001)
1589#define RI_RATE_ID_MCS(a) ((a & 0x01f8) >> 3)
1590
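Editor's sketch (not part of the patch) condensing how mwl8k_txq_reclaim() below applies these macros to the firmware-reported rate_info; mwl8k_rate_allows_ampdu() is a hypothetical helper name.

/* MCS 0 and legacy-format rates turn aggregation off for the station;
 * anything better keeps it enabled.
 */
static bool mwl8k_rate_allows_ampdu(u16 rate_info)
{
	return RI_FORMAT(rate_info) != 0 && RI_RATE_ID_MCS(rate_info) >= 1;
}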
1487static int 1591static int
1488mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force) 1592mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1489{ 1593{
@@ -1500,6 +1604,10 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1500 struct sk_buff *skb; 1604 struct sk_buff *skb;
1501 struct ieee80211_tx_info *info; 1605 struct ieee80211_tx_info *info;
1502 u32 status; 1606 u32 status;
1607 struct ieee80211_sta *sta;
1608 struct mwl8k_sta *sta_info = NULL;
1609 u16 rate_info;
1610 struct ieee80211_hdr *wh;
1503 1611
1504 tx = txq->head; 1612 tx = txq->head;
1505 tx_desc = txq->txd + tx; 1613 tx_desc = txq->txd + tx;
@@ -1528,18 +1636,40 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1528 1636
1529 mwl8k_remove_dma_header(skb, tx_desc->qos_control); 1637 mwl8k_remove_dma_header(skb, tx_desc->qos_control);
1530 1638
1639 wh = (struct ieee80211_hdr *) skb->data;
1640
1531 /* Mark descriptor as unused */ 1641 /* Mark descriptor as unused */
1532 tx_desc->pkt_phys_addr = 0; 1642 tx_desc->pkt_phys_addr = 0;
1533 tx_desc->pkt_len = 0; 1643 tx_desc->pkt_len = 0;
1534 1644
1535 info = IEEE80211_SKB_CB(skb); 1645 info = IEEE80211_SKB_CB(skb);
1646 if (ieee80211_is_data(wh->frame_control)) {
1647 sta = info->control.sta;
1648 if (sta) {
1649 sta_info = MWL8K_STA(sta);
1650 BUG_ON(sta_info == NULL);
1651 rate_info = le16_to_cpu(tx_desc->rate_info);
1652 /* If the rate is < 6.5 Mbps for an HT station,
1653 * do not form an ampdu. If the station is a
1654 * legacy station (format = 0), do not form an
1655 * ampdu either.
1656 */
1657 if (RI_RATE_ID_MCS(rate_info) < 1 ||
1658 RI_FORMAT(rate_info) == 0) {
1659 sta_info->is_ampdu_allowed = false;
1660 } else {
1661 sta_info->is_ampdu_allowed = true;
1662 }
1663 }
1664 }
1665
1536 ieee80211_tx_info_clear_status(info); 1666 ieee80211_tx_info_clear_status(info);
1537 1667
1538 /* Rate control is happening in the firmware. 1668 /* Rate control is happening in the firmware.
1539 * Ensure no tx rate is being reported. 1669 * Ensure no tx rate is being reported.
1540 */ 1670 */
1541 info->status.rates[0].idx = -1; 1671 info->status.rates[0].idx = -1;
1542 info->status.rates[0].count = 1; 1672 info->status.rates[0].count = 1;
1543 1673
1544 if (MWL8K_TXD_SUCCESS(status)) 1674 if (MWL8K_TXD_SUCCESS(status))
1545 info->flags |= IEEE80211_TX_STAT_ACK; 1675 info->flags |= IEEE80211_TX_STAT_ACK;
@@ -1549,9 +1679,6 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
1549 processed++; 1679 processed++;
1550 } 1680 }
1551 1681
1552 if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
1553 ieee80211_wake_queue(hw, index);
1554
1555 return processed; 1682 return processed;
1556} 1683}
1557 1684
@@ -1561,6 +1688,9 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1561 struct mwl8k_priv *priv = hw->priv; 1688 struct mwl8k_priv *priv = hw->priv;
1562 struct mwl8k_tx_queue *txq = priv->txq + index; 1689 struct mwl8k_tx_queue *txq = priv->txq + index;
1563 1690
1691 if (txq->txd == NULL)
1692 return;
1693
1564 mwl8k_txq_reclaim(hw, index, INT_MAX, 1); 1694 mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
1565 1695
1566 kfree(txq->skb); 1696 kfree(txq->skb);
@@ -1572,12 +1702,116 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1572 txq->txd = NULL; 1702 txq->txd = NULL;
1573} 1703}
1574 1704
1705/* caller must hold priv->stream_lock when calling the stream functions */
1706static struct mwl8k_ampdu_stream *
1707mwl8k_add_stream(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u8 tid)
1708{
1709 struct mwl8k_ampdu_stream *stream;
1710 struct mwl8k_priv *priv = hw->priv;
1711 int i;
1712
1713 for (i = 0; i < priv->num_ampdu_queues; i++) {
1714 stream = &priv->ampdu[i];
1715 if (stream->state == AMPDU_NO_STREAM) {
1716 stream->sta = sta;
1717 stream->state = AMPDU_STREAM_NEW;
1718 stream->tid = tid;
1719 stream->idx = i;
1720 stream->txq_idx = MWL8K_TX_WMM_QUEUES + i;
1721 wiphy_debug(hw->wiphy, "Added a new stream for %pM %d\n",
1722 sta->addr, tid);
1723 return stream;
1724 }
1725 }
1726 return NULL;
1727}
1728
1729static int
1730mwl8k_start_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
1731{
1732 int ret;
1733
1734 /* if the stream has already been started, don't start it again */
1735 if (stream->state != AMPDU_STREAM_NEW)
1736 return 0;
1737 ret = ieee80211_start_tx_ba_session(stream->sta, stream->tid, 0);
1738 if (ret)
1739 wiphy_debug(hw->wiphy, "Failed to start stream for %pM %d: "
1740 "%d\n", stream->sta->addr, stream->tid, ret);
1741 else
1742 wiphy_debug(hw->wiphy, "Started stream for %pM %d\n",
1743 stream->sta->addr, stream->tid);
1744 return ret;
1745}
1746
1747static void
1748mwl8k_remove_stream(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
1749{
1750 wiphy_debug(hw->wiphy, "Remove stream for %pM %d\n", stream->sta->addr,
1751 stream->tid);
1752 memset(stream, 0, sizeof(*stream));
1753}
1754
1755static struct mwl8k_ampdu_stream *
1756mwl8k_lookup_stream(struct ieee80211_hw *hw, u8 *addr, u8 tid)
1757{
1758 struct mwl8k_priv *priv = hw->priv;
1759 int i;
1760
1761 for (i = 0 ; i < priv->num_ampdu_queues; i++) {
1762 struct mwl8k_ampdu_stream *stream;
1763 stream = &priv->ampdu[i];
1764 if (stream->state == AMPDU_NO_STREAM)
1765 continue;
1766 if (!memcmp(stream->sta->addr, addr, ETH_ALEN) &&
1767 stream->tid == tid)
1768 return stream;
1769 }
1770 return NULL;
1771}
1772
1773#define MWL8K_AMPDU_PACKET_THRESHOLD 64
1774static inline bool mwl8k_ampdu_allowed(struct ieee80211_sta *sta, u8 tid)
1775{
1776 struct mwl8k_sta *sta_info = MWL8K_STA(sta);
1777 struct tx_traffic_info *tx_stats;
1778
1779 BUG_ON(tid >= MWL8K_MAX_TID);
1780 tx_stats = &sta_info->tx_stats[tid];
1781
1782 return sta_info->is_ampdu_allowed &&
1783 tx_stats->pkts > MWL8K_AMPDU_PACKET_THRESHOLD;
1784}
1785
1786static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
1787{
1788 struct mwl8k_sta *sta_info = MWL8K_STA(sta);
1789 struct tx_traffic_info *tx_stats;
1790
1791 BUG_ON(tid >= MWL8K_MAX_TID);
1792 tx_stats = &sta_info->tx_stats[tid];
1793
1794 if (tx_stats->start_time == 0)
1795 tx_stats->start_time = jiffies;
1796
1797 /* Reset the packet count after each second elapses. If the number of
1798 * packets within a second ever exceeds MWL8K_AMPDU_PACKET_THRESHOLD,
1799 * we will allow an ampdu stream to be started for this RA/TID.
1800 */
1801 if (jiffies - tx_stats->start_time > HZ) {
1802 tx_stats->pkts = 0;
1803 tx_stats->start_time = 0;
1804 } else
1805 tx_stats->pkts++;
1806}
1807
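Editor's sketch (not part of the patch): the two helpers above are called separately from mwl8k_txq_xmit() below; this hypothetical wrapper, mwl8k_should_start_ampdu(), just shows how they combine.

/* Count this QoS data frame against the RA/TID pair and report whether
 * enough traffic has been seen within the last second to justify
 * starting a BA stream.
 */
static bool mwl8k_should_start_ampdu(struct ieee80211_sta *sta, u8 tid)
{
	mwl8k_tx_count_packet(sta, tid);
	return mwl8k_ampdu_allowed(sta, tid);
}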
1575static void 1808static void
1576mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) 1809mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1577{ 1810{
1578 struct mwl8k_priv *priv = hw->priv; 1811 struct mwl8k_priv *priv = hw->priv;
1579 struct ieee80211_tx_info *tx_info; 1812 struct ieee80211_tx_info *tx_info;
1580 struct mwl8k_vif *mwl8k_vif; 1813 struct mwl8k_vif *mwl8k_vif;
1814 struct ieee80211_sta *sta;
1581 struct ieee80211_hdr *wh; 1815 struct ieee80211_hdr *wh;
1582 struct mwl8k_tx_queue *txq; 1816 struct mwl8k_tx_queue *txq;
1583 struct mwl8k_tx_desc *tx; 1817 struct mwl8k_tx_desc *tx;
@@ -1585,6 +1819,12 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1585 u32 txstatus; 1819 u32 txstatus;
1586 u8 txdatarate; 1820 u8 txdatarate;
1587 u16 qos; 1821 u16 qos;
1822 int txpriority;
1823 u8 tid = 0;
1824 struct mwl8k_ampdu_stream *stream = NULL;
1825 bool start_ba_session = false;
1826 bool mgmtframe = false;
1827 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1588 1828
1589 wh = (struct ieee80211_hdr *)skb->data; 1829 wh = (struct ieee80211_hdr *)skb->data;
1590 if (ieee80211_is_data_qos(wh->frame_control)) 1830 if (ieee80211_is_data_qos(wh->frame_control))
@@ -1592,14 +1832,18 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1592 else 1832 else
1593 qos = 0; 1833 qos = 0;
1594 1834
1835 if (ieee80211_is_mgmt(wh->frame_control))
1836 mgmtframe = true;
1837
1595 if (priv->ap_fw) 1838 if (priv->ap_fw)
1596 mwl8k_encapsulate_tx_frame(skb); 1839 mwl8k_encapsulate_tx_frame(priv, skb);
1597 else 1840 else
1598 mwl8k_add_dma_header(skb, 0); 1841 mwl8k_add_dma_header(priv, skb, 0, 0);
1599 1842
1600 wh = &((struct mwl8k_dma_data *)skb->data)->wh; 1843 wh = &((struct mwl8k_dma_data *)skb->data)->wh;
1601 1844
1602 tx_info = IEEE80211_SKB_CB(skb); 1845 tx_info = IEEE80211_SKB_CB(skb);
1846 sta = tx_info->control.sta;
1603 mwl8k_vif = MWL8K_VIF(tx_info->control.vif); 1847 mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
1604 1848
1605 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1849 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -1627,12 +1871,91 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1627 qos |= MWL8K_QOS_ACK_POLICY_NORMAL; 1871 qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
1628 } 1872 }
1629 1873
1874 /* Queue the ADDBA request in the respective data queue. While setting
1875 * up the ampdu stream, mac80211 queues further packets for that
1876 * particular ra/tid pair. However, packets piled up in the hardware
1877 * for that ra/tid pair will still go out. If the ADDBA request and
1878 * the related data packets go out from different queues
1879 * asynchronously, the receiver window will shift, which might result
1880 * in ampdu packets getting dropped at the receiver after the stream
1881 * has been set up.
1882 */
1883 if (unlikely(ieee80211_is_action(wh->frame_control) &&
1884 mgmt->u.action.category == WLAN_CATEGORY_BACK &&
1885 mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ &&
1886 priv->ap_fw)) {
1887 u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
1888 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
1889 index = mwl8k_tid_queue_mapping(tid);
1890 }
1891
1892 txpriority = index;
1893
1894 if (ieee80211_is_data_qos(wh->frame_control) &&
1895 skb->protocol != cpu_to_be16(ETH_P_PAE) &&
1896 sta->ht_cap.ht_supported && priv->ap_fw) {
1897 tid = qos & 0xf;
1898 mwl8k_tx_count_packet(sta, tid);
1899 spin_lock(&priv->stream_lock);
1900 stream = mwl8k_lookup_stream(hw, sta->addr, tid);
1901 if (stream != NULL) {
1902 if (stream->state == AMPDU_STREAM_ACTIVE) {
1903 txpriority = stream->txq_idx;
1904 index = stream->txq_idx;
1905 } else if (stream->state == AMPDU_STREAM_NEW) {
1906 /* We get here if mac80211 sends us packets
1907 * after we've initiated a stream, but before
1908 * our ampdu_action routine has been called
1909 * with IEEE80211_AMPDU_TX_START to get the SSN
1910 * for the ADDBA request. So this packet can
1911 * go out with no risk of sequence number
1912 * mismatch. No special handling is required.
1913 */
1914 } else {
1915 /* Drop packets that would go out after the
1916 * ADDBA request was sent but before the ADDBA
1917 * response is received. If we don't do this,
1918 * the recipient would probably receive them
1919 * after the ADDBA request with SSN 0. This
1920 * will cause the recipient's BA receive window
1921 * to shift, which would cause the subsequent
1922 * packets in the BA stream to be discarded.
1923 * mac80211 queues our packets for us in this
1924 * case, so this is really just a safety check.
1925 */
1926 wiphy_warn(hw->wiphy,
1927 "Cannot send packet while ADDBA "
1928 "dialog is underway.\n");
1929 spin_unlock(&priv->stream_lock);
1930 dev_kfree_skb(skb);
1931 return;
1932 }
1933 } else {
1934 /* Defer calling mwl8k_start_stream so that the current
1935 * skb can go out before the ADDBA request. This
1936 * prevents sequence number mismatch at the recipient
1937 * as described above.
1938 */
1939 if (mwl8k_ampdu_allowed(sta, tid)) {
1940 stream = mwl8k_add_stream(hw, sta, tid);
1941 if (stream != NULL)
1942 start_ba_session = true;
1943 }
1944 }
1945 spin_unlock(&priv->stream_lock);
1946 }
1947
1630 dma = pci_map_single(priv->pdev, skb->data, 1948 dma = pci_map_single(priv->pdev, skb->data,
1631 skb->len, PCI_DMA_TODEVICE); 1949 skb->len, PCI_DMA_TODEVICE);
1632 1950
1633 if (pci_dma_mapping_error(priv->pdev, dma)) { 1951 if (pci_dma_mapping_error(priv->pdev, dma)) {
1634 wiphy_debug(hw->wiphy, 1952 wiphy_debug(hw->wiphy,
1635 "failed to dma map skb, dropping TX frame.\n"); 1953 "failed to dma map skb, dropping TX frame.\n");
1954 if (start_ba_session) {
1955 spin_lock(&priv->stream_lock);
1956 mwl8k_remove_stream(hw, stream);
1957 spin_unlock(&priv->stream_lock);
1958 }
1636 dev_kfree_skb(skb); 1959 dev_kfree_skb(skb);
1637 return; 1960 return;
1638 } 1961 }
@@ -1641,12 +1964,34 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1641 1964
1642 txq = priv->txq + index; 1965 txq = priv->txq + index;
1643 1966
1967 /* Mgmt frames that go out frequently are probe
1968 * responses. Other mgmt frames go out relatively
1969 * infrequently. Hence reserve 2 buffers so that
1970 * other mgmt frames do not get dropped due to an
1971 * already queued probe response in one of the
1972 * reserved buffers.
1973 */
1974
1975 if (txq->len >= MWL8K_TX_DESCS - 2) {
1976 if (mgmtframe == false ||
1977 txq->len == MWL8K_TX_DESCS) {
1978 if (start_ba_session) {
1979 spin_lock(&priv->stream_lock);
1980 mwl8k_remove_stream(hw, stream);
1981 spin_unlock(&priv->stream_lock);
1982 }
1983 spin_unlock_bh(&priv->tx_lock);
1984 dev_kfree_skb(skb);
1985 return;
1986 }
1987 }
1988
1644 BUG_ON(txq->skb[txq->tail] != NULL); 1989 BUG_ON(txq->skb[txq->tail] != NULL);
1645 txq->skb[txq->tail] = skb; 1990 txq->skb[txq->tail] = skb;
1646 1991
1647 tx = txq->txd + txq->tail; 1992 tx = txq->txd + txq->tail;
1648 tx->data_rate = txdatarate; 1993 tx->data_rate = txdatarate;
1649 tx->tx_priority = index; 1994 tx->tx_priority = txpriority;
1650 tx->qos_control = cpu_to_le16(qos); 1995 tx->qos_control = cpu_to_le16(qos);
1651 tx->pkt_phys_addr = cpu_to_le32(dma); 1996 tx->pkt_phys_addr = cpu_to_le32(dma);
1652 tx->pkt_len = cpu_to_le16(skb->len); 1997 tx->pkt_len = cpu_to_le16(skb->len);
@@ -1655,6 +2000,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1655 tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id; 2000 tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
1656 else 2001 else
1657 tx->peer_id = 0; 2002 tx->peer_id = 0;
2003
2004 if (priv->ap_fw)
2005 tx->timestamp = cpu_to_le32(ioread32(priv->regs +
2006 MWL8K_HW_TIMER_REGISTER));
2007
1658 wmb(); 2008 wmb();
1659 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); 2009 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
1660 2010
@@ -1665,12 +2015,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1665 if (txq->tail == MWL8K_TX_DESCS) 2015 if (txq->tail == MWL8K_TX_DESCS)
1666 txq->tail = 0; 2016 txq->tail = 0;
1667 2017
1668 if (txq->head == txq->tail)
1669 ieee80211_stop_queue(hw, index);
1670
1671 mwl8k_tx_start(priv); 2018 mwl8k_tx_start(priv);
1672 2019
1673 spin_unlock_bh(&priv->tx_lock); 2020 spin_unlock_bh(&priv->tx_lock);
2021
2022 /* Initiate the ampdu session here */
2023 if (start_ba_session) {
2024 spin_lock(&priv->stream_lock);
2025 if (mwl8k_start_stream(hw, stream))
2026 mwl8k_remove_stream(hw, stream);
2027 spin_unlock(&priv->stream_lock);
2028 }
1674} 2029}
1675 2030
1676 2031
@@ -1868,7 +2223,7 @@ struct mwl8k_cmd_get_hw_spec_sta {
1868 __u8 mcs_bitmap[16]; 2223 __u8 mcs_bitmap[16];
1869 __le32 rx_queue_ptr; 2224 __le32 rx_queue_ptr;
1870 __le32 num_tx_queues; 2225 __le32 num_tx_queues;
1871 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES]; 2226 __le32 tx_queue_ptrs[MWL8K_TX_WMM_QUEUES];
1872 __le32 caps2; 2227 __le32 caps2;
1873 __le32 num_tx_desc_per_queue; 2228 __le32 num_tx_desc_per_queue;
1874 __le32 total_rxd; 2229 __le32 total_rxd;
@@ -1974,8 +2329,8 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1974 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); 2329 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
1975 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); 2330 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1976 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); 2331 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
1977 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 2332 cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv));
1978 for (i = 0; i < MWL8K_TX_QUEUES; i++) 2333 for (i = 0; i < mwl8k_tx_queues(priv); i++)
1979 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); 2334 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1980 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 2335 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1981 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 2336 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
@@ -2017,13 +2372,16 @@ struct mwl8k_cmd_get_hw_spec_ap {
2017 __le32 wcbbase2; 2372 __le32 wcbbase2;
2018 __le32 wcbbase3; 2373 __le32 wcbbase3;
2019 __le32 fw_api_version; 2374 __le32 fw_api_version;
2375 __le32 caps;
2376 __le32 num_of_ampdu_queues;
2377 __le32 wcbbase_ampdu[MWL8K_MAX_AMPDU_QUEUES];
2020} __packed; 2378} __packed;
2021 2379
2022static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) 2380static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
2023{ 2381{
2024 struct mwl8k_priv *priv = hw->priv; 2382 struct mwl8k_priv *priv = hw->priv;
2025 struct mwl8k_cmd_get_hw_spec_ap *cmd; 2383 struct mwl8k_cmd_get_hw_spec_ap *cmd;
2026 int rc; 2384 int rc, i;
2027 u32 api_version; 2385 u32 api_version;
2028 2386
2029 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 2387 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2055,27 +2413,31 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
2055 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 2413 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
2056 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 2414 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
2057 priv->hw_rev = cmd->hw_rev; 2415 priv->hw_rev = cmd->hw_rev;
2058 mwl8k_setup_2ghz_band(hw); 2416 mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
2059 priv->ap_macids_supported = 0x000000ff; 2417 priv->ap_macids_supported = 0x000000ff;
2060 priv->sta_macids_supported = 0x00000000; 2418 priv->sta_macids_supported = 0x00000000;
2061 2419 priv->num_ampdu_queues = le32_to_cpu(cmd->num_of_ampdu_queues);
2062 off = le32_to_cpu(cmd->wcbbase0) & 0xffff; 2420 if (priv->num_ampdu_queues > MWL8K_MAX_AMPDU_QUEUES) {
2063 iowrite32(priv->txq[0].txd_dma, priv->sram + off); 2421 wiphy_warn(hw->wiphy, "fw reported %d ampdu queues"
2064 2422 " but we only support %d.\n",
2423 priv->num_ampdu_queues,
2424 MWL8K_MAX_AMPDU_QUEUES);
2425 priv->num_ampdu_queues = MWL8K_MAX_AMPDU_QUEUES;
2426 }
2065 off = le32_to_cpu(cmd->rxwrptr) & 0xffff; 2427 off = le32_to_cpu(cmd->rxwrptr) & 0xffff;
2066 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); 2428 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
2067 2429
2068 off = le32_to_cpu(cmd->rxrdptr) & 0xffff; 2430 off = le32_to_cpu(cmd->rxrdptr) & 0xffff;
2069 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); 2431 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
2070 2432
2071 off = le32_to_cpu(cmd->wcbbase1) & 0xffff; 2433 priv->txq_offset[0] = le32_to_cpu(cmd->wcbbase0) & 0xffff;
2072 iowrite32(priv->txq[1].txd_dma, priv->sram + off); 2434 priv->txq_offset[1] = le32_to_cpu(cmd->wcbbase1) & 0xffff;
2435 priv->txq_offset[2] = le32_to_cpu(cmd->wcbbase2) & 0xffff;
2436 priv->txq_offset[3] = le32_to_cpu(cmd->wcbbase3) & 0xffff;
2073 2437
2074 off = le32_to_cpu(cmd->wcbbase2) & 0xffff; 2438 for (i = 0; i < priv->num_ampdu_queues; i++)
2075 iowrite32(priv->txq[2].txd_dma, priv->sram + off); 2439 priv->txq_offset[i + MWL8K_TX_WMM_QUEUES] =
2076 2440 le32_to_cpu(cmd->wcbbase_ampdu[i]) & 0xffff;
2077 off = le32_to_cpu(cmd->wcbbase3) & 0xffff;
2078 iowrite32(priv->txq[3].txd_dma, priv->sram + off);
2079 } 2441 }
2080 2442
2081done: 2443done:
@@ -2098,12 +2460,20 @@ struct mwl8k_cmd_set_hw_spec {
2098 __le32 caps; 2460 __le32 caps;
2099 __le32 rx_queue_ptr; 2461 __le32 rx_queue_ptr;
2100 __le32 num_tx_queues; 2462 __le32 num_tx_queues;
2101 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES]; 2463 __le32 tx_queue_ptrs[MWL8K_MAX_TX_QUEUES];
2102 __le32 flags; 2464 __le32 flags;
2103 __le32 num_tx_desc_per_queue; 2465 __le32 num_tx_desc_per_queue;
2104 __le32 total_rxd; 2466 __le32 total_rxd;
2105} __packed; 2467} __packed;
2106 2468
2469/* If enabled, MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY will cause
2470 * packets to expire 500 ms after the timestamp in the tx descriptor. That is,
2471 * packets that have been queued for more than 500 ms are dropped by the
2472 * hardware. This helps minimize the issues caused by head-of-line
2473 * blocking, where a slow client can hog the bandwidth and affect traffic to
2474 * a faster client.
2475 */
2476#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
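Editor's sketch (not part of the patch): the host never performs this check itself, but with a microsecond timer the 500 ms lifetime amounts to the comparison below against tx_desc->timestamp; mwl8k_pkt_expired() is purely illustrative.

/* 500 ms expressed in 1 us timer ticks; unsigned subtraction handles
 * timer wrap-around.
 */
static bool mwl8k_pkt_expired(u32 now, u32 stamp)
{
	return (u32)(now - stamp) > 500 * 1000;
}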
2107#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 2477#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
2108#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 2478#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
2109#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010 2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
@@ -2124,7 +2494,7 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
2124 2494
2125 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); 2495 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
2126 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); 2496 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
2127 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 2497 cmd->num_tx_queues = cpu_to_le32(mwl8k_tx_queues(priv));
2128 2498
2129 /* 2499 /*
2130 * Mac80211 stack has Q0 as highest priority and Q3 as lowest in 2500 * Mac80211 stack has Q0 as highest priority and Q3 as lowest in
@@ -2132,14 +2502,15 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
2132 * in that order. Map Q3 of mac80211 to Q0 of firmware so that the 2502 * in that order. Map Q3 of mac80211 to Q0 of firmware so that the
2133 * priority is interpreted the right way in firmware. 2503 * priority is interpreted the right way in firmware.
2134 */ 2504 */
2135 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 2505 for (i = 0; i < mwl8k_tx_queues(priv); i++) {
2136 int j = MWL8K_TX_QUEUES - 1 - i; 2506 int j = mwl8k_tx_queues(priv) - 1 - i;
2137 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma); 2507 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
2138 } 2508 }
2139 2509
2140 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | 2510 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
2141 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | 2511 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
2142 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON); 2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
2513 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY);
2143 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 2514 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
2144 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 2515 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
2145 2516
@@ -2356,7 +2727,7 @@ struct mwl8k_cmd_tx_power {
2356 __le16 bw; 2727 __le16 bw;
2357 __le16 sub_ch; 2728 __le16 sub_ch;
2358 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; 2729 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
2359} __attribute__((packed)); 2730} __packed;
2360 2731
2361static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw, 2732static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
2362 struct ieee80211_conf *conf, 2733 struct ieee80211_conf *conf,
@@ -3123,6 +3494,65 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
3123} 3494}
3124 3495
3125/* 3496/*
3497 * CMD_GET_WATCHDOG_BITMAP.
3498 */
3499struct mwl8k_cmd_get_watchdog_bitmap {
3500 struct mwl8k_cmd_pkt header;
3501 u8 bitmap;
3502} __packed;
3503
3504static int mwl8k_cmd_get_watchdog_bitmap(struct ieee80211_hw *hw, u8 *bitmap)
3505{
3506 struct mwl8k_cmd_get_watchdog_bitmap *cmd;
3507 int rc;
3508
3509 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3510 if (cmd == NULL)
3511 return -ENOMEM;
3512
3513 cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_WATCHDOG_BITMAP);
3514 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3515
3516 rc = mwl8k_post_cmd(hw, &cmd->header);
3517 if (!rc)
3518 *bitmap = cmd->bitmap;
3519
3520 kfree(cmd);
3521
3522 return rc;
3523}
3524
3525#define INVALID_BA 0xAA
3526static void mwl8k_watchdog_ba_events(struct work_struct *work)
3527{
3528 int rc;
3529 u8 bitmap = 0, stream_index;
3530 struct mwl8k_ampdu_stream *streams;
3531 struct mwl8k_priv *priv =
3532 container_of(work, struct mwl8k_priv, watchdog_ba_handle);
3533
3534 rc = mwl8k_cmd_get_watchdog_bitmap(priv->hw, &bitmap);
3535 if (rc)
3536 return;
3537
3538 if (bitmap == INVALID_BA)
3539 return;
3540
3541 /* the bitmap is the hw queue number. Map it to the ampdu queue. */
3542 stream_index = bitmap - MWL8K_TX_WMM_QUEUES;
3543
3544 BUG_ON(stream_index >= priv->num_ampdu_queues);
3545
3546 streams = &priv->ampdu[stream_index];
3547
3548 if (streams->state == AMPDU_STREAM_ACTIVE)
3549 ieee80211_stop_tx_ba_session(streams->sta, streams->tid);
3550
3551 return;
3552}
3553
3554
3555/*
3126 * CMD_BSS_START. 3556 * CMD_BSS_START.
3127 */ 3557 */
3128struct mwl8k_cmd_bss_start { 3558struct mwl8k_cmd_bss_start {
@@ -3151,6 +3581,152 @@ static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
3151} 3581}
3152 3582
3153/* 3583/*
3584 * CMD_BASTREAM.
3585 */
3586
3587/*
3588 * UPSTREAM is tx direction
3589 */
3590#define BASTREAM_FLAG_DIRECTION_UPSTREAM 0x00
3591#define BASTREAM_FLAG_IMMEDIATE_TYPE 0x01
3592
3593enum ba_stream_action_type {
3594 MWL8K_BA_CREATE,
3595 MWL8K_BA_UPDATE,
3596 MWL8K_BA_DESTROY,
3597 MWL8K_BA_FLUSH,
3598 MWL8K_BA_CHECK,
3599};
3600
3601
3602struct mwl8k_create_ba_stream {
3603 __le32 flags;
3604 __le32 idle_thrs;
3605 __le32 bar_thrs;
3606 __le32 window_size;
3607 u8 peer_mac_addr[6];
3608 u8 dialog_token;
3609 u8 tid;
3610 u8 queue_id;
3611 u8 param_info;
3612 __le32 ba_context;
3613 u8 reset_seq_no_flag;
3614 __le16 curr_seq_no;
3615 u8 sta_src_mac_addr[6];
3616} __packed;
3617
3618struct mwl8k_destroy_ba_stream {
3619 __le32 flags;
3620 __le32 ba_context;
3621} __packed;
3622
3623struct mwl8k_cmd_bastream {
3624 struct mwl8k_cmd_pkt header;
3625 __le32 action;
3626 union {
3627 struct mwl8k_create_ba_stream create_params;
3628 struct mwl8k_destroy_ba_stream destroy_params;
3629 };
3630} __packed;
3631
3632static int
3633mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
3634{
3635 struct mwl8k_cmd_bastream *cmd;
3636 int rc;
3637
3638 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3639 if (cmd == NULL)
3640 return -ENOMEM;
3641
3642 cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
3643 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3644
3645 cmd->action = cpu_to_le32(MWL8K_BA_CHECK);
3646
3647 cmd->create_params.queue_id = stream->idx;
3648 memcpy(&cmd->create_params.peer_mac_addr[0], stream->sta->addr,
3649 ETH_ALEN);
3650 cmd->create_params.tid = stream->tid;
3651
3652 cmd->create_params.flags =
3653 cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE) |
3654 cpu_to_le32(BASTREAM_FLAG_DIRECTION_UPSTREAM);
3655
3656 rc = mwl8k_post_cmd(hw, &cmd->header);
3657
3658 kfree(cmd);
3659
3660 return rc;
3661}
3662
3663static int
3664mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
3665 u8 buf_size)
3666{
3667 struct mwl8k_cmd_bastream *cmd;
3668 int rc;
3669
3670 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3671 if (cmd == NULL)
3672 return -ENOMEM;
3673
3674
3675 cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
3676 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3677
3678 cmd->action = cpu_to_le32(MWL8K_BA_CREATE);
3679
3680 cmd->create_params.bar_thrs = cpu_to_le32((u32)buf_size);
3681 cmd->create_params.window_size = cpu_to_le32((u32)buf_size);
3682 cmd->create_params.queue_id = stream->idx;
3683
3684 memcpy(cmd->create_params.peer_mac_addr, stream->sta->addr, ETH_ALEN);
3685 cmd->create_params.tid = stream->tid;
3686 cmd->create_params.curr_seq_no = cpu_to_le16(0);
3687 cmd->create_params.reset_seq_no_flag = 1;
3688
3689 cmd->create_params.param_info =
3690 (stream->sta->ht_cap.ampdu_factor &
3691 IEEE80211_HT_AMPDU_PARM_FACTOR) |
3692 ((stream->sta->ht_cap.ampdu_density << 2) &
3693 IEEE80211_HT_AMPDU_PARM_DENSITY);
3694
3695 cmd->create_params.flags =
3696 cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE |
3697 BASTREAM_FLAG_DIRECTION_UPSTREAM);
3698
3699 rc = mwl8k_post_cmd(hw, &cmd->header);
3700
3701 wiphy_debug(hw->wiphy, "Created a BA stream for %pM : tid %d\n",
3702 stream->sta->addr, stream->tid);
3703 kfree(cmd);
3704
3705 return rc;
3706}
3707
3708static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
3709 struct mwl8k_ampdu_stream *stream)
3710{
3711 struct mwl8k_cmd_bastream *cmd;
3712
3713 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3714 if (cmd == NULL)
3715 return;
3716
3717 cmd->header.code = cpu_to_le16(MWL8K_CMD_BASTREAM);
3718 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3719 cmd->action = cpu_to_le32(MWL8K_BA_DESTROY);
3720
3721 cmd->destroy_params.ba_context = cpu_to_le32(stream->idx);
3722 mwl8k_post_cmd(hw, &cmd->header);
3723
3724 wiphy_debug(hw->wiphy, "Deleted BA stream index %d\n", stream->idx);
3725
3726 kfree(cmd);
3727}
3728
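Editor's sketch (not part of the patch) of how the CMD_BASTREAM helpers above are sequenced from mwl8k_ampdu_action() later in this patch; mwl8k_ba_lifecycle() is a hypothetical condensation that omits the real code's retry loop and stream locking.

/* check -> create, tearing the hardware stream back down if creation
 * fails, roughly as the TX_START/TX_OPERATIONAL handling does.
 */
static int mwl8k_ba_lifecycle(struct ieee80211_hw *hw,
			      struct mwl8k_ampdu_stream *stream, u8 buf_size)
{
	int rc;

	rc = mwl8k_check_ba(hw, stream);	/* hw queues drained? */
	if (rc)
		return rc;			/* caller may retry */

	rc = mwl8k_create_ba(hw, stream, buf_size);
	if (rc)
		mwl8k_destroy_ba(hw, stream);	/* roll back on failure */

	return rc;
}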
3729/*
3154 * CMD_SET_NEW_STN. 3730 * CMD_SET_NEW_STN.
3155 */ 3731 */
3156struct mwl8k_cmd_set_new_stn { 3732struct mwl8k_cmd_set_new_stn {
@@ -3274,7 +3850,7 @@ struct mwl8k_cmd_update_encryption {
3274 __u8 mac_addr[6]; 3850 __u8 mac_addr[6];
3275 __u8 encr_type; 3851 __u8 encr_type;
3276 3852
3277} __attribute__((packed)); 3853} __packed;
3278 3854
3279struct mwl8k_cmd_set_key { 3855struct mwl8k_cmd_set_key {
3280 struct mwl8k_cmd_pkt header; 3856 struct mwl8k_cmd_pkt header;
@@ -3294,7 +3870,7 @@ struct mwl8k_cmd_set_key {
3294 __le16 tkip_tsc_low; 3870 __le16 tkip_tsc_low;
3295 __le32 tkip_tsc_high; 3871 __le32 tkip_tsc_high;
3296 __u8 mac_addr[6]; 3872 __u8 mac_addr[6];
3297} __attribute__((packed)); 3873} __packed;
3298 3874
3299enum { 3875enum {
3300 MWL8K_ENCR_ENABLE, 3876 MWL8K_ENCR_ENABLE,
@@ -3422,7 +3998,7 @@ static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
3422 mwl8k_vif->wep_key_conf[idx].enabled = 1; 3998 mwl8k_vif->wep_key_conf[idx].enabled = 1;
3423 } 3999 }
3424 4000
3425 keymlen = 0; 4001 keymlen = key->keylen;
3426 action = MWL8K_ENCR_SET_KEY; 4002 action = MWL8K_ENCR_SET_KEY;
3427 break; 4003 break;
3428 case WLAN_CIPHER_SUITE_TKIP: 4004 case WLAN_CIPHER_SUITE_TKIP:
@@ -3496,7 +4072,6 @@ static int mwl8k_set_key(struct ieee80211_hw *hw,
3496 addr = sta->addr; 4072 addr = sta->addr;
3497 4073
3498 if (cmd_param == SET_KEY) { 4074 if (cmd_param == SET_KEY) {
3499 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3500 rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key); 4075 rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
3501 if (rc) 4076 if (rc)
3502 goto out; 4077 goto out;
@@ -3671,6 +4246,11 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
3671 tasklet_schedule(&priv->poll_rx_task); 4246 tasklet_schedule(&priv->poll_rx_task);
3672 } 4247 }
3673 4248
4249 if (status & MWL8K_A2H_INT_BA_WATCHDOG) {
4250 status &= ~MWL8K_A2H_INT_BA_WATCHDOG;
4251 ieee80211_queue_work(hw, &priv->watchdog_ba_handle);
4252 }
4253
3674 if (status) 4254 if (status)
3675 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 4255 iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
3676 4256
@@ -3699,7 +4279,7 @@ static void mwl8k_tx_poll(unsigned long data)
3699 4279
3700 spin_lock_bh(&priv->tx_lock); 4280 spin_lock_bh(&priv->tx_lock);
3701 4281
3702 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4282 for (i = 0; i < mwl8k_tx_queues(priv); i++)
3703 limit -= mwl8k_txq_reclaim(hw, i, limit, 0); 4283 limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
3704 4284
3705 if (!priv->pending_tx_pkts && priv->tx_wait != NULL) { 4285 if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
@@ -3774,6 +4354,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
3774 4354
3775 /* Enable interrupts */ 4355 /* Enable interrupts */
3776 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4356 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
4357 iowrite32(MWL8K_A2H_EVENTS,
4358 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
3777 4359
3778 rc = mwl8k_fw_lock(hw); 4360 rc = mwl8k_fw_lock(hw);
3779 if (!rc) { 4361 if (!rc) {
@@ -3829,6 +4411,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
3829 4411
3830 /* Stop finalize join worker */ 4412 /* Stop finalize join worker */
3831 cancel_work_sync(&priv->finalize_join_worker); 4413 cancel_work_sync(&priv->finalize_join_worker);
4414 cancel_work_sync(&priv->watchdog_ba_handle);
3832 if (priv->beacon_skb != NULL) 4415 if (priv->beacon_skb != NULL)
3833 dev_kfree_skb(priv->beacon_skb); 4416 dev_kfree_skb(priv->beacon_skb);
3834 4417
@@ -3837,7 +4420,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
3837 tasklet_disable(&priv->poll_rx_task); 4420 tasklet_disable(&priv->poll_rx_task);
3838 4421
3839 /* Return all skbs to mac80211 */ 4422 /* Return all skbs to mac80211 */
3840 for (i = 0; i < MWL8K_TX_QUEUES; i++) 4423 for (i = 0; i < mwl8k_tx_queues(priv); i++)
3841 mwl8k_txq_reclaim(hw, i, INT_MAX, 1); 4424 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
3842} 4425}
3843 4426
@@ -3958,9 +4541,12 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
3958 conf->power_level = 18; 4541 conf->power_level = 18;
3959 4542
3960 if (priv->ap_fw) { 4543 if (priv->ap_fw) {
3961 rc = mwl8k_cmd_tx_power(hw, conf, conf->power_level); 4544
3962 if (rc) 4545 if (conf->flags & IEEE80211_CONF_CHANGE_POWER) {
3963 goto out; 4546 rc = mwl8k_cmd_tx_power(hw, conf, conf->power_level);
4547 if (rc)
4548 goto out;
4549 }
3964 4550
3965 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3); 4551 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
3966 if (rc) 4552 if (rc)
@@ -3987,7 +4573,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3987 struct ieee80211_bss_conf *info, u32 changed) 4573 struct ieee80211_bss_conf *info, u32 changed)
3988{ 4574{
3989 struct mwl8k_priv *priv = hw->priv; 4575 struct mwl8k_priv *priv = hw->priv;
3990 u32 ap_legacy_rates; 4576 u32 ap_legacy_rates = 0;
3991 u8 ap_mcs_rates[16]; 4577 u8 ap_mcs_rates[16];
3992 int rc; 4578 int rc;
3993 4579
@@ -4312,6 +4898,8 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
4312 ret = mwl8k_cmd_update_stadb_add(hw, vif, sta); 4898 ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
4313 if (ret >= 0) { 4899 if (ret >= 0) {
4314 MWL8K_STA(sta)->peer_id = ret; 4900 MWL8K_STA(sta)->peer_id = ret;
4901 if (sta->ht_cap.ht_supported)
4902 MWL8K_STA(sta)->is_ampdu_allowed = true;
4315 ret = 0; 4903 ret = 0;
4316 } 4904 }
4317 4905
@@ -4335,14 +4923,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
4335 4923
4336 rc = mwl8k_fw_lock(hw); 4924 rc = mwl8k_fw_lock(hw);
4337 if (!rc) { 4925 if (!rc) {
4338 BUG_ON(queue > MWL8K_TX_QUEUES - 1); 4926 BUG_ON(queue > MWL8K_TX_WMM_QUEUES - 1);
4339 memcpy(&priv->wmm_params[queue], params, sizeof(*params)); 4927 memcpy(&priv->wmm_params[queue], params, sizeof(*params));
4340 4928
4341 if (!priv->wmm_enabled) 4929 if (!priv->wmm_enabled)
4342 rc = mwl8k_cmd_set_wmm_mode(hw, 1); 4930 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
4343 4931
4344 if (!rc) { 4932 if (!rc) {
4345 int q = MWL8K_TX_QUEUES - 1 - queue; 4933 int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
4346 rc = mwl8k_cmd_set_edca_params(hw, q, 4934 rc = mwl8k_cmd_set_edca_params(hw, q,
4347 params->cw_min, 4935 params->cw_min,
4348 params->cw_max, 4936 params->cw_max,
@@ -4378,21 +4966,118 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
4378 return 0; 4966 return 0;
4379} 4967}
4380 4968
4969#define MAX_AMPDU_ATTEMPTS 5
4970
4381static int 4971static int
4382mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4972mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4383 enum ieee80211_ampdu_mlme_action action, 4973 enum ieee80211_ampdu_mlme_action action,
4384 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 4974 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4385 u8 buf_size) 4975 u8 buf_size)
4386{ 4976{
4977
4978 int i, rc = 0;
4979 struct mwl8k_priv *priv = hw->priv;
4980 struct mwl8k_ampdu_stream *stream;
4981 u8 *addr = sta->addr;
4982
4983 if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
4984 return -ENOTSUPP;
4985
4986 spin_lock(&priv->stream_lock);
4987 stream = mwl8k_lookup_stream(hw, addr, tid);
4988
4387 switch (action) { 4989 switch (action) {
4388 case IEEE80211_AMPDU_RX_START: 4990 case IEEE80211_AMPDU_RX_START:
4389 case IEEE80211_AMPDU_RX_STOP: 4991 case IEEE80211_AMPDU_RX_STOP:
4390 if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION)) 4992 break;
4391 return -ENOTSUPP; 4993 case IEEE80211_AMPDU_TX_START:
4392 return 0; 4994 /* By the time we get here the hw queues may contain outgoing
4995 * packets for this RA/TID that are not part of this BA
4996 * session. The hw will assign sequence numbers to these
4997 * packets as they go out. So if we query the hw for its next
4998 * sequence number and use that for the SSN here, it may end up
4999 * being wrong, which will lead to sequence number mismatch at
5000 * the recipient. To avoid this, we reset the sequence number
5001 * to 0 for the first MPDU in this BA stream.
5002 */
5003 *ssn = 0;
5004 if (stream == NULL) {
5005 /* This means that somebody outside this driver called
5006 * ieee80211_start_tx_ba_session. This is unexpected
5007 * because we do our own rate control. Just warn and
5008 * move on.
5009 */
5010 wiphy_warn(hw->wiphy, "Unexpected call to %s. "
5011 "Proceeding anyway.\n", __func__);
5012 stream = mwl8k_add_stream(hw, sta, tid);
5013 }
5014 if (stream == NULL) {
5015 wiphy_debug(hw->wiphy, "no free AMPDU streams\n");
5016 rc = -EBUSY;
5017 break;
5018 }
5019 stream->state = AMPDU_STREAM_IN_PROGRESS;
5020
5021 /* Release the lock before we do the time consuming stuff */
5022 spin_unlock(&priv->stream_lock);
5023 for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
5024 rc = mwl8k_check_ba(hw, stream);
5025
5026 if (!rc)
5027 break;
5028 /*
5029 * HW queues take time to be flushed; give them
5030 * sufficient time.
5031 */
5032
5033 msleep(1000);
5034 }
5035 spin_lock(&priv->stream_lock);
5036 if (rc) {
5037 wiphy_err(hw->wiphy, "Stream for tid %d busy after %d"
5038 " attempts\n", tid, MAX_AMPDU_ATTEMPTS);
5039 mwl8k_remove_stream(hw, stream);
5040 rc = -EBUSY;
5041 break;
5042 }
5043 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
5044 break;
5045 case IEEE80211_AMPDU_TX_STOP:
5046 if (stream == NULL)
5047 break;
5048 if (stream->state == AMPDU_STREAM_ACTIVE) {
5049 spin_unlock(&priv->stream_lock);
5050 mwl8k_destroy_ba(hw, stream);
5051 spin_lock(&priv->stream_lock);
5052 }
5053 mwl8k_remove_stream(hw, stream);
5054 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
5055 break;
5056 case IEEE80211_AMPDU_TX_OPERATIONAL:
5057 BUG_ON(stream == NULL);
5058 BUG_ON(stream->state != AMPDU_STREAM_IN_PROGRESS);
5059 spin_unlock(&priv->stream_lock);
5060 rc = mwl8k_create_ba(hw, stream, buf_size);
5061 spin_lock(&priv->stream_lock);
5062 if (!rc)
5063 stream->state = AMPDU_STREAM_ACTIVE;
5064 else {
5065 spin_unlock(&priv->stream_lock);
5066 mwl8k_destroy_ba(hw, stream);
5067 spin_lock(&priv->stream_lock);
5068 wiphy_debug(hw->wiphy,
5069 "Failed adding stream for sta %pM tid %d\n",
5070 addr, tid);
5071 mwl8k_remove_stream(hw, stream);
5072 }
5073 break;
5074
4393 default: 5075 default:
4394 return -ENOTSUPP; 5076 rc = -ENOTSUPP;
4395 } 5077 }
5078
5079 spin_unlock(&priv->stream_lock);
5080 return rc;
4396} 5081}
4397 5082
4398static const struct ieee80211_ops mwl8k_ops = { 5083static const struct ieee80211_ops mwl8k_ops = {
@@ -4441,7 +5126,7 @@ enum {
4441 MWL8366, 5126 MWL8366,
4442}; 5127};
4443 5128
4444#define MWL8K_8366_AP_FW_API 1 5129#define MWL8K_8366_AP_FW_API 2
4445#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw" 5130#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
4446#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api) 5131#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
4447 5132
@@ -4607,6 +5292,23 @@ static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
4607 return rc; 5292 return rc;
4608} 5293}
4609 5294
5295static int mwl8k_init_txqs(struct ieee80211_hw *hw)
5296{
5297 struct mwl8k_priv *priv = hw->priv;
5298 int rc = 0;
5299 int i;
5300
5301 for (i = 0; i < mwl8k_tx_queues(priv); i++) {
5302 rc = mwl8k_txq_init(hw, i);
5303 if (rc)
5304 break;
5305 if (priv->ap_fw)
5306 iowrite32(priv->txq[i].txd_dma,
5307 priv->sram + priv->txq_offset[i]);
5308 }
5309 return rc;
5310}
5311
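Editor's sketch (not part of the patch) of the ordering constraint mwl8k_init_txqs() exists to satisfy, per the mwl8k_probe_hw() comment below; mwl8k_probe_queues() is a hypothetical condensation of the real probe flow.

/* STA firmware wants the tx queue DMA addresses before GET_HW_SPEC,
 * while AP firmware reports its queue layout (txq_offset[]) in
 * GET_HW_SPEC and only then can the queues be set up.
 */
static int mwl8k_probe_queues(struct ieee80211_hw *hw)
{
	struct mwl8k_priv *priv = hw->priv;
	int rc;

	if (!priv->ap_fw) {
		rc = mwl8k_init_txqs(hw);
		if (rc)
			return rc;
		return mwl8k_cmd_get_hw_spec_sta(hw);
	}

	rc = mwl8k_cmd_get_hw_spec_ap(hw);	/* fills txq_offset[] */
	if (!rc)
		rc = mwl8k_init_txqs(hw);
	return rc;
}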
4610/* initialize hw after successfully loading a firmware image */ 5312/* initialize hw after successfully loading a firmware image */
4611static int mwl8k_probe_hw(struct ieee80211_hw *hw) 5313static int mwl8k_probe_hw(struct ieee80211_hw *hw)
4612{ 5314{
@@ -4634,17 +5336,26 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
4634 goto err_stop_firmware; 5336 goto err_stop_firmware;
4635 rxq_refill(hw, 0, INT_MAX); 5337 rxq_refill(hw, 0, INT_MAX);
4636 5338
4637 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 5339 /* For the sta firmware, we need to know the dma addresses of tx queues
4638 rc = mwl8k_txq_init(hw, i); 5340 * before sending MWL8K_CMD_GET_HW_SPEC. So we must initialize them
5341 * prior to issuing this command. But for the AP case, we learn the
5342 * total number of queues from the result of CMD_GET_HW_SPEC, so for this
5343 * case we must initialize the tx queues after.
5344 */
5345 priv->num_ampdu_queues = 0;
5346 if (!priv->ap_fw) {
5347 rc = mwl8k_init_txqs(hw);
4639 if (rc) 5348 if (rc)
4640 goto err_free_queues; 5349 goto err_free_queues;
4641 } 5350 }
4642 5351
4643 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS); 5352 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
4644 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 5353 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
4645 iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY, 5354 iowrite32(MWL8K_A2H_INT_TX_DONE|MWL8K_A2H_INT_RX_READY|
5355 MWL8K_A2H_INT_BA_WATCHDOG,
4646 priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL); 5356 priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
4647 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); 5357 iowrite32(MWL8K_A2H_INT_OPC_DONE,
5358 priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
4648 5359
4649 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 5360 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
4650 IRQF_SHARED, MWL8K_NAME, hw); 5361 IRQF_SHARED, MWL8K_NAME, hw);
@@ -4653,6 +5364,8 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
4653 goto err_free_queues; 5364 goto err_free_queues;
4654 } 5365 }
4655 5366
5367 memset(priv->ampdu, 0, sizeof(priv->ampdu));
5368
4656 /* 5369 /*
4657 * Temporarily enable interrupts. Initial firmware host 5370 * Temporarily enable interrupts. Initial firmware host
4658 * commands use interrupts and avoid polling. Disable 5371 * commands use interrupts and avoid polling. Disable
@@ -4664,6 +5377,8 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
4664 if (priv->ap_fw) { 5377 if (priv->ap_fw) {
4665 rc = mwl8k_cmd_get_hw_spec_ap(hw); 5378 rc = mwl8k_cmd_get_hw_spec_ap(hw);
4666 if (!rc) 5379 if (!rc)
5380 rc = mwl8k_init_txqs(hw);
5381 if (!rc)
4667 rc = mwl8k_cmd_set_hw_spec(hw); 5382 rc = mwl8k_cmd_set_hw_spec(hw);
4668 } else { 5383 } else {
4669 rc = mwl8k_cmd_get_hw_spec_sta(hw); 5384 rc = mwl8k_cmd_get_hw_spec_sta(hw);
@@ -4705,7 +5420,7 @@ err_free_irq:
4705 free_irq(priv->pdev->irq, hw); 5420 free_irq(priv->pdev->irq, hw);
4706 5421
4707err_free_queues: 5422err_free_queues:
4708 for (i = 0; i < MWL8K_TX_QUEUES; i++) 5423 for (i = 0; i < mwl8k_tx_queues(priv); i++)
4709 mwl8k_txq_deinit(hw, i); 5424 mwl8k_txq_deinit(hw, i);
4710 mwl8k_rxq_deinit(hw, 0); 5425 mwl8k_rxq_deinit(hw, 0);
4711 5426
@@ -4727,7 +5442,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
4727 mwl8k_stop(hw); 5442 mwl8k_stop(hw);
4728 mwl8k_rxq_deinit(hw, 0); 5443 mwl8k_rxq_deinit(hw, 0);
4729 5444
4730 for (i = 0; i < MWL8K_TX_QUEUES; i++) 5445 for (i = 0; i < mwl8k_tx_queues(priv); i++)
4731 mwl8k_txq_deinit(hw, i); 5446 mwl8k_txq_deinit(hw, i);
4732 5447
4733 rc = mwl8k_init_firmware(hw, fw_image, false); 5448 rc = mwl8k_init_firmware(hw, fw_image, false);
@@ -4746,7 +5461,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
4746 if (rc) 5461 if (rc)
4747 goto fail; 5462 goto fail;
4748 5463
4749 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 5464 for (i = 0; i < MWL8K_TX_WMM_QUEUES; i++) {
4750 rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]); 5465 rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]);
4751 if (rc) 5466 if (rc)
4752 goto fail; 5467 goto fail;
@@ -4778,9 +5493,11 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4778 hw->extra_tx_headroom = 5493 hw->extra_tx_headroom =
4779 sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts); 5494 sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
4780 5495
5496 hw->extra_tx_headroom -= priv->ap_fw ? REDUCED_TX_HEADROOM : 0;
5497
4781 hw->channel_change_time = 10; 5498 hw->channel_change_time = 10;
4782 5499
4783 hw->queues = MWL8K_TX_QUEUES; 5500 hw->queues = MWL8K_TX_WMM_QUEUES;
4784 5501
4785 /* Set rssi values to dBm */ 5502 /* Set rssi values to dBm */
4786 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL; 5503 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
@@ -4796,6 +5513,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4796 5513
4797 /* Finalize join worker */ 5514 /* Finalize join worker */
4798 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker); 5515 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
5516 /* Handle watchdog ba events */
5517 INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events);
4799 5518
4800 /* TX reclaim and RX tasklets. */ 5519 /* TX reclaim and RX tasklets. */
4801 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw); 5520 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
@@ -4815,6 +5534,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4815 5534
4816 spin_lock_init(&priv->tx_lock); 5535 spin_lock_init(&priv->tx_lock);
4817 5536
5537 spin_lock_init(&priv->stream_lock);
5538
4818 priv->tx_wait = NULL; 5539 priv->tx_wait = NULL;
4819 5540
4820 rc = mwl8k_probe_hw(hw); 5541 rc = mwl8k_probe_hw(hw);
@@ -4836,7 +5557,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4836 return 0; 5557 return 0;
4837 5558
4838err_unprobe_hw: 5559err_unprobe_hw:
4839 for (i = 0; i < MWL8K_TX_QUEUES; i++) 5560 for (i = 0; i < mwl8k_tx_queues(priv); i++)
4840 mwl8k_txq_deinit(hw, i); 5561 mwl8k_txq_deinit(hw, i);
4841 mwl8k_rxq_deinit(hw, 0); 5562 mwl8k_rxq_deinit(hw, 0);
4842 5563
@@ -4995,10 +5716,10 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
4995 mwl8k_hw_reset(priv); 5716 mwl8k_hw_reset(priv);
4996 5717
4997 /* Return all skbs to mac80211 */ 5718 /* Return all skbs to mac80211 */
4998 for (i = 0; i < MWL8K_TX_QUEUES; i++) 5719 for (i = 0; i < mwl8k_tx_queues(priv); i++)
4999 mwl8k_txq_reclaim(hw, i, INT_MAX, 1); 5720 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
5000 5721
5001 for (i = 0; i < MWL8K_TX_QUEUES; i++) 5722 for (i = 0; i < mwl8k_tx_queues(priv); i++)
5002 mwl8k_txq_deinit(hw, i); 5723 mwl8k_txq_deinit(hw, i);
5003 5724
5004 mwl8k_rxq_deinit(hw, 0); 5725 mwl8k_rxq_deinit(hw, 0);
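The loops above now ask the device how many transmit queues it owns instead of hard-coding MWL8K_TX_QUEUES. The helper itself is not part of the hunks shown here, so the following is only a sketch of the shape implied by priv->num_ampdu_queues and MWL8K_TX_WMM_QUEUES, not the patch's actual definition:

/* Sketch only: assumed shape of the per-device queue count helper. */
static int mwl8k_tx_queues(struct mwl8k_priv *priv)
{
	return MWL8K_TX_WMM_QUEUES + priv->num_ampdu_queues;
}

num_ampdu_queues is initialised to 0 in the first hunk above, so until the firmware reports otherwise the count matches the old fixed WMM set.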
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 13d750da9301..54cc0bba66b9 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -491,7 +491,7 @@ static int p54_parse_rssical(struct ieee80211_hw *dev,
491 struct pda_rssi_cal_entry *cal = (void *) &data[offset]; 491 struct pda_rssi_cal_entry *cal = (void *) &data[offset];
492 492
493 for (i = 0; i < entries; i++) { 493 for (i = 0; i < entries; i++) {
494 u16 freq; 494 u16 freq = 0;
495 switch (i) { 495 switch (i) {
496 case IEEE80211_BAND_2GHZ: 496 case IEEE80211_BAND_2GHZ:
497 freq = 2437; 497 freq = 2437;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 2fab7d20ffc2..b6a061cbbdec 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -727,3 +727,34 @@ int p54_fetch_statistics(struct p54_common *priv)
727 p54_tx(priv, skb); 727 p54_tx(priv, skb);
728 return 0; 728 return 0;
729} 729}
730
731int p54_set_groupfilter(struct p54_common *priv)
732{
733 struct p54_group_address_table *grp;
734 struct sk_buff *skb;
735 bool on = false;
736
737 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*grp),
738 P54_CONTROL_TYPE_GROUP_ADDRESS_TABLE, GFP_KERNEL);
739 if (!skb)
740 return -ENOMEM;
741
742 grp = (struct p54_group_address_table *)skb_put(skb, sizeof(*grp));
743
744 on = !(priv->filter_flags & FIF_ALLMULTI) &&
745 (priv->mc_maclist_num > 0 &&
746 priv->mc_maclist_num <= MC_FILTER_ADDRESS_NUM);
747
748 if (on) {
749 grp->filter_enable = cpu_to_le16(1);
750 grp->num_address = cpu_to_le16(priv->mc_maclist_num);
751 memcpy(grp->mac_list, priv->mc_maclist, sizeof(grp->mac_list));
752 } else {
753 grp->filter_enable = cpu_to_le16(0);
754 grp->num_address = cpu_to_le16(0);
755 memset(grp->mac_list, 0, sizeof(grp->mac_list));
756 }
757
758 p54_tx(priv, skb);
759 return 0;
760}
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index eb581abc1079..3d8d622bec55 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -540,6 +540,7 @@ int p54_update_beacon_tim(struct p54_common *priv, u16 aid, bool set);
540int p54_setup_mac(struct p54_common *priv); 540int p54_setup_mac(struct p54_common *priv);
541int p54_set_ps(struct p54_common *priv); 541int p54_set_ps(struct p54_common *priv);
542int p54_fetch_statistics(struct p54_common *priv); 542int p54_fetch_statistics(struct p54_common *priv);
543int p54_set_groupfilter(struct p54_common *priv);
543 544
544/* e/v DCF setup */ 545/* e/v DCF setup */
545int p54_set_edcf(struct p54_common *priv); 546int p54_set_edcf(struct p54_common *priv);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index a946991989c6..a5a6d9e647bb 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -308,6 +308,31 @@ out:
308 return ret; 308 return ret;
309} 309}
310 310
311static u64 p54_prepare_multicast(struct ieee80211_hw *dev,
312 struct netdev_hw_addr_list *mc_list)
313{
314 struct p54_common *priv = dev->priv;
315 struct netdev_hw_addr *ha;
316 int i;
317
318 BUILD_BUG_ON(ARRAY_SIZE(priv->mc_maclist) !=
319 ARRAY_SIZE(((struct p54_group_address_table *)NULL)->mac_list));
320 /*
321 * The first entry is reserved for the global broadcast MAC.
322 * Otherwise the firmware will drop it and ARP will no longer work.
323 */
324 i = 1;
325 priv->mc_maclist_num = netdev_hw_addr_list_count(mc_list) + i;
326 netdev_hw_addr_list_for_each(ha, mc_list) {
327 memcpy(&priv->mc_maclist[i], ha->addr, ETH_ALEN);
328 i++;
329 if (i >= ARRAY_SIZE(priv->mc_maclist))
330 break;
331 }
332
333 return 1; /* update */
334}
335
311static void p54_configure_filter(struct ieee80211_hw *dev, 336static void p54_configure_filter(struct ieee80211_hw *dev,
312 unsigned int changed_flags, 337 unsigned int changed_flags,
313 unsigned int *total_flags, 338 unsigned int *total_flags,
@@ -316,12 +341,16 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
316 struct p54_common *priv = dev->priv; 341 struct p54_common *priv = dev->priv;
317 342
318 *total_flags &= FIF_PROMISC_IN_BSS | 343 *total_flags &= FIF_PROMISC_IN_BSS |
344 FIF_ALLMULTI |
319 FIF_OTHER_BSS; 345 FIF_OTHER_BSS;
320 346
321 priv->filter_flags = *total_flags; 347 priv->filter_flags = *total_flags;
322 348
323 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) 349 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
324 p54_setup_mac(priv); 350 p54_setup_mac(priv);
351
352 if (changed_flags & FIF_ALLMULTI || multicast)
353 p54_set_groupfilter(priv);
325} 354}
326 355
327static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue, 356static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
@@ -591,6 +620,7 @@ static const struct ieee80211_ops p54_ops = {
591 .config = p54_config, 620 .config = p54_config,
592 .flush = p54_flush, 621 .flush = p54_flush,
593 .bss_info_changed = p54_bss_info_changed, 622 .bss_info_changed = p54_bss_info_changed,
623 .prepare_multicast = p54_prepare_multicast,
594 .configure_filter = p54_configure_filter, 624 .configure_filter = p54_configure_filter,
595 .conf_tx = p54_conf_tx, 625 .conf_tx = p54_conf_tx,
596 .get_stats = p54_get_stats, 626 .get_stats = p54_get_stats,
@@ -660,6 +690,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
660 init_completion(&priv->beacon_comp); 690 init_completion(&priv->beacon_comp);
661 INIT_DELAYED_WORK(&priv->work, p54_work); 691 INIT_DELAYED_WORK(&priv->work, p54_work);
662 692
693 memset(&priv->mc_maclist[0], ~0, ETH_ALEN);
663 return dev; 694 return dev;
664} 695}
665EXPORT_SYMBOL_GPL(p54_init_common); 696EXPORT_SYMBOL_GPL(p54_init_common);
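A note on the sizing here: p54_init_common() pre-fills slot 0 of mc_maclist with the broadcast address (the memset of ~0 above), p54_prepare_multicast() copies subscribed groups from index 1 onwards and reports the count including that reserved slot, and p54_set_groupfilter() only programs the firmware table when the result still fits. A minimal standalone restatement of that decision (illustration only, mirroring the code added to fwio.c above; MC_FILTER_ADDRESS_NUM is defined elsewhere in the p54 headers):

/* Illustration, not driver code: when does the group filter stay on? */
static bool p54_groupfilter_would_enable(unsigned int filter_flags,
					 int mc_maclist_num)
{
	return !(filter_flags & FIF_ALLMULTI) &&
	       mc_maclist_num > 0 &&
	       mc_maclist_num <= MC_FILTER_ADDRESS_NUM;
}

With two subscribed groups mc_maclist_num is 3 (broadcast plus two groups) and the filter stays enabled; once FIF_ALLMULTI is requested or the list outgrows the table, filter_enable is cleared and the firmware passes all multicast frames up instead.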
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 50730fc23fe5..799d05e12595 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -211,8 +211,10 @@ struct p54_common {
211 /* BBP/MAC state */ 211 /* BBP/MAC state */
212 u8 mac_addr[ETH_ALEN]; 212 u8 mac_addr[ETH_ALEN];
213 u8 bssid[ETH_ALEN]; 213 u8 bssid[ETH_ALEN];
214 u8 mc_maclist[4][ETH_ALEN];
214 u16 wakeup_timer; 215 u16 wakeup_timer;
215 unsigned int filter_flags; 216 unsigned int filter_flags;
217 int mc_maclist_num;
216 int mode; 218 int mode;
217 u32 tsf_low32, tsf_high32; 219 u32 tsf_low32, tsf_high32;
218 u32 basic_rate_mask; 220 u32 basic_rate_mask;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 0494d7b102d4..1b753173680f 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -331,10 +331,9 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
331 struct p54p_ring_control *ring_control = priv->ring_control; 331 struct p54p_ring_control *ring_control = priv->ring_control;
332 struct p54p_desc *desc; 332 struct p54p_desc *desc;
333 dma_addr_t mapping; 333 dma_addr_t mapping;
334 u32 device_idx, idx, i; 334 u32 idx, i;
335 335
336 spin_lock_irqsave(&priv->lock, flags); 336 spin_lock_irqsave(&priv->lock, flags);
337 device_idx = le32_to_cpu(ring_control->device_idx[1]);
338 idx = le32_to_cpu(ring_control->host_idx[1]); 337 idx = le32_to_cpu(ring_control->host_idx[1]);
339 i = idx % ARRAY_SIZE(ring_control->tx_data); 338 i = idx % ARRAY_SIZE(ring_control->tx_data);
340 339
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index f630552427b7..9def1e5369a1 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -59,7 +59,6 @@ config RT2800PCI
59 select RT2800_LIB 59 select RT2800_LIB
60 select RT2X00_LIB_PCI if PCI 60 select RT2X00_LIB_PCI if PCI
61 select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X 61 select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X
62 select RT2X00_LIB_HT
63 select RT2X00_LIB_FIRMWARE 62 select RT2X00_LIB_FIRMWARE
64 select RT2X00_LIB_CRYPTO 63 select RT2X00_LIB_CRYPTO
65 select CRC_CCITT 64 select CRC_CCITT
@@ -74,17 +73,13 @@ config RT2800PCI
74if RT2800PCI 73if RT2800PCI
75 74
76config RT2800PCI_RT33XX 75config RT2800PCI_RT33XX
77 bool "rt2800pci - Include support for rt33xx devices (EXPERIMENTAL)" 76 bool "rt2800pci - Include support for rt33xx devices"
78 depends on EXPERIMENTAL 77 default y
79 default n
80 ---help--- 78 ---help---
81 This adds support for rt33xx wireless chipset family to the 79 This adds support for rt33xx wireless chipset family to the
82 rt2800pci driver. 80 rt2800pci driver.
83 Supported chips: RT3390 81 Supported chips: RT3390
84 82
85 Support for these devices is non-functional at the moment and is
86 intended for testers and developers.
87
88config RT2800PCI_RT35XX 83config RT2800PCI_RT35XX
89 bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)" 84 bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)"
90 depends on EXPERIMENTAL 85 depends on EXPERIMENTAL
@@ -98,17 +93,14 @@ config RT2800PCI_RT35XX
98 intended for testers and developers. 93 intended for testers and developers.
99 94
100config RT2800PCI_RT53XX 95config RT2800PCI_RT53XX
101 bool "rt2800-pci - Include support for rt53xx devices (EXPERIMENTAL)" 96 bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
102 depends on EXPERIMENTAL 97 depends on EXPERIMENTAL
103 default n 98 default y
104 ---help--- 99 ---help---
105 This adds support for rt53xx wireless chipset family to the 100 This adds support for rt53xx wireless chipset family to the
106 rt2800pci driver. 101 rt2800pci driver.
107 Supported chips: RT5390 102 Supported chips: RT5390
108 103
109 Support for these devices is non-functional at the moment and is
110 intended for testers and developers.
111
112endif 104endif
113 105
114config RT2500USB 106config RT2500USB
@@ -140,7 +132,6 @@ config RT2800USB
140 depends on USB 132 depends on USB
141 select RT2800_LIB 133 select RT2800_LIB
142 select RT2X00_LIB_USB 134 select RT2X00_LIB_USB
143 select RT2X00_LIB_HT
144 select RT2X00_LIB_FIRMWARE 135 select RT2X00_LIB_FIRMWARE
145 select RT2X00_LIB_CRYPTO 136 select RT2X00_LIB_CRYPTO
146 select CRC_CCITT 137 select CRC_CCITT
@@ -153,17 +144,13 @@ config RT2800USB
153if RT2800USB 144if RT2800USB
154 145
155config RT2800USB_RT33XX 146config RT2800USB_RT33XX
156 bool "rt2800usb - Include support for rt33xx devices (EXPERIMENTAL)" 147 bool "rt2800usb - Include support for rt33xx devices"
157 depends on EXPERIMENTAL 148 default y
158 default n
159 ---help--- 149 ---help---
160 This adds support for rt33xx wireless chipset family to the 150 This adds support for rt33xx wireless chipset family to the
161 rt2800usb driver. 151 rt2800usb driver.
162 Supported chips: RT3370 152 Supported chips: RT3370
163 153
164 Support for these devices is non-functional at the moment and is
165 intended for testers and developers.
166
167config RT2800USB_RT35XX 154config RT2800USB_RT35XX
168 bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)" 155 bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)"
169 depends on EXPERIMENTAL 156 depends on EXPERIMENTAL
@@ -176,6 +163,15 @@ config RT2800USB_RT35XX
176 Support for these devices is non-functional at the moment and is 163 Support for these devices is non-functional at the moment and is
177 intended for testers and developers. 164 intended for testers and developers.
178 165
166config RT2800USB_RT53XX
167 bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
168 depends on EXPERIMENTAL
169 default y
170 ---help---
171 This adds support for rt53xx wireless chipset family to the
172	  rt2800usb driver.
173 Supported chips: RT5370
174
179config RT2800USB_UNKNOWN 175config RT2800USB_UNKNOWN
180 bool "rt2800usb - Include support for unknown (USB) devices" 176 bool "rt2800usb - Include support for unknown (USB) devices"
181 default n 177 default n
@@ -207,9 +203,6 @@ config RT2X00_LIB_USB
207config RT2X00_LIB 203config RT2X00_LIB
208 tristate 204 tristate
209 205
210config RT2X00_LIB_HT
211 boolean
212
213config RT2X00_LIB_FIRMWARE 206config RT2X00_LIB_FIRMWARE
214 boolean 207 boolean
215 select FW_LOADER 208 select FW_LOADER
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 971339858297..349d5b8284a4 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -7,7 +7,6 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o
7rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o 7rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o
8rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o 8rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o 9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
10rt2x00lib-$(CONFIG_RT2X00_LIB_HT) += rt2x00ht.o
11 10
12obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o 11obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o 12obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 329f3283697b..937f9e8bf05f 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1314,8 +1314,8 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1314 } 1314 }
1315} 1315}
1316 1316
1317static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 1317static inline void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1318 struct rt2x00_field32 irq_field) 1318 struct rt2x00_field32 irq_field)
1319{ 1319{
1320 u32 reg; 1320 u32 reg;
1321 1321
@@ -1368,8 +1368,10 @@ static void rt2400pci_tbtt_tasklet(unsigned long data)
1368static void rt2400pci_rxdone_tasklet(unsigned long data) 1368static void rt2400pci_rxdone_tasklet(unsigned long data)
1369{ 1369{
1370 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 1370 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1371 rt2x00pci_rxdone(rt2x00dev); 1371 if (rt2x00pci_rxdone(rt2x00dev))
1372 rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); 1372 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1373 else
1374 rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
1373} 1375}
1374 1376
1375static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) 1377static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
@@ -1534,13 +1536,13 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1534 * Detect if this device has an hardware controlled radio. 1536 * Detect if this device has an hardware controlled radio.
1535 */ 1537 */
1536 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1538 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1537 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1539 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
1538 1540
1539 /* 1541 /*
1540 * Check if the BBP tuning should be enabled. 1542 * Check if the BBP tuning should be enabled.
1541 */ 1543 */
1542 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) 1544 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING))
1543 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 1545 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
1544 1546
1545 return 0; 1547 return 0;
1546} 1548}
@@ -1638,9 +1640,9 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1638 /* 1640 /*
1639 * This device requires the atim queue and DMA-mapped skbs. 1641 * This device requires the atim queue and DMA-mapped skbs.
1640 */ 1642 */
1641 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1643 __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1642 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1644 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
1643 __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); 1645 __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags);
1644 1646
1645 /* 1647 /*
1646 * Set the rssi offset. 1648 * Set the rssi offset.
@@ -1718,6 +1720,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1718 .tx_last_beacon = rt2400pci_tx_last_beacon, 1720 .tx_last_beacon = rt2400pci_tx_last_beacon,
1719 .rfkill_poll = rt2x00mac_rfkill_poll, 1721 .rfkill_poll = rt2x00mac_rfkill_poll,
1720 .flush = rt2x00mac_flush, 1722 .flush = rt2x00mac_flush,
1723 .set_antenna = rt2x00mac_set_antenna,
1724 .get_antenna = rt2x00mac_get_antenna,
1725 .get_ringparam = rt2x00mac_get_ringparam,
1721}; 1726};
1722 1727
1723static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { 1728static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
@@ -1738,6 +1743,7 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1738 .start_queue = rt2400pci_start_queue, 1743 .start_queue = rt2400pci_start_queue,
1739 .kick_queue = rt2400pci_kick_queue, 1744 .kick_queue = rt2400pci_kick_queue,
1740 .stop_queue = rt2400pci_stop_queue, 1745 .stop_queue = rt2400pci_stop_queue,
1746 .flush_queue = rt2x00pci_flush_queue,
1741 .write_tx_desc = rt2400pci_write_tx_desc, 1747 .write_tx_desc = rt2400pci_write_tx_desc,
1742 .write_beacon = rt2400pci_write_beacon, 1748 .write_beacon = rt2400pci_write_beacon,
1743 .fill_rxdone = rt2400pci_fill_rxdone, 1749 .fill_rxdone = rt2400pci_fill_rxdone,
@@ -1799,10 +1805,11 @@ static const struct rt2x00_ops rt2400pci_ops = {
1799 * RT2400pci module information. 1805 * RT2400pci module information.
1800 */ 1806 */
1801static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = { 1807static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
1802 { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) }, 1808 { PCI_DEVICE(0x1814, 0x0101) },
1803 { 0, } 1809 { 0, }
1804}; 1810};
1805 1811
1812
1806MODULE_AUTHOR(DRV_PROJECT); 1813MODULE_AUTHOR(DRV_PROJECT);
1807MODULE_VERSION(DRV_VERSION); 1814MODULE_VERSION(DRV_VERSION);
1808MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); 1815MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver.");
@@ -1810,10 +1817,16 @@ MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards");
1810MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); 1817MODULE_DEVICE_TABLE(pci, rt2400pci_device_table);
1811MODULE_LICENSE("GPL"); 1818MODULE_LICENSE("GPL");
1812 1819
1820static int rt2400pci_probe(struct pci_dev *pci_dev,
1821 const struct pci_device_id *id)
1822{
1823 return rt2x00pci_probe(pci_dev, &rt2400pci_ops);
1824}
1825
1813static struct pci_driver rt2400pci_driver = { 1826static struct pci_driver rt2400pci_driver = {
1814 .name = KBUILD_MODNAME, 1827 .name = KBUILD_MODNAME,
1815 .id_table = rt2400pci_device_table, 1828 .id_table = rt2400pci_device_table,
1816 .probe = rt2x00pci_probe, 1829 .probe = rt2400pci_probe,
1817 .remove = __devexit_p(rt2x00pci_remove), 1830 .remove = __devexit_p(rt2x00pci_remove),
1818 .suspend = rt2x00pci_suspend, 1831 .suspend = rt2x00pci_suspend,
1819 .resume = rt2x00pci_resume, 1832 .resume = rt2x00pci_resume,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 58277878889e..d27d7b8ba3b6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1446,8 +1446,8 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1446 } 1446 }
1447} 1447}
1448 1448
1449static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 1449static inline void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1450 struct rt2x00_field32 irq_field) 1450 struct rt2x00_field32 irq_field)
1451{ 1451{
1452 u32 reg; 1452 u32 reg;
1453 1453
@@ -1500,8 +1500,10 @@ static void rt2500pci_tbtt_tasklet(unsigned long data)
1500static void rt2500pci_rxdone_tasklet(unsigned long data) 1500static void rt2500pci_rxdone_tasklet(unsigned long data)
1501{ 1501{
1502 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 1502 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1503 rt2x00pci_rxdone(rt2x00dev); 1503 if (rt2x00pci_rxdone(rt2x00dev))
1504 rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); 1504 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1505 else
1506 rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
1505} 1507}
1506 1508
1507static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) 1509static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
@@ -1685,14 +1687,14 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1685 * Detect if this device has an hardware controlled radio. 1687 * Detect if this device has an hardware controlled radio.
1686 */ 1688 */
1687 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1689 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1688 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1690 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
1689 1691
1690 /* 1692 /*
1691 * Check if the BBP tuning should be enabled. 1693 * Check if the BBP tuning should be enabled.
1692 */ 1694 */
1693 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 1695 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1694 if (!rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) 1696 if (!rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE))
1695 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 1697 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
1696 1698
1697 /* 1699 /*
1698 * Read the RSSI <-> dBm offset information. 1700 * Read the RSSI <-> dBm offset information.
@@ -1956,9 +1958,9 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1956 /* 1958 /*
1957 * This device requires the atim queue and DMA-mapped skbs. 1959 * This device requires the atim queue and DMA-mapped skbs.
1958 */ 1960 */
1959 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1961 __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1960 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1962 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
1961 __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); 1963 __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags);
1962 1964
1963 /* 1965 /*
1964 * Set the rssi offset. 1966 * Set the rssi offset.
@@ -2011,6 +2013,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
2011 .tx_last_beacon = rt2500pci_tx_last_beacon, 2013 .tx_last_beacon = rt2500pci_tx_last_beacon,
2012 .rfkill_poll = rt2x00mac_rfkill_poll, 2014 .rfkill_poll = rt2x00mac_rfkill_poll,
2013 .flush = rt2x00mac_flush, 2015 .flush = rt2x00mac_flush,
2016 .set_antenna = rt2x00mac_set_antenna,
2017 .get_antenna = rt2x00mac_get_antenna,
2018 .get_ringparam = rt2x00mac_get_ringparam,
2014}; 2019};
2015 2020
2016static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { 2021static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
@@ -2031,6 +2036,7 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
2031 .start_queue = rt2500pci_start_queue, 2036 .start_queue = rt2500pci_start_queue,
2032 .kick_queue = rt2500pci_kick_queue, 2037 .kick_queue = rt2500pci_kick_queue,
2033 .stop_queue = rt2500pci_stop_queue, 2038 .stop_queue = rt2500pci_stop_queue,
2039 .flush_queue = rt2x00pci_flush_queue,
2034 .write_tx_desc = rt2500pci_write_tx_desc, 2040 .write_tx_desc = rt2500pci_write_tx_desc,
2035 .write_beacon = rt2500pci_write_beacon, 2041 .write_beacon = rt2500pci_write_beacon,
2036 .fill_rxdone = rt2500pci_fill_rxdone, 2042 .fill_rxdone = rt2500pci_fill_rxdone,
@@ -2092,7 +2098,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
2092 * RT2500pci module information. 2098 * RT2500pci module information.
2093 */ 2099 */
2094static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = { 2100static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
2095 { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) }, 2101 { PCI_DEVICE(0x1814, 0x0201) },
2096 { 0, } 2102 { 0, }
2097}; 2103};
2098 2104
@@ -2103,10 +2109,16 @@ MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards");
2103MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); 2109MODULE_DEVICE_TABLE(pci, rt2500pci_device_table);
2104MODULE_LICENSE("GPL"); 2110MODULE_LICENSE("GPL");
2105 2111
2112static int rt2500pci_probe(struct pci_dev *pci_dev,
2113 const struct pci_device_id *id)
2114{
2115 return rt2x00pci_probe(pci_dev, &rt2500pci_ops);
2116}
2117
2106static struct pci_driver rt2500pci_driver = { 2118static struct pci_driver rt2500pci_driver = {
2107 .name = KBUILD_MODNAME, 2119 .name = KBUILD_MODNAME,
2108 .id_table = rt2500pci_device_table, 2120 .id_table = rt2500pci_device_table,
2109 .probe = rt2x00pci_probe, 2121 .probe = rt2500pci_probe,
2110 .remove = __devexit_p(rt2x00pci_remove), 2122 .remove = __devexit_p(rt2x00pci_remove),
2111 .suspend = rt2x00pci_suspend, 2123 .suspend = rt2x00pci_suspend,
2112 .resume = rt2x00pci_resume, 2124 .resume = rt2x00pci_resume,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 979fe6596a2d..15237c275486 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1519,7 +1519,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1519 * Detect if this device has an hardware controlled radio. 1519 * Detect if this device has an hardware controlled radio.
1520 */ 1520 */
1521 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1521 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1522 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1522 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
1523 1523
1524 /* 1524 /*
1525 * Read the RSSI <-> dBm offset information. 1525 * Read the RSSI <-> dBm offset information.
@@ -1790,14 +1790,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1790 /* 1790 /*
1791 * This device requires the atim queue 1791 * This device requires the atim queue
1792 */ 1792 */
1793 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1793 __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1794 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags); 1794 __set_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags);
1795 if (!modparam_nohwcrypt) { 1795 if (!modparam_nohwcrypt) {
1796 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 1796 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
1797 __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags); 1797 __set_bit(REQUIRE_COPY_IV, &rt2x00dev->cap_flags);
1798 } 1798 }
1799 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); 1799 __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags);
1800 __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); 1800 __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
1801 1801
1802 /* 1802 /*
1803 * Set the rssi offset. 1803 * Set the rssi offset.
@@ -1824,6 +1824,9 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
1824 .conf_tx = rt2x00mac_conf_tx, 1824 .conf_tx = rt2x00mac_conf_tx,
1825 .rfkill_poll = rt2x00mac_rfkill_poll, 1825 .rfkill_poll = rt2x00mac_rfkill_poll,
1826 .flush = rt2x00mac_flush, 1826 .flush = rt2x00mac_flush,
1827 .set_antenna = rt2x00mac_set_antenna,
1828 .get_antenna = rt2x00mac_get_antenna,
1829 .get_ringparam = rt2x00mac_get_ringparam,
1827}; 1830};
1828 1831
1829static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { 1832static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
@@ -1905,58 +1908,54 @@ static const struct rt2x00_ops rt2500usb_ops = {
1905 */ 1908 */
1906static struct usb_device_id rt2500usb_device_table[] = { 1909static struct usb_device_id rt2500usb_device_table[] = {
1907 /* ASUS */ 1910 /* ASUS */
1908 { USB_DEVICE(0x0b05, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, 1911 { USB_DEVICE(0x0b05, 0x1706) },
1909 { USB_DEVICE(0x0b05, 0x1707), USB_DEVICE_DATA(&rt2500usb_ops) }, 1912 { USB_DEVICE(0x0b05, 0x1707) },
1910 /* Belkin */ 1913 /* Belkin */
1911 { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt2500usb_ops) }, 1914 { USB_DEVICE(0x050d, 0x7050) },
1912 { USB_DEVICE(0x050d, 0x7051), USB_DEVICE_DATA(&rt2500usb_ops) }, 1915 { USB_DEVICE(0x050d, 0x7051) },
1913 { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt2500usb_ops) },
1914 /* Cisco Systems */ 1916 /* Cisco Systems */
1915 { USB_DEVICE(0x13b1, 0x000d), USB_DEVICE_DATA(&rt2500usb_ops) }, 1917 { USB_DEVICE(0x13b1, 0x000d) },
1916 { USB_DEVICE(0x13b1, 0x0011), USB_DEVICE_DATA(&rt2500usb_ops) }, 1918 { USB_DEVICE(0x13b1, 0x0011) },
1917 { USB_DEVICE(0x13b1, 0x001a), USB_DEVICE_DATA(&rt2500usb_ops) }, 1919 { USB_DEVICE(0x13b1, 0x001a) },
1918 /* CNet */
1919 { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt2500usb_ops) },
1920 /* Conceptronic */ 1920 /* Conceptronic */
1921 { USB_DEVICE(0x14b2, 0x3c02), USB_DEVICE_DATA(&rt2500usb_ops) }, 1921 { USB_DEVICE(0x14b2, 0x3c02) },
1922 /* D-LINK */ 1922 /* D-LINK */
1923 { USB_DEVICE(0x2001, 0x3c00), USB_DEVICE_DATA(&rt2500usb_ops) }, 1923 { USB_DEVICE(0x2001, 0x3c00) },
1924 /* Gigabyte */ 1924 /* Gigabyte */
1925 { USB_DEVICE(0x1044, 0x8001), USB_DEVICE_DATA(&rt2500usb_ops) }, 1925 { USB_DEVICE(0x1044, 0x8001) },
1926 { USB_DEVICE(0x1044, 0x8007), USB_DEVICE_DATA(&rt2500usb_ops) }, 1926 { USB_DEVICE(0x1044, 0x8007) },
1927 /* Hercules */ 1927 /* Hercules */
1928 { USB_DEVICE(0x06f8, 0xe000), USB_DEVICE_DATA(&rt2500usb_ops) }, 1928 { USB_DEVICE(0x06f8, 0xe000) },
1929 /* Melco */ 1929 /* Melco */
1930 { USB_DEVICE(0x0411, 0x005e), USB_DEVICE_DATA(&rt2500usb_ops) }, 1930 { USB_DEVICE(0x0411, 0x005e) },
1931 { USB_DEVICE(0x0411, 0x0066), USB_DEVICE_DATA(&rt2500usb_ops) }, 1931 { USB_DEVICE(0x0411, 0x0066) },
1932 { USB_DEVICE(0x0411, 0x0067), USB_DEVICE_DATA(&rt2500usb_ops) }, 1932 { USB_DEVICE(0x0411, 0x0067) },
1933 { USB_DEVICE(0x0411, 0x008b), USB_DEVICE_DATA(&rt2500usb_ops) }, 1933 { USB_DEVICE(0x0411, 0x008b) },
1934 { USB_DEVICE(0x0411, 0x0097), USB_DEVICE_DATA(&rt2500usb_ops) }, 1934 { USB_DEVICE(0x0411, 0x0097) },
1935 /* MSI */ 1935 /* MSI */
1936 { USB_DEVICE(0x0db0, 0x6861), USB_DEVICE_DATA(&rt2500usb_ops) }, 1936 { USB_DEVICE(0x0db0, 0x6861) },
1937 { USB_DEVICE(0x0db0, 0x6865), USB_DEVICE_DATA(&rt2500usb_ops) }, 1937 { USB_DEVICE(0x0db0, 0x6865) },
1938 { USB_DEVICE(0x0db0, 0x6869), USB_DEVICE_DATA(&rt2500usb_ops) }, 1938 { USB_DEVICE(0x0db0, 0x6869) },
1939 /* Ralink */ 1939 /* Ralink */
1940 { USB_DEVICE(0x148f, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, 1940 { USB_DEVICE(0x148f, 0x1706) },
1941 { USB_DEVICE(0x148f, 0x2570), USB_DEVICE_DATA(&rt2500usb_ops) }, 1941 { USB_DEVICE(0x148f, 0x2570) },
1942 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt2500usb_ops) }, 1942 { USB_DEVICE(0x148f, 0x9020) },
1943 { USB_DEVICE(0x148f, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) },
1944 /* Sagem */ 1943 /* Sagem */
1945 { USB_DEVICE(0x079b, 0x004b), USB_DEVICE_DATA(&rt2500usb_ops) }, 1944 { USB_DEVICE(0x079b, 0x004b) },
1946 /* Siemens */ 1945 /* Siemens */
1947 { USB_DEVICE(0x0681, 0x3c06), USB_DEVICE_DATA(&rt2500usb_ops) }, 1946 { USB_DEVICE(0x0681, 0x3c06) },
1948 /* SMC */ 1947 /* SMC */
1949 { USB_DEVICE(0x0707, 0xee13), USB_DEVICE_DATA(&rt2500usb_ops) }, 1948 { USB_DEVICE(0x0707, 0xee13) },
1950 /* Spairon */ 1949 /* Spairon */
1951 { USB_DEVICE(0x114b, 0x0110), USB_DEVICE_DATA(&rt2500usb_ops) }, 1950 { USB_DEVICE(0x114b, 0x0110) },
1952 /* SURECOM */ 1951 /* SURECOM */
1953 { USB_DEVICE(0x0769, 0x11f3), USB_DEVICE_DATA(&rt2500usb_ops) }, 1952 { USB_DEVICE(0x0769, 0x11f3) },
1954 /* Trust */ 1953 /* Trust */
1955 { USB_DEVICE(0x0eb0, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) }, 1954 { USB_DEVICE(0x0eb0, 0x9020) },
1956 /* VTech */ 1955 /* VTech */
1957 { USB_DEVICE(0x0f88, 0x3012), USB_DEVICE_DATA(&rt2500usb_ops) }, 1956 { USB_DEVICE(0x0f88, 0x3012) },
1958 /* Zinwell */ 1957 /* Zinwell */
1959 { USB_DEVICE(0x5a57, 0x0260), USB_DEVICE_DATA(&rt2500usb_ops) }, 1958 { USB_DEVICE(0x5a57, 0x0260) },
1960 { 0, } 1959 { 0, }
1961}; 1960};
1962 1961
@@ -1967,10 +1966,16 @@ MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards");
1967MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); 1966MODULE_DEVICE_TABLE(usb, rt2500usb_device_table);
1968MODULE_LICENSE("GPL"); 1967MODULE_LICENSE("GPL");
1969 1968
1969static int rt2500usb_probe(struct usb_interface *usb_intf,
1970 const struct usb_device_id *id)
1971{
1972 return rt2x00usb_probe(usb_intf, &rt2500usb_ops);
1973}
1974
1970static struct usb_driver rt2500usb_driver = { 1975static struct usb_driver rt2500usb_driver = {
1971 .name = KBUILD_MODNAME, 1976 .name = KBUILD_MODNAME,
1972 .id_table = rt2500usb_device_table, 1977 .id_table = rt2500usb_device_table,
1973 .probe = rt2x00usb_probe, 1978 .probe = rt2500usb_probe,
1974 .disconnect = rt2x00usb_disconnect, 1979 .disconnect = rt2x00usb_disconnect,
1975 .suspend = rt2x00usb_suspend, 1980 .suspend = rt2x00usb_suspend,
1976 .resume = rt2x00usb_resume, 1981 .resume = rt2x00usb_resume,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 8fbc5fa965e0..f67bc9b31b28 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) 51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5370 2.4G 1T1R
54 * RF5390 2.4G 1T1R 55 * RF5390 2.4G 1T1R
55 */ 56 */
56#define RF2820 0x0001 57#define RF2820 0x0001
@@ -66,6 +67,7 @@
66#define RF3320 0x000b 67#define RF3320 0x000b
67#define RF3322 0x000c 68#define RF3322 0x000c
68#define RF3853 0x000d 69#define RF3853 0x000d
70#define RF5370 0x5370
69#define RF5390 0x5390 71#define RF5390 0x5390
70 72
71/* 73/*
@@ -2104,6 +2106,59 @@ struct mac_iveiv_entry {
2104#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00) 2106#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
2105 2107
2106/* 2108/*
2109 * EEPROM temperature compensation boundaries 802.11BG
2110 * MINUS4: If the actual TSSI is below this boundary, tx power needs to be
2111 * reduced by (agc_step * -4)
2112 * MINUS3: If the actual TSSI is below this boundary, tx power needs to be
2113 * reduced by (agc_step * -3)
2114 */
2115#define EEPROM_TSSI_BOUND_BG1 0x0037
2116#define EEPROM_TSSI_BOUND_BG1_MINUS4 FIELD16(0x00ff)
2117#define EEPROM_TSSI_BOUND_BG1_MINUS3 FIELD16(0xff00)
2118
2119/*
2120 * EEPROM temperature compensation boundaries 802.11BG
2121 * MINUS2: If the actual TSSI is below this boundary, tx power needs to be
2122 * reduced by (agc_step * -2)
2123 * MINUS1: If the actual TSSI is below this boundary, tx power needs to be
2124 * reduced by (agc_step * -1)
2125 */
2126#define EEPROM_TSSI_BOUND_BG2 0x0038
2127#define EEPROM_TSSI_BOUND_BG2_MINUS2 FIELD16(0x00ff)
2128#define EEPROM_TSSI_BOUND_BG2_MINUS1 FIELD16(0xff00)
2129
2130/*
2131 * EEPROM temperature compensation boundaries 802.11BG
2132 * REF: Reference TSSI value, no tx power changes needed
2133 * PLUS1: If the actual TSSI is above this boundary, tx power needs to be
2134 * increased by (agc_step * 1)
2135 */
2136#define EEPROM_TSSI_BOUND_BG3 0x0039
2137#define EEPROM_TSSI_BOUND_BG3_REF FIELD16(0x00ff)
2138#define EEPROM_TSSI_BOUND_BG3_PLUS1 FIELD16(0xff00)
2139
2140/*
2141 * EEPROM temperature compensation boundaries 802.11BG
2142 * PLUS2: If the actual TSSI is above this boundary, tx power needs to be
2143 * increased by (agc_step * 2)
2144 * PLUS3: If the actual TSSI is above this boundary, tx power needs to be
2145 * increased by (agc_step * 3)
2146 */
2147#define EEPROM_TSSI_BOUND_BG4 0x003a
2148#define EEPROM_TSSI_BOUND_BG4_PLUS2 FIELD16(0x00ff)
2149#define EEPROM_TSSI_BOUND_BG4_PLUS3 FIELD16(0xff00)
2150
2151/*
2152 * EEPROM temperature compensation boundaries 802.11BG
2153 * PLUS4: If the actual TSSI is above this boundary, tx power needs to be
2154 * increased by (agc_step * 4)
2155 * AGC_STEP: Temperature compensation step.
2156 */
2157#define EEPROM_TSSI_BOUND_BG5 0x003b
2158#define EEPROM_TSSI_BOUND_BG5_PLUS4 FIELD16(0x00ff)
2159#define EEPROM_TSSI_BOUND_BG5_AGC_STEP FIELD16(0xff00)
2160
2161/*
2107 * EEPROM TXPOWER 802.11A 2162 * EEPROM TXPOWER 802.11A
2108 */ 2163 */
2109#define EEPROM_TXPOWER_A1 0x003c 2164#define EEPROM_TXPOWER_A1 0x003c
@@ -2113,6 +2168,59 @@ struct mac_iveiv_entry {
2113#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) 2168#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
2114 2169
2115/* 2170/*
2171 * EEPROM temperature compensation boundaries 802.11A
2172 * MINUS4: If the actual TSSI is below this boundary, tx power needs to be
2173 * reduced by (agc_step * -4)
2174 * MINUS3: If the actual TSSI is below this boundary, tx power needs to be
2175 * reduced by (agc_step * -3)
2176 */
2177#define EEPROM_TSSI_BOUND_A1 0x006a
2178#define EEPROM_TSSI_BOUND_A1_MINUS4 FIELD16(0x00ff)
2179#define EEPROM_TSSI_BOUND_A1_MINUS3 FIELD16(0xff00)
2180
2181/*
2182 * EEPROM temperature compensation boundaries 802.11A
2183 * MINUS2: If the actual TSSI is below this boundary, tx power needs to be
2184 * reduced by (agc_step * -2)
2185 * MINUS1: If the actual TSSI is below this boundary, tx power needs to be
2186 * reduced by (agc_step * -1)
2187 */
2188#define EEPROM_TSSI_BOUND_A2 0x006b
2189#define EEPROM_TSSI_BOUND_A2_MINUS2 FIELD16(0x00ff)
2190#define EEPROM_TSSI_BOUND_A2_MINUS1 FIELD16(0xff00)
2191
2192/*
2193 * EEPROM temperature compensation boundaries 802.11A
2194 * REF: Reference TSSI value, no tx power changes needed
2195 * PLUS1: If the actual TSSI is above this boundary, tx power needs to be
2196 * increased by (agc_step * 1)
2197 */
2198#define EEPROM_TSSI_BOUND_A3 0x006c
2199#define EEPROM_TSSI_BOUND_A3_REF FIELD16(0x00ff)
2200#define EEPROM_TSSI_BOUND_A3_PLUS1 FIELD16(0xff00)
2201
2202/*
2203 * EEPROM temperature compensation boundaries 802.11A
2204 * PLUS2: If the actual TSSI is above this boundary, tx power needs to be
2205 * increased by (agc_step * 2)
2206 * PLUS3: If the actual TSSI is above this boundary, tx power needs to be
2207 * increased by (agc_step * 3)
2208 */
2209#define EEPROM_TSSI_BOUND_A4 0x006d
2210#define EEPROM_TSSI_BOUND_A4_PLUS2 FIELD16(0x00ff)
2211#define EEPROM_TSSI_BOUND_A4_PLUS3 FIELD16(0xff00)
2212
2213/*
2214 * EEPROM temperature compensation boundaries 802.11A
2215 * PLUS4: If the actual TSSI is above this boundary, tx power needs to be
2216 * increased by (agc_step * 4)
2217 * AGC_STEP: Temperature compensation step.
2218 */
2219#define EEPROM_TSSI_BOUND_A5 0x006e
2220#define EEPROM_TSSI_BOUND_A5_PLUS4 FIELD16(0x00ff)
2221#define EEPROM_TSSI_BOUND_A5_AGC_STEP FIELD16(0xff00)
2222
2223/*
2116 * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode 2224 * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode
2117 */ 2225 */
2118#define EEPROM_TXPOWER_BYRATE 0x006f 2226#define EEPROM_TXPOWER_BYRATE 0x006f
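Each band gets five of these words, together describing a nine-entry ladder of boundaries (MINUS4 down through REF to PLUS4) plus the AGC step, with two byte-sized fields packed per word. As a minimal sketch, unpacking the first 2.4 GHz word follows the same accessor pattern used elsewhere in the driver (the lookup that turns a TSSI reading into a multiple of the step is added to rt2800lib.c further down in this patch):

/* Sketch only: reading one packed boundary word from the EEPROM. */
u16 eeprom;
u8 bound_minus4, bound_minus3;

rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
bound_minus4 = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG1_MINUS4);
bound_minus3 = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG1_MINUS3);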
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index dbf74d07d947..2a6aa85cc6c9 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -687,6 +687,9 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
687 mcs = real_mcs; 687 mcs = real_mcs;
688 } 688 }
689 689
690 if (aggr == 1 || ampdu == 1)
691 __set_bit(TXDONE_AMPDU, &txdesc.flags);
692
690 /* 693 /*
691 * Ralink has a retry mechanism using a global fallback 694 * Ralink has a retry mechanism using a global fallback
692 * table. We setup this fallback table to try the immediate 695 * table. We setup this fallback table to try the immediate
@@ -727,34 +730,20 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
727 struct data_queue *queue; 730 struct data_queue *queue;
728 struct queue_entry *entry; 731 struct queue_entry *entry;
729 u32 reg; 732 u32 reg;
730 u8 pid; 733 u8 qid;
731 int i;
732 734
733 /* 735 while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
734 * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
735 * at most X times and also stop processing once the TX_STA_FIFO_VALID
736 * flag is not set anymore.
737 *
738 * The legacy drivers use X=TX_RING_SIZE but state in a comment
739 * that the TX_STA_FIFO stack has a size of 16. We stick to our
740 * tx ring size for now.
741 */
742 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
743 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
744 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
745 break;
746 736
747 /* 737 /* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus
748 * Skip this entry when it contains an invalid 738 * qid is guaranteed to be one of the TX QIDs
749 * queue identication number.
750 */ 739 */
751 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE); 740 qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
752 if (pid >= QID_RX) 741 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
753 continue; 742 if (unlikely(!queue)) {
754 743 WARNING(rt2x00dev, "Got TX status for an unavailable "
755 queue = rt2x00queue_get_tx_queue(rt2x00dev, pid); 744 "queue %u, dropping\n", qid);
756 if (unlikely(!queue))
757 continue; 745 continue;
746 }
758 747
759 /* 748 /*
760 * Inside each queue, we process each entry in a chronological 749 * Inside each queue, we process each entry in a chronological
@@ -946,25 +935,49 @@ static void rt2800_brightness_set(struct led_classdev *led_cdev,
946 unsigned int ledmode = 935 unsigned int ledmode =
947 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg, 936 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
948 EEPROM_FREQ_LED_MODE); 937 EEPROM_FREQ_LED_MODE);
938 u32 reg;
949 939
950 if (led->type == LED_TYPE_RADIO) { 940 /* Check for SoC (SOC devices don't support MCU requests) */
951 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode, 941 if (rt2x00_is_soc(led->rt2x00dev)) {
952 enabled ? 0x20 : 0); 942 rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
953 } else if (led->type == LED_TYPE_ASSOC) { 943
954 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode, 944 /* Set LED Polarity */
955 enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20); 945 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, polarity);
956 } else if (led->type == LED_TYPE_QUALITY) { 946
957 /* 947 /* Set LED Mode */
958 * The brightness is divided into 6 levels (0 - 5), 948 if (led->type == LED_TYPE_RADIO) {
959 * The specs tell us the following levels: 949 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE,
960 * 0, 1 ,3, 7, 15, 31 950 enabled ? 3 : 0);
961 * to determine the level in a simple way we can simply 951 } else if (led->type == LED_TYPE_ASSOC) {
962 * work with bitshifting: 952 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE,
963 * (1 << level) - 1 953 enabled ? 3 : 0);
964 */ 954 } else if (led->type == LED_TYPE_QUALITY) {
965 rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff, 955 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE,
966 (1 << brightness / (LED_FULL / 6)) - 1, 956 enabled ? 3 : 0);
967 polarity); 957 }
958
959 rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
960
961 } else {
962 if (led->type == LED_TYPE_RADIO) {
963 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
964 enabled ? 0x20 : 0);
965 } else if (led->type == LED_TYPE_ASSOC) {
966 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
967 enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
968 } else if (led->type == LED_TYPE_QUALITY) {
969 /*
970 * The brightness is divided into 6 levels (0 - 5),
971 * The specs tell us the following levels:
972 * 0, 1 ,3, 7, 15, 31
973 * to determine the level in a simple way we can simply
974 * work with bitshifting:
975 * (1 << level) - 1
976 */
977 rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
978 (1 << brightness / (LED_FULL / 6)) - 1,
979 polarity);
980 }
968 } 981 }
969} 982}
970 983
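For the LED_TYPE_QUALITY branch retained in the non-SoC path, the level calculation is easy to check by hand. A worked example (the brightness values are arbitrary; LED_FULL is 255 in the LED core, so LED_FULL / 6 is 42 under integer division):

/*
 * brightness = 100:  100 / 42 = 2,  (1 << 2) - 1 = 3
 * brightness = 200:  200 / 42 = 4,  (1 << 4) - 1 = 15
 *
 * which lands on the documented levels 0, 1, 3, 7, 15, 31.
 */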
@@ -1218,6 +1231,25 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1218 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 1231 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1219 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync); 1232 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
1220 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1233 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1234
1235 if (conf->sync == TSF_SYNC_AP_NONE) {
1236 /*
1237 * Tune beacon queue transmit parameters for AP mode
1238 */
1239 rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, &reg);
1240 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 0);
1241 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 1);
1242 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
1243 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_TBTT_ADJUST, 0);
1244 rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg);
1245 } else {
1246 rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG, &reg);
1247 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 4);
1248 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 2);
1249 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32);
1250 rt2x00_set_field32(&reg, TBTT_SYNC_CFG_TBTT_ADJUST, 16);
1251 rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg);
1252 }
1221 } 1253 }
1222 1254
1223 if (flags & CONFIG_UPDATE_MAC) { 1255 if (flags & CONFIG_UPDATE_MAC) {
@@ -1608,7 +1640,6 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1608 struct channel_info *info) 1640 struct channel_info *info)
1609{ 1641{
1610 u8 rfcsr; 1642 u8 rfcsr;
1611 u16 eeprom;
1612 1643
1613 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); 1644 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
1614 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); 1645 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
@@ -1638,11 +1669,10 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1638 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset); 1669 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
1639 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 1670 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
1640 1671
1641 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
1642 if (rf->channel <= 14) { 1672 if (rf->channel <= 14) {
1643 int idx = rf->channel-1; 1673 int idx = rf->channel-1;
1644 1674
1645 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) { 1675 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
1646 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { 1676 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
1647 /* r55/r59 value array of channel 1~14 */ 1677 /* r55/r59 value array of channel 1~14 */
1648 static const char r55_bt_rev[] = {0x83, 0x83, 1678 static const char r55_bt_rev[] = {0x83, 0x83,
@@ -1721,7 +1751,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1721 rt2x00_rf(rt2x00dev, RF3052) || 1751 rt2x00_rf(rt2x00dev, RF3052) ||
1722 rt2x00_rf(rt2x00dev, RF3320)) 1752 rt2x00_rf(rt2x00dev, RF3320))
1723 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1753 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
1724 else if (rt2x00_rf(rt2x00dev, RF5390)) 1754 else if (rt2x00_rf(rt2x00dev, RF5370) ||
1755 rt2x00_rf(rt2x00dev, RF5390))
1725 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); 1756 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
1726 else 1757 else
1727 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1758 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@@ -1736,8 +1767,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1736 1767
1737 if (rf->channel <= 14) { 1768 if (rf->channel <= 14) {
1738 if (!rt2x00_rt(rt2x00dev, RT5390)) { 1769 if (!rt2x00_rt(rt2x00dev, RT5390)) {
1739 if (test_bit(CONFIG_EXTERNAL_LNA_BG, 1770 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
1740 &rt2x00dev->flags)) { 1771 &rt2x00dev->cap_flags)) {
1741 rt2800_bbp_write(rt2x00dev, 82, 0x62); 1772 rt2800_bbp_write(rt2x00dev, 82, 0x62);
1742 rt2800_bbp_write(rt2x00dev, 75, 0x46); 1773 rt2800_bbp_write(rt2x00dev, 75, 0x46);
1743 } else { 1774 } else {
@@ -1748,7 +1779,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1748 } else { 1779 } else {
1749 rt2800_bbp_write(rt2x00dev, 82, 0xf2); 1780 rt2800_bbp_write(rt2x00dev, 82, 0xf2);
1750 1781
1751 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) 1782 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
1752 rt2800_bbp_write(rt2x00dev, 75, 0x46); 1783 rt2800_bbp_write(rt2x00dev, 75, 0x46);
1753 else 1784 else
1754 rt2800_bbp_write(rt2x00dev, 75, 0x50); 1785 rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -1813,17 +1844,131 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1813 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg); 1844 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
1814} 1845}
1815 1846
1847static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
1848{
1849 u8 tssi_bounds[9];
1850 u8 current_tssi;
1851 u16 eeprom;
1852 u8 step;
1853 int i;
1854
1855 /*
1856 * Read TSSI boundaries for temperature compensation from
1857 * the EEPROM.
1858 *
1859	 * Array idx              0     1     2     3     4     5     6     7     8
1860	 * Matching Delta value  -4    -3    -2    -1     0    +1    +2    +3    +4
1861	 * Example TSSI bounds  0xF0  0xD0  0xB5  0xA0  0x88  0x45  0x25  0x15  0x00
1862 */
1863 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1864 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
1865 tssi_bounds[0] = rt2x00_get_field16(eeprom,
1866 EEPROM_TSSI_BOUND_BG1_MINUS4);
1867 tssi_bounds[1] = rt2x00_get_field16(eeprom,
1868 EEPROM_TSSI_BOUND_BG1_MINUS3);
1869
1870 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
1871 tssi_bounds[2] = rt2x00_get_field16(eeprom,
1872 EEPROM_TSSI_BOUND_BG2_MINUS2);
1873 tssi_bounds[3] = rt2x00_get_field16(eeprom,
1874 EEPROM_TSSI_BOUND_BG2_MINUS1);
1875
1876 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
1877 tssi_bounds[4] = rt2x00_get_field16(eeprom,
1878 EEPROM_TSSI_BOUND_BG3_REF);
1879 tssi_bounds[5] = rt2x00_get_field16(eeprom,
1880 EEPROM_TSSI_BOUND_BG3_PLUS1);
1881
1882 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
1883 tssi_bounds[6] = rt2x00_get_field16(eeprom,
1884 EEPROM_TSSI_BOUND_BG4_PLUS2);
1885 tssi_bounds[7] = rt2x00_get_field16(eeprom,
1886 EEPROM_TSSI_BOUND_BG4_PLUS3);
1887
1888 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
1889 tssi_bounds[8] = rt2x00_get_field16(eeprom,
1890 EEPROM_TSSI_BOUND_BG5_PLUS4);
1891
1892 step = rt2x00_get_field16(eeprom,
1893 EEPROM_TSSI_BOUND_BG5_AGC_STEP);
1894 } else {
1895 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
1896 tssi_bounds[0] = rt2x00_get_field16(eeprom,
1897 EEPROM_TSSI_BOUND_A1_MINUS4);
1898 tssi_bounds[1] = rt2x00_get_field16(eeprom,
1899 EEPROM_TSSI_BOUND_A1_MINUS3);
1900
1901 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
1902 tssi_bounds[2] = rt2x00_get_field16(eeprom,
1903 EEPROM_TSSI_BOUND_A2_MINUS2);
1904 tssi_bounds[3] = rt2x00_get_field16(eeprom,
1905 EEPROM_TSSI_BOUND_A2_MINUS1);
1906
1907 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
1908 tssi_bounds[4] = rt2x00_get_field16(eeprom,
1909 EEPROM_TSSI_BOUND_A3_REF);
1910 tssi_bounds[5] = rt2x00_get_field16(eeprom,
1911 EEPROM_TSSI_BOUND_A3_PLUS1);
1912
1913 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
1914 tssi_bounds[6] = rt2x00_get_field16(eeprom,
1915 EEPROM_TSSI_BOUND_A4_PLUS2);
1916 tssi_bounds[7] = rt2x00_get_field16(eeprom,
1917 EEPROM_TSSI_BOUND_A4_PLUS3);
1918
1919 rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
1920 tssi_bounds[8] = rt2x00_get_field16(eeprom,
1921 EEPROM_TSSI_BOUND_A5_PLUS4);
1922
1923 step = rt2x00_get_field16(eeprom,
1924 EEPROM_TSSI_BOUND_A5_AGC_STEP);
1925 }
1926
1927 /*
1928 * Check if temperature compensation is supported.
1929 */
1930 if (tssi_bounds[4] == 0xff)
1931 return 0;
1932
1933 /*
1934 * Read current TSSI (BBP 49).
1935 */
1936 rt2800_bbp_read(rt2x00dev, 49, &current_tssi);
1937
1938 /*
1939 * Compare TSSI value (BBP49) with the compensation boundaries
1940 * from the EEPROM and increase or decrease tx power.
1941 */
1942 for (i = 0; i <= 3; i++) {
1943 if (current_tssi > tssi_bounds[i])
1944 break;
1945 }
1946
1947 if (i == 4) {
1948 for (i = 8; i >= 5; i--) {
1949 if (current_tssi < tssi_bounds[i])
1950 break;
1951 }
1952 }
1953
1954 return (i - 4) * step;
1955}
1956
1816static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev, 1957static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
1817 enum ieee80211_band band) 1958 enum ieee80211_band band)
1818{ 1959{
1819 u16 eeprom; 1960 u16 eeprom;
1820 u8 comp_en; 1961 u8 comp_en;
1821 u8 comp_type; 1962 u8 comp_type;
1822 int comp_value; 1963 int comp_value = 0;
1823 1964
1824 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom); 1965 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
1825 1966
1826 if (eeprom == 0xffff) 1967 /*
1968 * HT40 compensation not required.
1969 */
1970 if (eeprom == 0xffff ||
1971 !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1827 return 0; 1972 return 0;
1828 1973
1829 if (band == IEEE80211_BAND_2GHZ) { 1974 if (band == IEEE80211_BAND_2GHZ) {
@@ -1853,11 +1998,9 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
1853 return comp_value; 1998 return comp_value;
1854} 1999}
1855 2000
1856static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev, 2001static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
1857 int is_rate_b, 2002 enum ieee80211_band band, int power_level,
1858 enum ieee80211_band band, 2003 u8 txpower, int delta)
1859 int power_level,
1860 u8 txpower)
1861{ 2004{
1862 u32 reg; 2005 u32 reg;
1863 u16 eeprom; 2006 u16 eeprom;
@@ -1865,15 +2008,11 @@ static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev,
1865 u8 eirp_txpower; 2008 u8 eirp_txpower;
1866 u8 eirp_txpower_criterion; 2009 u8 eirp_txpower_criterion;
1867 u8 reg_limit; 2010 u8 reg_limit;
1868 int bw_comp = 0;
1869 2011
1870 if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b)) 2012 if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
1871 return txpower; 2013 return txpower;
1872 2014
1873 if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) 2015 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
1874 bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band);
1875
1876 if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
1877 /* 2016 /*
1878 * Check if eirp txpower exceed txpower_limit. 2017 * Check if eirp txpower exceed txpower_limit.
1879 * We use OFDM 6M as criterion and its eirp txpower 2018 * We use OFDM 6M as criterion and its eirp txpower
@@ -1895,18 +2034,19 @@ static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev,
1895 EEPROM_EIRP_MAX_TX_POWER_5GHZ); 2034 EEPROM_EIRP_MAX_TX_POWER_5GHZ);
1896 2035
1897 eirp_txpower = eirp_txpower_criterion + (txpower - criterion) + 2036 eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
1898 (is_rate_b ? 4 : 0) + bw_comp; 2037 (is_rate_b ? 4 : 0) + delta;
1899 2038
1900 reg_limit = (eirp_txpower > power_level) ? 2039 reg_limit = (eirp_txpower > power_level) ?
1901 (eirp_txpower - power_level) : 0; 2040 (eirp_txpower - power_level) : 0;
1902 } else 2041 } else
1903 reg_limit = 0; 2042 reg_limit = 0;
1904 2043
1905 return txpower + bw_comp - reg_limit; 2044 return txpower + delta - reg_limit;
1906} 2045}
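
The regulatory clamp applied above is a small arithmetic rule; a hedged standalone sketch of just the formula (names are placeholders, only the computation is taken from the hunk):

	#include <stdint.h>

	/*
	 * Sketch of the EIRP clamp in rt2800_compensate_txpower(): derive the
	 * candidate EIRP power from the OFDM 6M criterion, then subtract any
	 * excess over the configured power level from the per-rate value.
	 */
	static uint8_t clamp_txpower(uint8_t txpower, uint8_t criterion,
				     uint8_t eirp_criterion, int is_rate_b,
				     int delta, int power_level)
	{
		int eirp = eirp_criterion + (txpower - criterion) +
			   (is_rate_b ? 4 : 0) + delta;
		int reg_limit = (eirp > power_level) ? (eirp - power_level) : 0;

		return txpower + delta - reg_limit;
	}
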
1907 2046
1908static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, 2047static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1909 struct ieee80211_conf *conf) 2048 enum ieee80211_band band,
2049 int power_level)
1910{ 2050{
1911 u8 txpower; 2051 u8 txpower;
1912 u16 eeprom; 2052 u16 eeprom;
@@ -1914,8 +2054,17 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1914 u32 reg; 2054 u32 reg;
1915 u8 r1; 2055 u8 r1;
1916 u32 offset; 2056 u32 offset;
1917 enum ieee80211_band band = conf->channel->band; 2057 int delta;
1918 int power_level = conf->power_level; 2058
2059 /*
2060 * Calculate HT40 compensation delta
2061 */
2062 delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
2063
2064 /*
 2065	 * Calculate temperature compensation delta
2066 */
2067 delta += rt2800_get_gain_calibration_delta(rt2x00dev);
1919 2068
1920 /* 2069 /*
1921 * set to normal bbp tx power control mode: +/- 0dBm 2070 * set to normal bbp tx power control mode: +/- 0dBm
@@ -1944,8 +2093,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1944 */ 2093 */
1945 txpower = rt2x00_get_field16(eeprom, 2094 txpower = rt2x00_get_field16(eeprom,
1946 EEPROM_TXPOWER_BYRATE_RATE0); 2095 EEPROM_TXPOWER_BYRATE_RATE0);
1947 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2096 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
1948 power_level, txpower); 2097 power_level, txpower, delta);
1949 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower); 2098 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);
1950 2099
1951 /* 2100 /*
@@ -1955,8 +2104,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1955 */ 2104 */
1956 txpower = rt2x00_get_field16(eeprom, 2105 txpower = rt2x00_get_field16(eeprom,
1957 EEPROM_TXPOWER_BYRATE_RATE1); 2106 EEPROM_TXPOWER_BYRATE_RATE1);
1958 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2107 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
1959 power_level, txpower); 2108 power_level, txpower, delta);
1960 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower); 2109 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);
1961 2110
1962 /* 2111 /*
@@ -1966,8 +2115,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1966 */ 2115 */
1967 txpower = rt2x00_get_field16(eeprom, 2116 txpower = rt2x00_get_field16(eeprom,
1968 EEPROM_TXPOWER_BYRATE_RATE2); 2117 EEPROM_TXPOWER_BYRATE_RATE2);
1969 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2118 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
1970 power_level, txpower); 2119 power_level, txpower, delta);
1971 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower); 2120 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);
1972 2121
1973 /* 2122 /*
@@ -1977,8 +2126,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1977 */ 2126 */
1978 txpower = rt2x00_get_field16(eeprom, 2127 txpower = rt2x00_get_field16(eeprom,
1979 EEPROM_TXPOWER_BYRATE_RATE3); 2128 EEPROM_TXPOWER_BYRATE_RATE3);
1980 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2129 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
1981 power_level, txpower); 2130 power_level, txpower, delta);
1982 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower); 2131 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
1983 2132
1984 /* read the next four txpower values */ 2133 /* read the next four txpower values */
@@ -1993,8 +2142,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1993 */ 2142 */
1994 txpower = rt2x00_get_field16(eeprom, 2143 txpower = rt2x00_get_field16(eeprom,
1995 EEPROM_TXPOWER_BYRATE_RATE0); 2144 EEPROM_TXPOWER_BYRATE_RATE0);
1996 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2145 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
1997 power_level, txpower); 2146 power_level, txpower, delta);
1998 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower); 2147 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);
1999 2148
2000 /* 2149 /*
@@ -2004,8 +2153,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
2004 */ 2153 */
2005 txpower = rt2x00_get_field16(eeprom, 2154 txpower = rt2x00_get_field16(eeprom,
2006 EEPROM_TXPOWER_BYRATE_RATE1); 2155 EEPROM_TXPOWER_BYRATE_RATE1);
2007 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2156 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
2008 power_level, txpower); 2157 power_level, txpower, delta);
2009 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower); 2158 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);
2010 2159
2011 /* 2160 /*
@@ -2015,8 +2164,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
2015 */ 2164 */
2016 txpower = rt2x00_get_field16(eeprom, 2165 txpower = rt2x00_get_field16(eeprom,
2017 EEPROM_TXPOWER_BYRATE_RATE2); 2166 EEPROM_TXPOWER_BYRATE_RATE2);
2018 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2167 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
2019 power_level, txpower); 2168 power_level, txpower, delta);
2020 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower); 2169 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);
2021 2170
2022 /* 2171 /*
@@ -2026,8 +2175,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
2026 */ 2175 */
2027 txpower = rt2x00_get_field16(eeprom, 2176 txpower = rt2x00_get_field16(eeprom,
2028 EEPROM_TXPOWER_BYRATE_RATE3); 2177 EEPROM_TXPOWER_BYRATE_RATE3);
2029 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, 2178 txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band,
2030 power_level, txpower); 2179 power_level, txpower, delta);
2031 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower); 2180 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);
2032 2181
2033 rt2800_register_write(rt2x00dev, offset, reg); 2182 rt2800_register_write(rt2x00dev, offset, reg);
@@ -2037,6 +2186,13 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
2037 } 2186 }
2038} 2187}
2039 2188
2189void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
2190{
2191 rt2800_config_txpower(rt2x00dev, rt2x00dev->curr_band,
2192 rt2x00dev->tx_power);
2193}
2194EXPORT_SYMBOL_GPL(rt2800_gain_calibration);
2195
2040static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev, 2196static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
2041 struct rt2x00lib_conf *libconf) 2197 struct rt2x00lib_conf *libconf)
2042{ 2198{
@@ -2090,10 +2246,12 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
2090 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) { 2246 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
2091 rt2800_config_channel(rt2x00dev, libconf->conf, 2247 rt2800_config_channel(rt2x00dev, libconf->conf,
2092 &libconf->rf, &libconf->channel); 2248 &libconf->rf, &libconf->channel);
2093 rt2800_config_txpower(rt2x00dev, libconf->conf); 2249 rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band,
2250 libconf->conf->power_level);
2094 } 2251 }
2095 if (flags & IEEE80211_CONF_CHANGE_POWER) 2252 if (flags & IEEE80211_CONF_CHANGE_POWER)
2096 rt2800_config_txpower(rt2x00dev, libconf->conf); 2253 rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band,
2254 libconf->conf->power_level);
2097 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 2255 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
2098 rt2800_config_retry_limit(rt2x00dev, libconf); 2256 rt2800_config_retry_limit(rt2x00dev, libconf);
2099 if (flags & IEEE80211_CONF_CHANGE_PS) 2257 if (flags & IEEE80211_CONF_CHANGE_PS)
@@ -2254,7 +2412,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2254 } else if (rt2800_is_305x_soc(rt2x00dev)) { 2412 } else if (rt2800_is_305x_soc(rt2x00dev)) {
2255 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 2413 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
2256 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 2414 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
2257 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f); 2415 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
2258 } else if (rt2x00_rt(rt2x00dev, RT5390)) { 2416 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
2259 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); 2417 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
2260 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 2418 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2758,8 +2916,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2758 ant = (div_mode == 3) ? 1 : 0; 2916 ant = (div_mode == 3) ? 1 : 0;
2759 2917
2760 /* check if this is a Bluetooth combo card */ 2918 /* check if this is a Bluetooth combo card */
2761 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 2919 if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
2762 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
2763 u32 reg; 2920 u32 reg;
2764 2921
2765 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 2922 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
@@ -3155,8 +3312,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
3155 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3312 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
3156 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 3313 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
3157 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 3314 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
3158 if (!test_bit(CONFIG_EXTERNAL_LNA_BG, 3315 if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG,
3159 &rt2x00dev->flags)) 3316 &rt2x00dev->cap_flags))
3160 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); 3317 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
3161 } 3318 }
3162 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); 3319 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
@@ -3530,6 +3687,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3530 !rt2x00_rf(rt2x00dev, RF3022) && 3687 !rt2x00_rf(rt2x00dev, RF3022) &&
3531 !rt2x00_rf(rt2x00dev, RF3052) && 3688 !rt2x00_rf(rt2x00dev, RF3052) &&
3532 !rt2x00_rf(rt2x00dev, RF3320) && 3689 !rt2x00_rf(rt2x00dev, RF3320) &&
3690 !rt2x00_rf(rt2x00dev, RF5370) &&
3533 !rt2x00_rf(rt2x00dev, RF5390)) { 3691 !rt2x00_rf(rt2x00dev, RF5390)) {
3534 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3692 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
3535 return -ENODEV; 3693 return -ENODEV;
@@ -3568,26 +3726,30 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3568 } 3726 }
3569 3727
3570 /* 3728 /*
 3571	 * Read frequency offset and RF programming sequence.	3729	 * Determine external LNA information.
3572 */ 3730 */
3573 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
3574 rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
3575
3576 /*
3577 * Read external LNA informations.
3578 */
3579 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
3580
3581 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G)) 3731 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G))
3582 __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 3732 __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
3583 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G)) 3733 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G))
3584 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); 3734 __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
3585 3735
3586 /* 3736 /*
3587 * Detect if this device has an hardware controlled radio. 3737 * Detect if this device has an hardware controlled radio.
3588 */ 3738 */
3589 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_HW_RADIO)) 3739 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_HW_RADIO))
3590 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 3740 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
3741
3742 /*
3743 * Detect if this device has Bluetooth co-existence.
3744 */
3745 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST))
3746 __set_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags);
3747
3748 /*
3749 * Read frequency offset and RF programming sequence.
3750 */
3751 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
3752 rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
3591 3753
3592 /* 3754 /*
3593 * Store led settings, for correct led behaviour. 3755 * Store led settings, for correct led behaviour.
@@ -3597,7 +3759,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3597 rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); 3759 rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
3598 rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY); 3760 rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);
3599 3761
3600 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg); 3762 rt2x00dev->led_mcu_reg = eeprom;
3601#endif /* CONFIG_RT2X00_LIB_LEDS */ 3763#endif /* CONFIG_RT2X00_LIB_LEDS */
3602 3764
3603 /* 3765 /*
@@ -3607,7 +3769,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3607 3769
3608 if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) < 3770 if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
3609 EIRP_MAX_TX_POWER_LIMIT) 3771 EIRP_MAX_TX_POWER_LIMIT)
3610 __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags); 3772 __set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags);
3611 3773
3612 return 0; 3774 return 0;
3613} 3775}
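
The renaming above (CONFIG_* bits in rt2x00dev->flags becoming CAPABILITY_*/REQUIRE_* bits in a dedicated cap_flags word) is the usual decode-once-then-test bitmap idiom. A minimal sketch of the pattern in plain C; the flag names and EEPROM bit positions below are invented for illustration, the real driver uses an enum plus __set_bit()/test_bit() on cap_flags:

	#include <stdint.h>

	/* Illustrative capability bits (not the driver's real values). */
	enum {
		CAP_EXTERNAL_LNA_BG = 1u << 0,
		CAP_HW_BUTTON       = 1u << 1,
		CAP_BT_COEXIST      = 1u << 2,
	};

	/* Decode a NIC_CONF1-style EEPROM word once at init time into
	 * capability bits; later code only tests the bits. */
	static uint32_t decode_caps(uint16_t nic_conf1)
	{
		uint32_t caps = 0;

		if (nic_conf1 & (1u << 14))	/* hypothetical EXTERNAL_LNA_2G bit */
			caps |= CAP_EXTERNAL_LNA_BG;
		if (nic_conf1 & (1u << 13))	/* hypothetical HW_RADIO bit */
			caps |= CAP_HW_BUTTON;
		if (nic_conf1 & (1u << 12))	/* hypothetical BT_COEXIST bit */
			caps |= CAP_BT_COEXIST;

		return caps;
	}
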
@@ -3828,6 +3990,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3828 rt2x00_rf(rt2x00dev, RF3021) || 3990 rt2x00_rf(rt2x00dev, RF3021) ||
3829 rt2x00_rf(rt2x00dev, RF3022) || 3991 rt2x00_rf(rt2x00dev, RF3022) ||
3830 rt2x00_rf(rt2x00dev, RF3320) || 3992 rt2x00_rf(rt2x00dev, RF3320) ||
3993 rt2x00_rf(rt2x00dev, RF5370) ||
3831 rt2x00_rf(rt2x00dev, RF5390)) { 3994 rt2x00_rf(rt2x00dev, RF5390)) {
3832 spec->num_channels = 14; 3995 spec->num_channels = 14;
3833 spec->channels = rf_vals_3x; 3996 spec->channels = rf_vals_3x;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 0c92d86a36f4..f2d15941c71a 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -181,6 +181,7 @@ void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
181void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual); 181void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
182void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 182void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
183 const u32 count); 183 const u32 count);
184void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev);
184 185
185int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev); 186int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev);
186void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev); 187void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 808073aa9dcc..cc4a54f571b8 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -66,7 +66,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
66 return; 66 return;
67 67
68 for (i = 0; i < 200; i++) { 68 for (i = 0; i < 200; i++) {
69 rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg); 69 rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
70 70
71 if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) || 71 if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
72 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) || 72 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
@@ -80,8 +80,8 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
80 if (i == 200) 80 if (i == 200)
81 ERROR(rt2x00dev, "MCU request failed, no response from hardware\n"); 81 ERROR(rt2x00dev, "MCU request failed, no response from hardware\n");
82 82
83 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 83 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
84 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 84 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
85} 85}
86 86
87#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 87#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
@@ -105,7 +105,7 @@ static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
105 struct rt2x00_dev *rt2x00dev = eeprom->data; 105 struct rt2x00_dev *rt2x00dev = eeprom->data;
106 u32 reg; 106 u32 reg;
107 107
108 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg); 108 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg);
109 109
110 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); 110 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
111 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); 111 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
@@ -127,7 +127,7 @@ static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
127 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT, 127 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
128 !!eeprom->reg_chip_select); 128 !!eeprom->reg_chip_select);
129 129
130 rt2800_register_write(rt2x00dev, E2PROM_CSR, reg); 130 rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg);
131} 131}
132 132
133static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) 133static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
@@ -135,7 +135,7 @@ static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
135 struct eeprom_93cx6 eeprom; 135 struct eeprom_93cx6 eeprom;
136 u32 reg; 136 u32 reg;
137 137
138 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg); 138 rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, &reg);
139 139
140 eeprom.data = rt2x00dev; 140 eeprom.data = rt2x00dev;
141 eeprom.register_read = rt2800pci_eepromregister_read; 141 eeprom.register_read = rt2800pci_eepromregister_read;
@@ -195,9 +195,9 @@ static void rt2800pci_start_queue(struct data_queue *queue)
195 195
196 switch (queue->qid) { 196 switch (queue->qid) {
197 case QID_RX: 197 case QID_RX:
198 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 198 rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
199 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); 199 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
200 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 200 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
201 break; 201 break;
202 case QID_BEACON: 202 case QID_BEACON:
203 /* 203 /*
@@ -207,15 +207,15 @@ static void rt2800pci_start_queue(struct data_queue *queue)
207 tasklet_enable(&rt2x00dev->tbtt_tasklet); 207 tasklet_enable(&rt2x00dev->tbtt_tasklet);
208 tasklet_enable(&rt2x00dev->pretbtt_tasklet); 208 tasklet_enable(&rt2x00dev->pretbtt_tasklet);
209 209
210 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 210 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
211 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 211 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
212 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); 212 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
213 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 213 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
214 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 214 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
215 215
216 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg); 216 rt2x00pci_register_read(rt2x00dev, INT_TIMER_EN, &reg);
217 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1); 217 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
218 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg); 218 rt2x00pci_register_write(rt2x00dev, INT_TIMER_EN, reg);
219 break; 219 break;
220 default: 220 default:
221 break; 221 break;
@@ -233,11 +233,13 @@ static void rt2800pci_kick_queue(struct data_queue *queue)
233 case QID_AC_BE: 233 case QID_AC_BE:
234 case QID_AC_BK: 234 case QID_AC_BK:
235 entry = rt2x00queue_get_entry(queue, Q_INDEX); 235 entry = rt2x00queue_get_entry(queue, Q_INDEX);
236 rt2800_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), entry->entry_idx); 236 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
237 entry->entry_idx);
237 break; 238 break;
238 case QID_MGMT: 239 case QID_MGMT:
239 entry = rt2x00queue_get_entry(queue, Q_INDEX); 240 entry = rt2x00queue_get_entry(queue, Q_INDEX);
240 rt2800_register_write(rt2x00dev, TX_CTX_IDX(5), entry->entry_idx); 241 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX(5),
242 entry->entry_idx);
241 break; 243 break;
242 default: 244 default:
243 break; 245 break;
@@ -251,20 +253,20 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
251 253
252 switch (queue->qid) { 254 switch (queue->qid) {
253 case QID_RX: 255 case QID_RX:
254 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 256 rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
255 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); 257 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
256 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 258 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
257 break; 259 break;
258 case QID_BEACON: 260 case QID_BEACON:
259 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 261 rt2x00pci_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
260 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); 262 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
261 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); 263 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
262 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 264 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
263 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 265 rt2x00pci_register_write(rt2x00dev, BCN_TIME_CFG, reg);
264 266
265 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg); 267 rt2x00pci_register_read(rt2x00dev, INT_TIMER_EN, &reg);
266 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0); 268 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
267 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg); 269 rt2x00pci_register_write(rt2x00dev, INT_TIMER_EN, reg);
268 270
269 /* 271 /*
270 * Wait for tbtt tasklets to finish. 272 * Wait for tbtt tasklets to finish.
@@ -295,19 +297,19 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
295 */ 297 */
296 reg = 0; 298 reg = 0;
297 rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1); 299 rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1);
298 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg); 300 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, reg);
299 301
300 /* 302 /*
301 * Write firmware to device. 303 * Write firmware to device.
302 */ 304 */
303 rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 305 rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
304 data, len); 306 data, len);
305 307
306 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000); 308 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
307 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001); 309 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);
308 310
309 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 311 rt2x00pci_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
310 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 312 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
311 313
312 return 0; 314 return 0;
313} 315}
@@ -351,7 +353,7 @@ static void rt2800pci_clear_entry(struct queue_entry *entry)
351 * Set RX IDX in register to inform hardware that we have 353 * Set RX IDX in register to inform hardware that we have
352 * handled this entry and it is available for reuse again. 354 * handled this entry and it is available for reuse again.
353 */ 355 */
354 rt2800_register_write(rt2x00dev, RX_CRX_IDX, 356 rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX,
355 entry->entry_idx); 357 entry->entry_idx);
356 } else { 358 } else {
357 rt2x00_desc_read(entry_priv->desc, 1, &word); 359 rt2x00_desc_read(entry_priv->desc, 1, &word);
@@ -369,45 +371,51 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
369 * Initialize registers. 371 * Initialize registers.
370 */ 372 */
371 entry_priv = rt2x00dev->tx[0].entries[0].priv_data; 373 entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
372 rt2800_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma); 374 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma);
373 rt2800_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit); 375 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT0,
374 rt2800_register_write(rt2x00dev, TX_CTX_IDX0, 0); 376 rt2x00dev->tx[0].limit);
375 rt2800_register_write(rt2x00dev, TX_DTX_IDX0, 0); 377 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX0, 0);
378 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX0, 0);
376 379
377 entry_priv = rt2x00dev->tx[1].entries[0].priv_data; 380 entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
378 rt2800_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma); 381 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma);
379 rt2800_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit); 382 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT1,
380 rt2800_register_write(rt2x00dev, TX_CTX_IDX1, 0); 383 rt2x00dev->tx[1].limit);
381 rt2800_register_write(rt2x00dev, TX_DTX_IDX1, 0); 384 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX1, 0);
385 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX1, 0);
382 386
383 entry_priv = rt2x00dev->tx[2].entries[0].priv_data; 387 entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
384 rt2800_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma); 388 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma);
385 rt2800_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit); 389 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT2,
386 rt2800_register_write(rt2x00dev, TX_CTX_IDX2, 0); 390 rt2x00dev->tx[2].limit);
387 rt2800_register_write(rt2x00dev, TX_DTX_IDX2, 0); 391 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX2, 0);
392 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX2, 0);
388 393
389 entry_priv = rt2x00dev->tx[3].entries[0].priv_data; 394 entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
390 rt2800_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma); 395 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma);
391 rt2800_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit); 396 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT3,
392 rt2800_register_write(rt2x00dev, TX_CTX_IDX3, 0); 397 rt2x00dev->tx[3].limit);
393 rt2800_register_write(rt2x00dev, TX_DTX_IDX3, 0); 398 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0);
399 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0);
394 400
395 entry_priv = rt2x00dev->rx->entries[0].priv_data; 401 entry_priv = rt2x00dev->rx->entries[0].priv_data;
396 rt2800_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); 402 rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
397 rt2800_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit); 403 rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT,
398 rt2800_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1); 404 rt2x00dev->rx[0].limit);
399 rt2800_register_write(rt2x00dev, RX_DRX_IDX, 0); 405 rt2x00pci_register_write(rt2x00dev, RX_CRX_IDX,
406 rt2x00dev->rx[0].limit - 1);
407 rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0);
400 408
401 /* 409 /*
402 * Enable global DMA configuration 410 * Enable global DMA configuration
403 */ 411 */
404 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 412 rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
405 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); 413 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
406 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); 414 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
407 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); 415 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
408 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 416 rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
409 417
410 rt2800_register_write(rt2x00dev, DELAY_INT_CFG, 0); 418 rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0);
411 419
412 return 0; 420 return 0;
413} 421}
@@ -427,8 +435,8 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
427 * should clear the register to assure a clean state. 435 * should clear the register to assure a clean state.
428 */ 436 */
429 if (state == STATE_RADIO_IRQ_ON) { 437 if (state == STATE_RADIO_IRQ_ON) {
430 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 438 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
431 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 439 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
432 440
433 /* 441 /*
434 * Enable tasklets. The beacon related tasklets are 442 * Enable tasklets. The beacon related tasklets are
@@ -440,7 +448,7 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
440 } 448 }
441 449
442 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 450 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
443 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 451 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
444 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0); 452 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
445 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0); 453 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
446 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask); 454 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
@@ -459,7 +467,7 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
459 rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0); 467 rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
460 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0); 468 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
461 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0); 469 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
462 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 470 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
463 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 471 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
464 472
465 if (state == STATE_RADIO_IRQ_OFF) { 473 if (state == STATE_RADIO_IRQ_OFF) {
@@ -480,7 +488,7 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
480 /* 488 /*
481 * Reset DMA indexes 489 * Reset DMA indexes
482 */ 490 */
483 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg); 491 rt2x00pci_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
484 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1); 492 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
485 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1); 493 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
486 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1); 494 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
@@ -488,26 +496,26 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
488 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1); 496 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
489 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1); 497 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
490 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1); 498 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
491 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); 499 rt2x00pci_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
492 500
493 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); 501 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
494 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 502 rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
495 503
496 if (rt2x00_rt(rt2x00dev, RT5390)) { 504 if (rt2x00_rt(rt2x00dev, RT5390)) {
497 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg); 505 rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg);
498 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); 506 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
499 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); 507 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
500 rt2800_register_write(rt2x00dev, AUX_CTRL, reg); 508 rt2x00pci_register_write(rt2x00dev, AUX_CTRL, reg);
501 } 509 }
502 510
503 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 511 rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
504 512
505 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 513 rt2x00pci_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
506 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); 514 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
507 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); 515 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
508 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 516 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
509 517
510 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); 518 rt2x00pci_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
511 519
512 return 0; 520 return 0;
513} 521}
@@ -525,8 +533,8 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
525{ 533{
526 if (rt2x00_is_soc(rt2x00dev)) { 534 if (rt2x00_is_soc(rt2x00dev)) {
527 rt2800_disable_radio(rt2x00dev); 535 rt2800_disable_radio(rt2x00dev);
528 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); 536 rt2x00pci_register_write(rt2x00dev, PWR_PIN_CFG, 0);
529 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); 537 rt2x00pci_register_write(rt2x00dev, TX_PIN_CFG, 0);
530 } 538 }
531} 539}
532 540
@@ -537,8 +545,10 @@ static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
537 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02); 545 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
538 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); 546 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
539 } else if (state == STATE_SLEEP) { 547 } else if (state == STATE_SLEEP) {
540 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); 548 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_STATUS,
541 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); 549 0xffffffff);
550 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID,
551 0xffffffff);
542 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01); 552 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
543 } 553 }
544 554
@@ -717,12 +727,13 @@ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
717 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 727 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
718} 728}
719 729
720static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) 730static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
721{ 731{
722 struct data_queue *queue; 732 struct data_queue *queue;
723 struct queue_entry *entry; 733 struct queue_entry *entry;
724 u32 status; 734 u32 status;
725 u8 qid; 735 u8 qid;
736 int max_tx_done = 16;
726 737
727 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) { 738 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
728 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); 739 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
@@ -759,11 +770,16 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
759 770
760 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 771 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
761 rt2800_txdone_entry(entry, status); 772 rt2800_txdone_entry(entry, status);
773
774 if (--max_tx_done == 0)
775 break;
762 } 776 }
777
778 return !max_tx_done;
763} 779}
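
The bounded loop introduced here is the classic budget pattern: process at most a fixed number of completions per tasklet run and report whether work remains, so one busy queue cannot monopolize softirq context. A self-contained sketch of how the return value drives rescheduling (generic C, no kernel dependencies; pop_tx_status(), process_tx_status() and reschedule_txdone() are stand-ins for the real kfifo, tx-done and tasklet calls):

	#include <stdbool.h>

	#define TXDONE_BUDGET 16

	extern bool pop_tx_status(unsigned int *status);
	extern void process_tx_status(unsigned int status);
	extern void reschedule_txdone(void);

	/* Mirror of rt2800pci_txdone(): true means more work may be pending. */
	static bool txdone_budgeted(void)
	{
		int budget = TXDONE_BUDGET;
		unsigned int status;

		while (pop_tx_status(&status)) {
			process_tx_status(status);
			if (--budget == 0)
				break;
		}

		return budget == 0;
	}

	/* Tasklet body: re-arm ourselves instead of hogging softirq context. */
	static void txdone_tasklet(void)
	{
		if (txdone_budgeted())
			reschedule_txdone();
	}
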
764 780
765static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 781static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
766 struct rt2x00_field32 irq_field) 782 struct rt2x00_field32 irq_field)
767{ 783{
768 u32 reg; 784 u32 reg;
769 785
@@ -772,15 +788,17 @@ static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
772 * access needs locking. 788 * access needs locking.
773 */ 789 */
774 spin_lock_irq(&rt2x00dev->irqmask_lock); 790 spin_lock_irq(&rt2x00dev->irqmask_lock);
775 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 791 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
776 rt2x00_set_field32(&reg, irq_field, 1); 792 rt2x00_set_field32(&reg, irq_field, 1);
777 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 793 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
778 spin_unlock_irq(&rt2x00dev->irqmask_lock); 794 spin_unlock_irq(&rt2x00dev->irqmask_lock);
779} 795}
780 796
781static void rt2800pci_txstatus_tasklet(unsigned long data) 797static void rt2800pci_txstatus_tasklet(unsigned long data)
782{ 798{
783 rt2800pci_txdone((struct rt2x00_dev *)data); 799 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
800 if (rt2800pci_txdone(rt2x00dev))
801 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
784 802
785 /* 803 /*
786 * No need to enable the tx status interrupt here as we always 804 * No need to enable the tx status interrupt here as we always
@@ -806,8 +824,10 @@ static void rt2800pci_tbtt_tasklet(unsigned long data)
806static void rt2800pci_rxdone_tasklet(unsigned long data) 824static void rt2800pci_rxdone_tasklet(unsigned long data)
807{ 825{
808 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 826 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
809 rt2x00pci_rxdone(rt2x00dev); 827 if (rt2x00pci_rxdone(rt2x00dev))
810 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE); 828 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
829 else
830 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
811} 831}
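
The rxdone tasklet now follows the same interrupt-driven polling idea: the IRQ handler masks RX_DONE and schedules the tasklet, which keeps rescheduling itself while frames are pending and unmasks the interrupt only once the ring is drained. A compact sketch of that decision (stand-in names, not the kernel API):

	#include <stdbool.h>

	extern bool rx_ring_has_more_work(void);	/* rt2x00pci_rxdone()       */
	extern void reschedule_rxdone(void);		/* tasklet_schedule()       */
	extern void enable_rx_done_irq(void);		/* INT_MASK_CSR unmask path */

	static void rxdone_tasklet(void)
	{
		if (rx_ring_has_more_work())
			reschedule_rxdone();
		else
			enable_rx_done_irq();
	}
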
812 832
813static void rt2800pci_autowake_tasklet(unsigned long data) 833static void rt2800pci_autowake_tasklet(unsigned long data)
@@ -841,7 +861,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
841 * need to lock the kfifo. 861 * need to lock the kfifo.
842 */ 862 */
843 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { 863 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
844 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status); 864 rt2x00pci_register_read(rt2x00dev, TX_STA_FIFO, &status);
845 865
846 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) 866 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
847 break; 867 break;
@@ -863,8 +883,8 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
863 u32 reg, mask; 883 u32 reg, mask;
864 884
865 /* Read status and ACK all interrupts */ 885 /* Read status and ACK all interrupts */
866 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 886 rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
867 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 887 rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
868 888
869 if (!reg) 889 if (!reg)
870 return IRQ_NONE; 890 return IRQ_NONE;
@@ -904,9 +924,9 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
904 * the tasklet will reenable the appropriate interrupts. 924 * the tasklet will reenable the appropriate interrupts.
905 */ 925 */
906 spin_lock(&rt2x00dev->irqmask_lock); 926 spin_lock(&rt2x00dev->irqmask_lock);
907 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 927 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
908 reg &= mask; 928 reg &= mask;
909 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 929 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
910 spin_unlock(&rt2x00dev->irqmask_lock); 930 spin_unlock(&rt2x00dev->irqmask_lock);
911 931
912 return IRQ_HANDLED; 932 return IRQ_HANDLED;
@@ -956,28 +976,28 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
956 * This device has multiple filters for control frames 976 * This device has multiple filters for control frames
957 * and has a separate filter for PS Poll frames. 977 * and has a separate filter for PS Poll frames.
958 */ 978 */
959 __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); 979 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
960 __set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags); 980 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
961 981
962 /* 982 /*
963 * This device has a pre tbtt interrupt and thus fetches 983 * This device has a pre tbtt interrupt and thus fetches
964 * a new beacon directly prior to transmission. 984 * a new beacon directly prior to transmission.
965 */ 985 */
966 __set_bit(DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, &rt2x00dev->flags); 986 __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
967 987
968 /* 988 /*
969 * This device requires firmware. 989 * This device requires firmware.
970 */ 990 */
971 if (!rt2x00_is_soc(rt2x00dev)) 991 if (!rt2x00_is_soc(rt2x00dev))
972 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 992 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
973 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 993 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
974 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 994 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
975 __set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags); 995 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
976 __set_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags); 996 __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
977 if (!modparam_nohwcrypt) 997 if (!modparam_nohwcrypt)
978 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 998 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
979 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 999 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
980 __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags); 1000 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
981 1001
982 /* 1002 /*
983 * Set the rssi offset. 1003 * Set the rssi offset.
@@ -1008,6 +1028,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
1008 .ampdu_action = rt2800_ampdu_action, 1028 .ampdu_action = rt2800_ampdu_action,
1009 .flush = rt2x00mac_flush, 1029 .flush = rt2x00mac_flush,
1010 .get_survey = rt2800_get_survey, 1030 .get_survey = rt2800_get_survey,
1031 .get_ringparam = rt2x00mac_get_ringparam,
1011}; 1032};
1012 1033
1013static const struct rt2800_ops rt2800pci_rt2800_ops = { 1034static const struct rt2800_ops rt2800pci_rt2800_ops = {
@@ -1043,9 +1064,11 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1043 .link_stats = rt2800_link_stats, 1064 .link_stats = rt2800_link_stats,
1044 .reset_tuner = rt2800_reset_tuner, 1065 .reset_tuner = rt2800_reset_tuner,
1045 .link_tuner = rt2800_link_tuner, 1066 .link_tuner = rt2800_link_tuner,
1067 .gain_calibration = rt2800_gain_calibration,
1046 .start_queue = rt2800pci_start_queue, 1068 .start_queue = rt2800pci_start_queue,
1047 .kick_queue = rt2800pci_kick_queue, 1069 .kick_queue = rt2800pci_kick_queue,
1048 .stop_queue = rt2800pci_stop_queue, 1070 .stop_queue = rt2800pci_stop_queue,
1071 .flush_queue = rt2x00pci_flush_queue,
1049 .write_tx_desc = rt2800pci_write_tx_desc, 1072 .write_tx_desc = rt2800pci_write_tx_desc,
1050 .write_tx_data = rt2800_write_tx_data, 1073 .write_tx_data = rt2800_write_tx_data,
1051 .write_beacon = rt2800_write_beacon, 1074 .write_beacon = rt2800_write_beacon,
@@ -1105,36 +1128,36 @@ static const struct rt2x00_ops rt2800pci_ops = {
1105 */ 1128 */
1106#ifdef CONFIG_PCI 1129#ifdef CONFIG_PCI
1107static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { 1130static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1108 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1131 { PCI_DEVICE(0x1814, 0x0601) },
1109 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1132 { PCI_DEVICE(0x1814, 0x0681) },
1110 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1133 { PCI_DEVICE(0x1814, 0x0701) },
1111 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1134 { PCI_DEVICE(0x1814, 0x0781) },
1112 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1135 { PCI_DEVICE(0x1814, 0x3090) },
1113 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1136 { PCI_DEVICE(0x1814, 0x3091) },
1114 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1137 { PCI_DEVICE(0x1814, 0x3092) },
1115 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1138 { PCI_DEVICE(0x1432, 0x7708) },
1116 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1139 { PCI_DEVICE(0x1432, 0x7727) },
1117 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1140 { PCI_DEVICE(0x1432, 0x7728) },
1118 { PCI_DEVICE(0x1432, 0x7738), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1141 { PCI_DEVICE(0x1432, 0x7738) },
1119 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1142 { PCI_DEVICE(0x1432, 0x7748) },
1120 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1143 { PCI_DEVICE(0x1432, 0x7758) },
1121 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1144 { PCI_DEVICE(0x1432, 0x7768) },
1122 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1145 { PCI_DEVICE(0x1462, 0x891a) },
1123 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1146 { PCI_DEVICE(0x1a3b, 0x1059) },
1124#ifdef CONFIG_RT2800PCI_RT33XX 1147#ifdef CONFIG_RT2800PCI_RT33XX
1125 { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1148 { PCI_DEVICE(0x1814, 0x3390) },
1126#endif 1149#endif
1127#ifdef CONFIG_RT2800PCI_RT35XX 1150#ifdef CONFIG_RT2800PCI_RT35XX
1128 { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1151 { PCI_DEVICE(0x1432, 0x7711) },
1129 { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1152 { PCI_DEVICE(0x1432, 0x7722) },
1130 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1153 { PCI_DEVICE(0x1814, 0x3060) },
1131 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1154 { PCI_DEVICE(0x1814, 0x3062) },
1132 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1155 { PCI_DEVICE(0x1814, 0x3562) },
1133 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1156 { PCI_DEVICE(0x1814, 0x3592) },
1134 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1157 { PCI_DEVICE(0x1814, 0x3593) },
1135#endif 1158#endif
1136#ifdef CONFIG_RT2800PCI_RT53XX 1159#ifdef CONFIG_RT2800PCI_RT53XX
1137 { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1160 { PCI_DEVICE(0x1814, 0x5390) },
1138#endif 1161#endif
1139 { 0, } 1162 { 0, }
1140}; 1163};
@@ -1170,10 +1193,16 @@ static struct platform_driver rt2800soc_driver = {
1170#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ 1193#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
1171 1194
1172#ifdef CONFIG_PCI 1195#ifdef CONFIG_PCI
1196static int rt2800pci_probe(struct pci_dev *pci_dev,
1197 const struct pci_device_id *id)
1198{
1199 return rt2x00pci_probe(pci_dev, &rt2800pci_ops);
1200}
1201
1173static struct pci_driver rt2800pci_driver = { 1202static struct pci_driver rt2800pci_driver = {
1174 .name = KBUILD_MODNAME, 1203 .name = KBUILD_MODNAME,
1175 .id_table = rt2800pci_device_table, 1204 .id_table = rt2800pci_device_table,
1176 .probe = rt2x00pci_probe, 1205 .probe = rt2800pci_probe,
1177 .remove = __devexit_p(rt2x00pci_remove), 1206 .remove = __devexit_p(rt2x00pci_remove),
1178 .suspend = rt2x00pci_suspend, 1207 .suspend = rt2x00pci_suspend,
1179 .resume = rt2x00pci_resume, 1208 .resume = rt2x00pci_resume,
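
Dropping PCI_DEVICE_DATA() from the ID table works because the ops pointer no longer travels through driver_data; the new one-line probe wrapper supplies it directly. A generic sketch of the pattern (structure names are placeholders, not the kernel definitions):

	/* Instead of stashing an ops pointer in every ID-table entry, a small
	 * probe wrapper passes the single ops structure this driver uses. */
	struct my_ops;				/* stand-in for rt2800pci_ops  */
	struct my_dev;				/* stand-in for struct pci_dev */
	struct my_id;				/* stand-in for pci_device_id  */

	extern const struct my_ops driver_ops;
	extern int generic_probe(struct my_dev *dev, const struct my_ops *ops);

	static int driver_probe(struct my_dev *dev, const struct my_id *id)
	{
		(void)id;			/* no per-entry driver_data needed */
		return generic_probe(dev, &driver_ops);
	}
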
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 37509d019910..ba82c972703a 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -59,16 +59,16 @@ static void rt2800usb_start_queue(struct data_queue *queue)
59 59
60 switch (queue->qid) { 60 switch (queue->qid) {
61 case QID_RX: 61 case QID_RX:
62 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 62 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
63 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); 63 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
64 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 64 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
65 break; 65 break;
66 case QID_BEACON: 66 case QID_BEACON:
67 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 67 rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
68 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 68 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
69 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); 69 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
70 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 70 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
71 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 71 rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
72 break; 72 break;
73 default: 73 default:
74 break; 74 break;
@@ -82,16 +82,16 @@ static void rt2800usb_stop_queue(struct data_queue *queue)
82 82
83 switch (queue->qid) { 83 switch (queue->qid) {
84 case QID_RX: 84 case QID_RX:
85 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 85 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
86 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); 86 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
87 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 87 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
88 break; 88 break;
89 case QID_BEACON: 89 case QID_BEACON:
90 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 90 rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
91 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); 91 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
92 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); 92 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
93 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 93 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
94 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 94 rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
95 break; 95 break;
96 default: 96 default:
97 break; 97 break;
@@ -99,6 +99,63 @@ static void rt2800usb_stop_queue(struct data_queue *queue)
99} 99}
100 100
101/* 101/*
102 * test if there is an entry in any TX queue for which DMA is done
103 * but the TX status has not been returned yet
104 */
105static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
106{
107 struct data_queue *queue;
108
109 tx_queue_for_each(rt2x00dev, queue) {
110 if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) !=
111 rt2x00queue_get_entry(queue, Q_INDEX_DONE))
112 return true;
113 }
114 return false;
115}
116
117static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
118 int urb_status, u32 tx_status)
119{
120 if (urb_status) {
121 WARNING(rt2x00dev, "rt2x00usb_register_read_async failed: %d\n", urb_status);
122 return false;
123 }
124
125 /* try to read all TX_STA_FIFO entries before scheduling txdone_work */
126 if (rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID)) {
127 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &tx_status)) {
128 WARNING(rt2x00dev, "TX status FIFO overrun, "
129 "drop tx status report.\n");
130 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
131 } else
132 return true;
133 } else if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) {
134 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
135 } else if (rt2800usb_txstatus_pending(rt2x00dev)) {
136 mod_timer(&rt2x00dev->txstatus_timer, jiffies + msecs_to_jiffies(2));
137 }
138
139 return false;
140}
141
142static void rt2800usb_tx_dma_done(struct queue_entry *entry)
143{
144 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
145
146 rt2x00usb_register_read_async(rt2x00dev, TX_STA_FIFO,
147 rt2800usb_tx_sta_fifo_read_completed);
148}
149
150static void rt2800usb_tx_sta_fifo_timeout(unsigned long data)
151{
152 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
153
154 rt2x00usb_register_read_async(rt2x00dev, TX_STA_FIFO,
155 rt2800usb_tx_sta_fifo_read_completed);
156}
157
158/*
102 * Firmware functions 159 * Firmware functions
103 */ 160 */
104static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 161static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -129,11 +186,11 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
129 /* 186 /*
130 * Write firmware to device. 187 * Write firmware to device.
131 */ 188 */
132 rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 189 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
133 data + offset, length); 190 data + offset, length);
134 191
135 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 192 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
136 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 193 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
137 194
138 /* 195 /*
139 * Send firmware request to device to load firmware, 196 * Send firmware request to device to load firmware,
@@ -148,7 +205,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
148 } 205 }
149 206
150 msleep(10); 207 msleep(10);
151 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 208 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
152 209
153 return 0; 210 return 0;
154} 211}
@@ -166,22 +223,22 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
166 if (rt2800_wait_csr_ready(rt2x00dev)) 223 if (rt2800_wait_csr_ready(rt2x00dev))
167 return -EBUSY; 224 return -EBUSY;
168 225
169 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 226 rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
170 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000); 227 rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
171 228
172 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 229 rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
173 230
174 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 231 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
175 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); 232 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
176 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); 233 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
177 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 234 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
178 235
179 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000); 236 rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
180 237
181 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 238 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
182 USB_MODE_RESET, REGISTER_TIMEOUT); 239 USB_MODE_RESET, REGISTER_TIMEOUT);
183 240
184 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); 241 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
185 242
186 return 0; 243 return 0;
187} 244}
@@ -193,7 +250,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
193 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev))) 250 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev)))
194 return -EIO; 251 return -EIO;
195 252
196 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg); 253 rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, &reg);
197 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 254 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
198 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0); 255 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
199 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128); 256 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
@@ -206,7 +263,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
206 / 1024) - 3); 263 / 1024) - 3);
207 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); 264 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
208 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 265 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
209 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg); 266 rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, reg);
210 267
211 return rt2800_enable_radio(rt2x00dev); 268 return rt2800_enable_radio(rt2x00dev);
212} 269}
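
rt2800usb_enable_radio() builds USB_DMA_CFG by reading the register and updating named bit fields through rt2x00_set_field32() before writing it back. Below is a minimal userspace sketch of that mask-and-shift pattern; the two field positions are made up for illustration and do not match the real USB_DMA_CFG layout.

/* Sketch of the rt2x00_set_field32()-style read-modify-write; the field
 * descriptors below are hypothetical, not the real register layout. */
#include <stdint.h>
#include <stdio.h>

struct field32 { uint32_t mask; unsigned int shift; };

static const struct field32 RX_BULK_AGG_TIMEOUT = { 0x0000ff00, 8 };
static const struct field32 RX_BULK_EN          = { 0x00400000, 22 };

static void set_field32(uint32_t *reg, struct field32 f, uint32_t value)
{
        *reg = (*reg & ~f.mask) | ((value << f.shift) & f.mask);
}

int main(void)
{
        uint32_t reg = 0;   /* would come from rt2x00usb_register_read() */

        set_field32(&reg, RX_BULK_AGG_TIMEOUT, 128);
        set_field32(&reg, RX_BULK_EN, 1);

        printf("value to write back: 0x%08x\n", reg);   /* 0x00408000 */
        return 0;
}
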
@@ -282,12 +339,12 @@ static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
282 unsigned int i; 339 unsigned int i;
283 u32 reg; 340 u32 reg;
284 341
285 rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg); 342 rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
286 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) { 343 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
287 WARNING(rt2x00dev, "TX HW queue 0 timed out," 344 WARNING(rt2x00dev, "TX HW queue 0 timed out,"
288 " invoke forced kick\n"); 345 " invoke forced kick\n");
289 346
290 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40012); 347 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012);
291 348
292 for (i = 0; i < 10; i++) { 349 for (i = 0; i < 10; i++) {
293 udelay(10); 350 udelay(10);
@@ -295,15 +352,15 @@ static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
295 break; 352 break;
296 } 353 }
297 354
298 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006); 355 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
299 } 356 }
300 357
301 rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg); 358 rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
302 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) { 359 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
303 WARNING(rt2x00dev, "TX HW queue 1 timed out," 360 WARNING(rt2x00dev, "TX HW queue 1 timed out,"
304 " invoke forced kick\n"); 361 " invoke forced kick\n");
305 362
306 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf4000a); 363 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
307 364
308 for (i = 0; i < 10; i++) { 365 for (i = 0; i < 10; i++) {
309 udelay(10); 366 udelay(10);
@@ -311,7 +368,7 @@ static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
311 break; 368 break;
312 } 369 }
313 370
314 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006); 371 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
315 } 372 }
316 373
317 rt2x00usb_watchdog(rt2x00dev); 374 rt2x00usb_watchdog(rt2x00dev);
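
The watchdog checks the per-queue packet counters in TXRXQ_PCNT; when a queue has stalled it writes a "kick" value to PBF_CFG, polls the counter a bounded number of times, then restores the normal value. The loop below is a tiny userspace model of that bounded-poll idiom; the fake counter only simulates a queue draining after a few iterations.

/* Userspace model of the bounded kick-and-poll in the watchdog; the
 * counter below is fake and only simulates a queue draining. */
#include <stdbool.h>
#include <stdio.h>

static int pending = 3;
static bool queue_drained(void) { return --pending <= 0; }

int main(void)
{
        int i;

        /* the real driver writes the "kick" value to PBF_CFG here */
        for (i = 0; i < 10; i++) {
                /* udelay(10) between polls in the driver */
                if (queue_drained())
                        break;
        }
        /* the real driver restores the normal PBF_CFG value here */

        if (i < 10)
                printf("queue drained after %d polls\n", i + 1);
        else
                printf("queue still stuck, giving up\n");
        return 0;
}
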
@@ -420,13 +477,24 @@ static void rt2800usb_work_txdone(struct work_struct *work)
420 while (!rt2x00queue_empty(queue)) { 477 while (!rt2x00queue_empty(queue)) {
421 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 478 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
422 479
423 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || 480 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
424 !test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) 481 break;
482 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
483 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
484 else if (rt2x00queue_status_timeout(entry))
485 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
486 else
425 break; 487 break;
426
427 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
428 } 488 }
429 } 489 }
490
491 /*
492 * The hw may delay sending the packet after DMA complete
493 * if the medium is busy, thus the TX_STA_FIFO entry is
494 * also delayed -> use a timer to retrieve it.
495 */
496 if (rt2800usb_txstatus_pending(rt2x00dev))
497 mod_timer(&rt2x00dev->txstatus_timer, jiffies + msecs_to_jiffies(2));
430} 498}
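
The reworked txdone loop distinguishes three cases per entry: still owned by the device (stop walking the queue), URB failure (report TXDONE_FAILURE immediately), and a status report that never arrived within the timeout (report TXDONE_UNKNOWN); anything else is left for the TX_STA_FIFO handler, and a 2 ms timer is re-armed while reports are still pending. A small model of that classification, with the entry flags reduced to plain booleans:

/* Model of the per-entry verdict; the flags are reduced to booleans and
 * TXDONE_WAIT stands in for "leave the entry for the status handler". */
#include <stdbool.h>
#include <stdio.h>

enum verdict { TXDONE_WAIT, TXDONE_FAILURE, TXDONE_UNKNOWN };

struct entry_model {
        bool owned_by_device;   /* ENTRY_OWNER_DEVICE_DATA */
        bool io_failed;         /* ENTRY_DATA_IO_FAILED    */
        bool status_timeout;    /* status report timed out */
};

static enum verdict classify(const struct entry_model *e)
{
        if (e->owned_by_device)
                return TXDONE_WAIT;     /* DMA not finished: stop the walk */
        if (e->io_failed)
                return TXDONE_FAILURE;  /* URB failed, no report will come */
        if (e->status_timeout)
                return TXDONE_UNKNOWN;  /* gave up waiting for TX_STA_FIFO */
        return TXDONE_WAIT;             /* report may still arrive: re-arm timer */
}

int main(void)
{
        struct entry_model e = { .io_failed = true };
        printf("verdict: %d\n", classify(&e));
        return 0;
}
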
431 499
432/* 500/*
@@ -553,19 +621,24 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
553 * This device has multiple filters for control frames 621 * This device has multiple filters for control frames
554 * and has a separate filter for PS Poll frames. 622 * and has a separate filter for PS Poll frames.
555 */ 623 */
556 __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); 624 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
557 __set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags); 625 __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
558 626
559 /* 627 /*
560 * This device requires firmware. 628 * This device requires firmware.
561 */ 629 */
562 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 630 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
563 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 631 __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
564 if (!modparam_nohwcrypt) 632 if (!modparam_nohwcrypt)
565 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 633 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
566 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 634 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
567 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); 635 __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
568 __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags); 636 __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
637 __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
638
639 setup_timer(&rt2x00dev->txstatus_timer,
640 rt2800usb_tx_sta_fifo_timeout,
641 (unsigned long) rt2x00dev);
569 642
570 /* 643 /*
571 * Set the rssi offset. 644 * Set the rssi offset.
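
rt2800usb now owns a txstatus_timer that is armed from the txdone work whenever status reports are still pending. The fragment below is a kernel-style sketch (not standalone-runnable) of how such a timer is typically wired in this kernel generation; the callback body is an assumption, only the setup_timer()/mod_timer() wiring appears in the patch itself.

/* Kernel-style sketch; the callback body is hypothetical, only the
 * setup_timer()/mod_timer() wiring is taken from the patch. */
static void txstatus_timeout(unsigned long data)
{
        struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *) data;

        /* hand the pending status reports back to the txdone work */
        queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}

/* in probe: */
setup_timer(&rt2x00dev->txstatus_timer, txstatus_timeout,
            (unsigned long) rt2x00dev);

/* while reports are pending (as in rt2800usb_work_txdone above): */
mod_timer(&rt2x00dev->txstatus_timer, jiffies + msecs_to_jiffies(2));

/* on teardown the timer must be stopped before the device goes away: */
del_timer_sync(&rt2x00dev->txstatus_timer);
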
@@ -602,6 +675,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
602 .ampdu_action = rt2800_ampdu_action, 675 .ampdu_action = rt2800_ampdu_action,
603 .flush = rt2x00mac_flush, 676 .flush = rt2x00mac_flush,
604 .get_survey = rt2800_get_survey, 677 .get_survey = rt2800_get_survey,
678 .get_ringparam = rt2x00mac_get_ringparam,
605}; 679};
606 680
607static const struct rt2800_ops rt2800usb_rt2800_ops = { 681static const struct rt2800_ops rt2800usb_rt2800_ops = {
@@ -630,11 +704,13 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
630 .link_stats = rt2800_link_stats, 704 .link_stats = rt2800_link_stats,
631 .reset_tuner = rt2800_reset_tuner, 705 .reset_tuner = rt2800_reset_tuner,
632 .link_tuner = rt2800_link_tuner, 706 .link_tuner = rt2800_link_tuner,
707 .gain_calibration = rt2800_gain_calibration,
633 .watchdog = rt2800usb_watchdog, 708 .watchdog = rt2800usb_watchdog,
634 .start_queue = rt2800usb_start_queue, 709 .start_queue = rt2800usb_start_queue,
635 .kick_queue = rt2x00usb_kick_queue, 710 .kick_queue = rt2x00usb_kick_queue,
636 .stop_queue = rt2800usb_stop_queue, 711 .stop_queue = rt2800usb_stop_queue,
637 .flush_queue = rt2x00usb_flush_queue, 712 .flush_queue = rt2x00usb_flush_queue,
713 .tx_dma_done = rt2800usb_tx_dma_done,
638 .write_tx_desc = rt2800usb_write_tx_desc, 714 .write_tx_desc = rt2800usb_write_tx_desc,
639 .write_tx_data = rt2800usb_write_tx_data, 715 .write_tx_data = rt2800usb_write_tx_data,
640 .write_beacon = rt2800_write_beacon, 716 .write_beacon = rt2800_write_beacon,
@@ -695,294 +771,340 @@ static const struct rt2x00_ops rt2800usb_ops = {
695 */ 771 */
696static struct usb_device_id rt2800usb_device_table[] = { 772static struct usb_device_id rt2800usb_device_table[] = {
697 /* Abocom */ 773 /* Abocom */
698 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 774 { USB_DEVICE(0x07b8, 0x2870) },
699 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 775 { USB_DEVICE(0x07b8, 0x2770) },
700 { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, 776 { USB_DEVICE(0x07b8, 0x3070) },
701 { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, 777 { USB_DEVICE(0x07b8, 0x3071) },
702 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 778 { USB_DEVICE(0x07b8, 0x3072) },
703 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 779 { USB_DEVICE(0x1482, 0x3c09) },
704 /* AirTies */ 780 /* AirTies */
705 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) }, 781 { USB_DEVICE(0x1eda, 0x2012) },
782 { USB_DEVICE(0x1eda, 0x2310) },
706 /* Allwin */ 783 /* Allwin */
707 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 784 { USB_DEVICE(0x8516, 0x2070) },
708 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 785 { USB_DEVICE(0x8516, 0x2770) },
709 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 786 { USB_DEVICE(0x8516, 0x2870) },
710 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, 787 { USB_DEVICE(0x8516, 0x3070) },
711 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, 788 { USB_DEVICE(0x8516, 0x3071) },
712 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 789 { USB_DEVICE(0x8516, 0x3072) },
790 /* Alpha Networks */
791 { USB_DEVICE(0x14b2, 0x3c06) },
792 { USB_DEVICE(0x14b2, 0x3c07) },
793 { USB_DEVICE(0x14b2, 0x3c09) },
794 { USB_DEVICE(0x14b2, 0x3c12) },
795 { USB_DEVICE(0x14b2, 0x3c23) },
796 { USB_DEVICE(0x14b2, 0x3c25) },
797 { USB_DEVICE(0x14b2, 0x3c27) },
798 { USB_DEVICE(0x14b2, 0x3c28) },
799 { USB_DEVICE(0x14b2, 0x3c2c) },
713 /* Amit */ 800 /* Amit */
714 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 801 { USB_DEVICE(0x15c5, 0x0008) },
715 /* Askey */ 802 /* Askey */
716 { USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) }, 803 { USB_DEVICE(0x1690, 0x0740) },
717 /* ASUS */ 804 /* ASUS */
718 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) }, 805 { USB_DEVICE(0x0b05, 0x1731) },
719 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 806 { USB_DEVICE(0x0b05, 0x1732) },
720 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 807 { USB_DEVICE(0x0b05, 0x1742) },
721 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) }, 808 { USB_DEVICE(0x0b05, 0x1784) },
722 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, 809 { USB_DEVICE(0x1761, 0x0b05) },
723 /* AzureWave */ 810 /* AzureWave */
724 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 811 { USB_DEVICE(0x13d3, 0x3247) },
725 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, 812 { USB_DEVICE(0x13d3, 0x3273) },
726 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) }, 813 { USB_DEVICE(0x13d3, 0x3305) },
727 { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) }, 814 { USB_DEVICE(0x13d3, 0x3307) },
728 { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) }, 815 { USB_DEVICE(0x13d3, 0x3321) },
729 /* Belkin */ 816 /* Belkin */
730 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, 817 { USB_DEVICE(0x050d, 0x8053) },
731 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, 818 { USB_DEVICE(0x050d, 0x805c) },
732 { USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) }, 819 { USB_DEVICE(0x050d, 0x815c) },
733 { USB_DEVICE(0x050d, 0x825b), USB_DEVICE_DATA(&rt2800usb_ops) }, 820 { USB_DEVICE(0x050d, 0x825b) },
734 { USB_DEVICE(0x050d, 0x935a), USB_DEVICE_DATA(&rt2800usb_ops) }, 821 { USB_DEVICE(0x050d, 0x935a) },
735 { USB_DEVICE(0x050d, 0x935b), USB_DEVICE_DATA(&rt2800usb_ops) }, 822 { USB_DEVICE(0x050d, 0x935b) },
736 /* Buffalo */ 823 /* Buffalo */
737 { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) }, 824 { USB_DEVICE(0x0411, 0x00e8) },
738 { USB_DEVICE(0x0411, 0x016f), USB_DEVICE_DATA(&rt2800usb_ops) }, 825 { USB_DEVICE(0x0411, 0x016f) },
739 /* Conceptronic */ 826 { USB_DEVICE(0x0411, 0x01a2) },
740 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
741 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
742 { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
743 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
744 { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
745 { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
746 { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
747 { USB_DEVICE(0x14b2, 0x3c28), USB_DEVICE_DATA(&rt2800usb_ops) },
748 /* Corega */ 827 /* Corega */
749 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) }, 828 { USB_DEVICE(0x07aa, 0x002f) },
750 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, 829 { USB_DEVICE(0x07aa, 0x003c) },
751 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 830 { USB_DEVICE(0x07aa, 0x003f) },
752 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) }, 831 { USB_DEVICE(0x18c5, 0x0012) },
753 /* D-Link */ 832 /* D-Link */
754 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 833 { USB_DEVICE(0x07d1, 0x3c09) },
755 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) }, 834 { USB_DEVICE(0x07d1, 0x3c0a) },
756 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) }, 835 { USB_DEVICE(0x07d1, 0x3c0d) },
757 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) }, 836 { USB_DEVICE(0x07d1, 0x3c0e) },
758 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) }, 837 { USB_DEVICE(0x07d1, 0x3c0f) },
759 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, 838 { USB_DEVICE(0x07d1, 0x3c11) },
760 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) }, 839 { USB_DEVICE(0x07d1, 0x3c16) },
761 /* Draytek */ 840 /* Draytek */
762 { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) }, 841 { USB_DEVICE(0x07fa, 0x7712) },
763 /* Edimax */ 842 /* Edimax */
764 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, 843 { USB_DEVICE(0x7392, 0x7711) },
765 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, 844 { USB_DEVICE(0x7392, 0x7717) },
766 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, 845 { USB_DEVICE(0x7392, 0x7718) },
767 /* Encore */ 846 /* Encore */
768 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) }, 847 { USB_DEVICE(0x203d, 0x1480) },
769 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) }, 848 { USB_DEVICE(0x203d, 0x14a9) },
770 /* EnGenius */ 849 /* EnGenius */
771 { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, 850 { USB_DEVICE(0x1740, 0x9701) },
772 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, 851 { USB_DEVICE(0x1740, 0x9702) },
773 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) }, 852 { USB_DEVICE(0x1740, 0x9703) },
774 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) }, 853 { USB_DEVICE(0x1740, 0x9705) },
775 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) }, 854 { USB_DEVICE(0x1740, 0x9706) },
776 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) }, 855 { USB_DEVICE(0x1740, 0x9707) },
777 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) }, 856 { USB_DEVICE(0x1740, 0x9708) },
778 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) }, 857 { USB_DEVICE(0x1740, 0x9709) },
858 /* Gemtek */
859 { USB_DEVICE(0x15a9, 0x0012) },
779 /* Gigabyte */ 860 /* Gigabyte */
780 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) }, 861 { USB_DEVICE(0x1044, 0x800b) },
781 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) }, 862 { USB_DEVICE(0x1044, 0x800d) },
782 /* Hawking */ 863 /* Hawking */
783 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, 864 { USB_DEVICE(0x0e66, 0x0001) },
784 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, 865 { USB_DEVICE(0x0e66, 0x0003) },
785 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) }, 866 { USB_DEVICE(0x0e66, 0x0009) },
786 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) }, 867 { USB_DEVICE(0x0e66, 0x000b) },
787 { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) }, 868 { USB_DEVICE(0x0e66, 0x0013) },
788 { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) }, 869 { USB_DEVICE(0x0e66, 0x0017) },
789 { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) }, 870 { USB_DEVICE(0x0e66, 0x0018) },
790 /* I-O DATA */ 871 /* I-O DATA */
791 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) }, 872 { USB_DEVICE(0x04bb, 0x0945) },
792 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) }, 873 { USB_DEVICE(0x04bb, 0x0947) },
793 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) }, 874 { USB_DEVICE(0x04bb, 0x0948) },
794 /* Linksys */ 875 /* Linksys */
795 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, 876 { USB_DEVICE(0x13b1, 0x0031) },
796 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, 877 { USB_DEVICE(0x1737, 0x0070) },
878 { USB_DEVICE(0x1737, 0x0071) },
797 /* Logitec */ 879 /* Logitec */
798 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, 880 { USB_DEVICE(0x0789, 0x0162) },
799 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, 881 { USB_DEVICE(0x0789, 0x0163) },
800 { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) }, 882 { USB_DEVICE(0x0789, 0x0164) },
801 { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) }, 883 { USB_DEVICE(0x0789, 0x0166) },
802 /* Motorola */ 884 /* Motorola */
803 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 885 { USB_DEVICE(0x100d, 0x9031) },
804 /* MSI */ 886 /* MSI */
805 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) }, 887 { USB_DEVICE(0x0db0, 0x3820) },
806 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) }, 888 { USB_DEVICE(0x0db0, 0x3821) },
807 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) }, 889 { USB_DEVICE(0x0db0, 0x3822) },
808 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) }, 890 { USB_DEVICE(0x0db0, 0x3870) },
809 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) }, 891 { USB_DEVICE(0x0db0, 0x3871) },
810 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, 892 { USB_DEVICE(0x0db0, 0x6899) },
811 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) }, 893 { USB_DEVICE(0x0db0, 0x821a) },
812 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) }, 894 { USB_DEVICE(0x0db0, 0x822a) },
813 { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) }, 895 { USB_DEVICE(0x0db0, 0x822b) },
814 { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) }, 896 { USB_DEVICE(0x0db0, 0x822c) },
815 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) }, 897 { USB_DEVICE(0x0db0, 0x870a) },
816 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) }, 898 { USB_DEVICE(0x0db0, 0x871a) },
817 { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) }, 899 { USB_DEVICE(0x0db0, 0x871b) },
818 { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) }, 900 { USB_DEVICE(0x0db0, 0x871c) },
819 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) }, 901 { USB_DEVICE(0x0db0, 0x899a) },
820 /* Para */ 902 /* Para */
821 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) }, 903 { USB_DEVICE(0x20b8, 0x8888) },
822 /* Pegatron */ 904 /* Pegatron */
823 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, 905 { USB_DEVICE(0x1d4d, 0x000c) },
824 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) }, 906 { USB_DEVICE(0x1d4d, 0x000e) },
825 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) }, 907 { USB_DEVICE(0x1d4d, 0x0011) },
826 /* Philips */ 908 /* Philips */
827 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, 909 { USB_DEVICE(0x0471, 0x200f) },
828 /* Planex */ 910 /* Planex */
829 { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) }, 911 { USB_DEVICE(0x2019, 0xab25) },
830 { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) }, 912 { USB_DEVICE(0x2019, 0xed06) },
831 /* Quanta */ 913 /* Quanta */
832 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) }, 914 { USB_DEVICE(0x1a32, 0x0304) },
833 /* Ralink */ 915 /* Ralink */
834 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 916 { USB_DEVICE(0x148f, 0x2070) },
835 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 917 { USB_DEVICE(0x148f, 0x2770) },
836 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 { USB_DEVICE(0x148f, 0x2870) },
837 { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, 919 { USB_DEVICE(0x148f, 0x3070) },
838 { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, 920 { USB_DEVICE(0x148f, 0x3071) },
839 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 921 { USB_DEVICE(0x148f, 0x3072) },
840 /* Samsung */ 922 /* Samsung */
841 { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) }, 923 { USB_DEVICE(0x04e8, 0x2018) },
842 /* Siemens */ 924 /* Siemens */
843 { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) }, 925 { USB_DEVICE(0x129b, 0x1828) },
844 /* Sitecom */ 926 /* Sitecom */
845 { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) }, 927 { USB_DEVICE(0x0df6, 0x0017) },
846 { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) }, 928 { USB_DEVICE(0x0df6, 0x002b) },
847 { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) }, 929 { USB_DEVICE(0x0df6, 0x002c) },
848 { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) }, 930 { USB_DEVICE(0x0df6, 0x002d) },
849 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, 931 { USB_DEVICE(0x0df6, 0x0039) },
850 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) }, 932 { USB_DEVICE(0x0df6, 0x003b) },
851 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) }, 933 { USB_DEVICE(0x0df6, 0x003d) },
852 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, 934 { USB_DEVICE(0x0df6, 0x003e) },
853 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 935 { USB_DEVICE(0x0df6, 0x003f) },
854 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) }, 936 { USB_DEVICE(0x0df6, 0x0040) },
855 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, 937 { USB_DEVICE(0x0df6, 0x0042) },
856 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) }, 938 { USB_DEVICE(0x0df6, 0x0047) },
857 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) }, 939 { USB_DEVICE(0x0df6, 0x0048) },
940 { USB_DEVICE(0x0df6, 0x0051) },
941 { USB_DEVICE(0x0df6, 0x005f) },
858 /* SMC */ 942 /* SMC */
859 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, 943 { USB_DEVICE(0x083a, 0x6618) },
860 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, 944 { USB_DEVICE(0x083a, 0x7511) },
861 { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) }, 945 { USB_DEVICE(0x083a, 0x7512) },
862 { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) }, 946 { USB_DEVICE(0x083a, 0x7522) },
863 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) }, 947 { USB_DEVICE(0x083a, 0x8522) },
864 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) }, 948 { USB_DEVICE(0x083a, 0xa618) },
865 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) }, 949 { USB_DEVICE(0x083a, 0xa701) },
866 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) }, 950 { USB_DEVICE(0x083a, 0xa702) },
867 { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) }, 951 { USB_DEVICE(0x083a, 0xa703) },
868 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) }, 952 { USB_DEVICE(0x083a, 0xb522) },
869 /* Sparklan */ 953 /* Sparklan */
870 { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) }, 954 { USB_DEVICE(0x15a9, 0x0006) },
871 /* Sweex */ 955 /* Sweex */
872 { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) }, 956 { USB_DEVICE(0x177f, 0x0302) },
873 /* U-Media*/ 957 /* U-Media */
874 { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) }, 958 { USB_DEVICE(0x157e, 0x300e) },
959 { USB_DEVICE(0x157e, 0x3013) },
875 /* ZCOM */ 960 /* ZCOM */
876 { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) }, 961 { USB_DEVICE(0x0cde, 0x0022) },
877 { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) }, 962 { USB_DEVICE(0x0cde, 0x0025) },
878 /* Zinwell */ 963 /* Zinwell */
879 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) }, 964 { USB_DEVICE(0x5a57, 0x0280) },
880 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) }, 965 { USB_DEVICE(0x5a57, 0x0282) },
881 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) }, 966 { USB_DEVICE(0x5a57, 0x0283) },
882 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, 967 { USB_DEVICE(0x5a57, 0x5257) },
883 /* Zyxel */ 968 /* Zyxel */
884 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) }, 969 { USB_DEVICE(0x0586, 0x3416) },
970 { USB_DEVICE(0x0586, 0x3418) },
971 { USB_DEVICE(0x0586, 0x341e) },
972 { USB_DEVICE(0x0586, 0x343e) },
885#ifdef CONFIG_RT2800USB_RT33XX 973#ifdef CONFIG_RT2800USB_RT33XX
886 /* Ralink */ 974 /* Ralink */
887 { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) }, 975 { USB_DEVICE(0x148f, 0x3370) },
888 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) }, 976 { USB_DEVICE(0x148f, 0x8070) },
889 /* Sitecom */ 977 /* Sitecom */
890 { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) }, 978 { USB_DEVICE(0x0df6, 0x0050) },
891#endif 979#endif
892#ifdef CONFIG_RT2800USB_RT35XX 980#ifdef CONFIG_RT2800USB_RT35XX
893 /* Allwin */ 981 /* Allwin */
894 { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, 982 { USB_DEVICE(0x8516, 0x3572) },
895 /* Askey */ 983 /* Askey */
896 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) }, 984 { USB_DEVICE(0x1690, 0x0744) },
897 /* Cisco */ 985 /* Cisco */
898 { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) }, 986 { USB_DEVICE(0x167b, 0x4001) },
899 /* EnGenius */ 987 /* EnGenius */
900 { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) }, 988 { USB_DEVICE(0x1740, 0x9801) },
901 /* I-O DATA */ 989 /* I-O DATA */
902 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) }, 990 { USB_DEVICE(0x04bb, 0x0944) },
991 /* Linksys */
992 { USB_DEVICE(0x13b1, 0x002f) },
993 { USB_DEVICE(0x1737, 0x0079) },
903 /* Ralink */ 994 /* Ralink */
904 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, 995 { USB_DEVICE(0x148f, 0x3572) },
905 /* Sitecom */ 996 /* Sitecom */
906 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, 997 { USB_DEVICE(0x0df6, 0x0041) },
907 /* Toshiba */ 998 /* Toshiba */
908 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) }, 999 { USB_DEVICE(0x0930, 0x0a07) },
909 /* Zinwell */ 1000 /* Zinwell */
910 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, 1001 { USB_DEVICE(0x5a57, 0x0284) },
1002#endif
1003#ifdef CONFIG_RT2800USB_RT53XX
1004 /* Azurewave */
1005 { USB_DEVICE(0x13d3, 0x3329) },
1006 { USB_DEVICE(0x13d3, 0x3365) },
1007 /* Ralink */
1008 { USB_DEVICE(0x148f, 0x5370) },
1009 { USB_DEVICE(0x148f, 0x5372) },
911#endif 1010#endif
912#ifdef CONFIG_RT2800USB_UNKNOWN 1011#ifdef CONFIG_RT2800USB_UNKNOWN
913 /* 1012 /*
914 * Unclear what kind of devices these are (they aren't supported by the 1013 * Unclear what kind of devices these are (they aren't supported by the
915 * vendor linux driver). 1014 * vendor linux driver).
916 */ 1015 */
1016 /* Abocom */
1017 { USB_DEVICE(0x07b8, 0x3073) },
1018 { USB_DEVICE(0x07b8, 0x3074) },
1019 /* Alpha Networks */
1020 { USB_DEVICE(0x14b2, 0x3c08) },
1021 { USB_DEVICE(0x14b2, 0x3c11) },
917 /* Amigo */ 1022 /* Amigo */
918 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 1023 { USB_DEVICE(0x0e0b, 0x9031) },
919 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, 1024 { USB_DEVICE(0x0e0b, 0x9041) },
920 /* ASUS */ 1025 /* ASUS */
921 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 1026 { USB_DEVICE(0x0b05, 0x166a) },
922 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 1027 { USB_DEVICE(0x0b05, 0x1760) },
923 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, 1028 { USB_DEVICE(0x0b05, 0x1761) },
1029 { USB_DEVICE(0x0b05, 0x1790) },
1030 { USB_DEVICE(0x0b05, 0x179d) },
924 /* AzureWave */ 1031 /* AzureWave */
925 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 1032 { USB_DEVICE(0x13d3, 0x3262) },
926 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, 1033 { USB_DEVICE(0x13d3, 0x3284) },
927 { USB_DEVICE(0x13d3, 0x3322), USB_DEVICE_DATA(&rt2800usb_ops) }, 1034 { USB_DEVICE(0x13d3, 0x3322) },
928 /* Belkin */ 1035 /* Belkin */
929 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1036 { USB_DEVICE(0x050d, 0x1003) },
1037 { USB_DEVICE(0x050d, 0x825a) },
930 /* Buffalo */ 1038 /* Buffalo */
931 { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) }, 1039 { USB_DEVICE(0x0411, 0x012e) },
932 { USB_DEVICE(0x0411, 0x0148), USB_DEVICE_DATA(&rt2800usb_ops) }, 1040 { USB_DEVICE(0x0411, 0x0148) },
933 { USB_DEVICE(0x0411, 0x0150), USB_DEVICE_DATA(&rt2800usb_ops) }, 1041 { USB_DEVICE(0x0411, 0x0150) },
934 { USB_DEVICE(0x0411, 0x015d), USB_DEVICE_DATA(&rt2800usb_ops) }, 1042 { USB_DEVICE(0x0411, 0x015d) },
935 /* Conceptronic */
936 { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
937 { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
938 /* Corega */ 1043 /* Corega */
939 { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, 1044 { USB_DEVICE(0x07aa, 0x0041) },
940 { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, 1045 { USB_DEVICE(0x07aa, 0x0042) },
941 { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 1046 { USB_DEVICE(0x18c5, 0x0008) },
942 /* D-Link */ 1047 /* D-Link */
943 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, 1048 { USB_DEVICE(0x07d1, 0x3c0b) },
944 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 1049 { USB_DEVICE(0x07d1, 0x3c13) },
945 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 1050 { USB_DEVICE(0x07d1, 0x3c15) },
946 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) }, 1051 { USB_DEVICE(0x07d1, 0x3c17) },
1052 { USB_DEVICE(0x2001, 0x3c17) },
947 /* Edimax */ 1053 /* Edimax */
948 { USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) }, 1054 { USB_DEVICE(0x7392, 0x4085) },
1055 { USB_DEVICE(0x7392, 0x7722) },
949 /* Encore */ 1056 /* Encore */
950 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 1057 { USB_DEVICE(0x203d, 0x14a1) },
951 /* Gemtek */ 1058 /* Gemtek */
952 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 1059 { USB_DEVICE(0x15a9, 0x0010) },
953 /* Gigabyte */ 1060 /* Gigabyte */
954 { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) }, 1061 { USB_DEVICE(0x1044, 0x800c) },
1062 /* Huawei */
1063 { USB_DEVICE(0x148f, 0xf101) },
1064 /* I-O DATA */
1065 { USB_DEVICE(0x04bb, 0x094b) },
955 /* LevelOne */ 1066 /* LevelOne */
956 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, 1067 { USB_DEVICE(0x1740, 0x0605) },
957 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, 1068 { USB_DEVICE(0x1740, 0x0615) },
958 /* Linksys */ 1069 /* Linksys */
959 { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) }, 1070 { USB_DEVICE(0x1737, 0x0077) },
960 { USB_DEVICE(0x1737, 0x0078), USB_DEVICE_DATA(&rt2800usb_ops) }, 1071 { USB_DEVICE(0x1737, 0x0078) },
961 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) }, 1072 /* Logitec */
1073 { USB_DEVICE(0x0789, 0x0168) },
1074 { USB_DEVICE(0x0789, 0x0169) },
962 /* Motorola */ 1075 /* Motorola */
963 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, 1076 { USB_DEVICE(0x100d, 0x9032) },
964 /* Ovislink */ 1077 /* Ovislink */
965 { USB_DEVICE(0x1b75, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) }, 1078 { USB_DEVICE(0x1b75, 0x3071) },
966 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 1079 { USB_DEVICE(0x1b75, 0x3072) },
967 /* Pegatron */ 1080 /* Pegatron */
968 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) }, 1081 { USB_DEVICE(0x05a6, 0x0101) },
969 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 1082 { USB_DEVICE(0x1d4d, 0x0002) },
970 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 1083 { USB_DEVICE(0x1d4d, 0x0010) },
971 /* Planex */ 1084 /* Planex */
972 { USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) }, 1085 { USB_DEVICE(0x2019, 0x5201) },
973 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 1086 { USB_DEVICE(0x2019, 0xab24) },
974 /* Qcom */ 1087 /* Qcom */
975 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, 1088 { USB_DEVICE(0x18e8, 0x6259) },
1089 /* RadioShack */
1090 { USB_DEVICE(0x08b9, 0x1197) },
1091 /* Sitecom */
1092 { USB_DEVICE(0x0df6, 0x003c) },
1093 { USB_DEVICE(0x0df6, 0x004a) },
1094 { USB_DEVICE(0x0df6, 0x004d) },
1095 { USB_DEVICE(0x0df6, 0x0053) },
1096 { USB_DEVICE(0x0df6, 0x0060) },
1097 { USB_DEVICE(0x0df6, 0x0062) },
976 /* SMC */ 1098 /* SMC */
977 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, 1099 { USB_DEVICE(0x083a, 0xa512) },
978 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1100 { USB_DEVICE(0x083a, 0xc522) },
979 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1101 { USB_DEVICE(0x083a, 0xd522) },
980 { USB_DEVICE(0x083a, 0xf511), USB_DEVICE_DATA(&rt2800usb_ops) }, 1102 { USB_DEVICE(0x083a, 0xf511) },
981 /* Sweex */ 1103 /* Sweex */
982 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, 1104 { USB_DEVICE(0x177f, 0x0153) },
983 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, 1105 { USB_DEVICE(0x177f, 0x0313) },
984 /* Zyxel */ 1106 /* Zyxel */
985 { USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1107 { USB_DEVICE(0x0586, 0x341a) },
986#endif 1108#endif
987 { 0, } 1109 { 0, }
988}; 1110};
@@ -995,10 +1117,16 @@ MODULE_DEVICE_TABLE(usb, rt2800usb_device_table);
995MODULE_FIRMWARE(FIRMWARE_RT2870); 1117MODULE_FIRMWARE(FIRMWARE_RT2870);
996MODULE_LICENSE("GPL"); 1118MODULE_LICENSE("GPL");
997 1119
1120static int rt2800usb_probe(struct usb_interface *usb_intf,
1121 const struct usb_device_id *id)
1122{
1123 return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
1124}
1125
998static struct usb_driver rt2800usb_driver = { 1126static struct usb_driver rt2800usb_driver = {
999 .name = KBUILD_MODNAME, 1127 .name = KBUILD_MODNAME,
1000 .id_table = rt2800usb_device_table, 1128 .id_table = rt2800usb_device_table,
1001 .probe = rt2x00usb_probe, 1129 .probe = rt2800usb_probe,
1002 .disconnect = rt2x00usb_disconnect, 1130 .disconnect = rt2x00usb_disconnect,
1003 .suspend = rt2x00usb_suspend, 1131 .suspend = rt2x00usb_suspend,
1004 .resume = rt2x00usb_resume, 1132 .resume = rt2x00usb_resume,
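
With USB_DEVICE_DATA() gone from the id table, the ops structure is no longer carried in driver_info; the thin rt2800usb_probe() wrapper above passes &rt2800usb_ops to the shared rt2x00usb_probe() instead. The pattern scales naturally: a hypothetical second chip family would add its own one-line wrapper, as sketched below (rt2800usb_other_ops and this function are not part of the patch).

/* Hypothetical illustration of the wrapper pattern; rt2800usb_other_ops
 * and this probe function do not exist in the patch. */
static int rt2800usb_other_probe(struct usb_interface *usb_intf,
                                 const struct usb_device_id *id)
{
        return rt2x00usb_probe(usb_intf, &rt2800usb_other_ops);
}
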
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 7f10239f56a8..c446db69bd3c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -37,6 +37,7 @@
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <linux/input-polldev.h> 38#include <linux/input-polldev.h>
39#include <linux/kfifo.h> 39#include <linux/kfifo.h>
40#include <linux/timer.h>
40 41
41#include <net/mac80211.h> 42#include <net/mac80211.h>
42 43
@@ -348,6 +349,11 @@ struct link {
348 * to bring the device/driver back into the desired state. 349 * to bring the device/driver back into the desired state.
349 */ 350 */
350 struct delayed_work watchdog_work; 351 struct delayed_work watchdog_work;
352
353 /*
354 * Work structure for scheduling periodic AGC adjustments.
355 */
356 struct delayed_work agc_work;
351}; 357};
352 358
353enum rt2x00_delayed_flags { 359enum rt2x00_delayed_flags {
@@ -556,6 +562,7 @@ struct rt2x00lib_ops {
556 struct link_qual *qual); 562 struct link_qual *qual);
557 void (*link_tuner) (struct rt2x00_dev *rt2x00dev, 563 void (*link_tuner) (struct rt2x00_dev *rt2x00dev,
558 struct link_qual *qual, const u32 count); 564 struct link_qual *qual, const u32 count);
565 void (*gain_calibration) (struct rt2x00_dev *rt2x00dev);
559 566
560 /* 567 /*
561 * Data queue handlers. 568 * Data queue handlers.
@@ -564,7 +571,8 @@ struct rt2x00lib_ops {
564 void (*start_queue) (struct data_queue *queue); 571 void (*start_queue) (struct data_queue *queue);
565 void (*kick_queue) (struct data_queue *queue); 572 void (*kick_queue) (struct data_queue *queue);
566 void (*stop_queue) (struct data_queue *queue); 573 void (*stop_queue) (struct data_queue *queue);
567 void (*flush_queue) (struct data_queue *queue); 574 void (*flush_queue) (struct data_queue *queue, bool drop);
575 void (*tx_dma_done) (struct queue_entry *entry);
568 576
569 /* 577 /*
570 * TX control handlers 578 * TX control handlers
@@ -637,11 +645,11 @@ struct rt2x00_ops {
637}; 645};
638 646
639/* 647/*
640 * rt2x00 device flags 648 * rt2x00 state flags
641 */ 649 */
642enum rt2x00_flags { 650enum rt2x00_state_flags {
643 /* 651 /*
644 * Device state flags 652 * Device flags
645 */ 653 */
646 DEVICE_STATE_PRESENT, 654 DEVICE_STATE_PRESENT,
647 DEVICE_STATE_REGISTERED_HW, 655 DEVICE_STATE_REGISTERED_HW,
@@ -651,40 +659,47 @@ enum rt2x00_flags {
651 DEVICE_STATE_SCANNING, 659 DEVICE_STATE_SCANNING,
652 660
653 /* 661 /*
654 * Driver requirements
655 */
656 DRIVER_REQUIRE_FIRMWARE,
657 DRIVER_REQUIRE_BEACON_GUARD,
658 DRIVER_REQUIRE_ATIM_QUEUE,
659 DRIVER_REQUIRE_DMA,
660 DRIVER_REQUIRE_COPY_IV,
661 DRIVER_REQUIRE_L2PAD,
662 DRIVER_REQUIRE_TXSTATUS_FIFO,
663 DRIVER_REQUIRE_TASKLET_CONTEXT,
664 DRIVER_REQUIRE_SW_SEQNO,
665 DRIVER_REQUIRE_HT_TX_DESC,
666
667 /*
668 * Driver features
669 */
670 CONFIG_SUPPORT_HW_BUTTON,
671 CONFIG_SUPPORT_HW_CRYPTO,
672 CONFIG_SUPPORT_POWER_LIMIT,
673 DRIVER_SUPPORT_CONTROL_FILTERS,
674 DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
675 DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
676 DRIVER_SUPPORT_LINK_TUNING,
677 DRIVER_SUPPORT_WATCHDOG,
678
679 /*
680 * Driver configuration 662 * Driver configuration
681 */ 663 */
682 CONFIG_FRAME_TYPE,
683 CONFIG_RF_SEQUENCE,
684 CONFIG_EXTERNAL_LNA_A,
685 CONFIG_EXTERNAL_LNA_BG,
686 CONFIG_DOUBLE_ANTENNA,
687 CONFIG_CHANNEL_HT40, 664 CONFIG_CHANNEL_HT40,
665 CONFIG_POWERSAVING,
666};
667
668/*
669 * rt2x00 capability flags
670 */
671enum rt2x00_capability_flags {
672 /*
673 * Requirements
674 */
675 REQUIRE_FIRMWARE,
676 REQUIRE_BEACON_GUARD,
677 REQUIRE_ATIM_QUEUE,
678 REQUIRE_DMA,
679 REQUIRE_COPY_IV,
680 REQUIRE_L2PAD,
681 REQUIRE_TXSTATUS_FIFO,
682 REQUIRE_TASKLET_CONTEXT,
683 REQUIRE_SW_SEQNO,
684 REQUIRE_HT_TX_DESC,
685 REQUIRE_PS_AUTOWAKE,
686
687 /*
688 * Capabilities
689 */
690 CAPABILITY_HW_BUTTON,
691 CAPABILITY_HW_CRYPTO,
692 CAPABILITY_POWER_LIMIT,
693 CAPABILITY_CONTROL_FILTERS,
694 CAPABILITY_CONTROL_FILTER_PSPOLL,
695 CAPABILITY_PRE_TBTT_INTERRUPT,
696 CAPABILITY_LINK_TUNING,
697 CAPABILITY_FRAME_TYPE,
698 CAPABILITY_RF_SEQUENCE,
699 CAPABILITY_EXTERNAL_LNA_A,
700 CAPABILITY_EXTERNAL_LNA_BG,
701 CAPABILITY_DOUBLE_ANTENNA,
702 CAPABILITY_BT_COEXIST,
688}; 703};
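
The old rt2x00_flags enum is split in two: runtime state stays in rt2x00_dev.flags and keeps being accessed atomically (set_bit/clear_bit/test_bit), while the requirements and capabilities move to rt2x00_dev.cap_flags, which is filled once at probe time with the non-atomic __set_bit and only read afterwards. A plain-C model of the distinction, with the kernel bitops reduced to ordinary OR/AND for illustration:

/* Userspace model of the flags/cap_flags split; kernel bitops are
 * reduced to plain OR/AND here, which is the point of the split for
 * cap_flags but an oversimplification for the runtime flags. */
#include <stdio.h>

enum state_flag { DEVICE_STATE_PRESENT, CONFIG_CHANNEL_HT40, CONFIG_POWERSAVING };
enum cap_flag   { REQUIRE_FIRMWARE, CAPABILITY_HW_CRYPTO, REQUIRE_PS_AUTOWAKE };

struct dev_model {
        unsigned long flags;      /* runtime state, atomic ops in the kernel */
        unsigned long cap_flags;  /* fixed at probe, plain ops are enough    */
};

int main(void)
{
        struct dev_model d = { 0, 0 };

        d.cap_flags |= 1UL << REQUIRE_FIRMWARE;      /* __set_bit() at probe */
        d.flags     |= 1UL << CONFIG_POWERSAVING;    /* set_bit() at runtime */

        if (d.cap_flags & (1UL << REQUIRE_FIRMWARE)) /* test_bit()           */
                printf("firmware required\n");
        if (d.flags & (1UL << CONFIG_POWERSAVING))
                printf("powersaving active\n");
        return 0;
}
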
689 704
690/* 705/*
@@ -733,13 +748,20 @@ struct rt2x00_dev {
733#endif /* CONFIG_RT2X00_LIB_LEDS */ 748#endif /* CONFIG_RT2X00_LIB_LEDS */
734 749
735 /* 750 /*
736 * Device flags. 751 * Device state flags.
737 * In these flags the current status and some 752 * In these flags the current status is stored.
738 * of the device capabilities are stored. 753 * Access to these flags should occur atomically.
739 */ 754 */
740 unsigned long flags; 755 unsigned long flags;
741 756
742 /* 757 /*
 758 * Device capability flags.
759 * In these flags the device/driver capabilities are stored.
760 * Access to these flags should occur non-atomically.
761 */
762 unsigned long cap_flags;
763
764 /*
743 * Device information, Bus IRQ and name (PCI, SoC) 765 * Device information, Bus IRQ and name (PCI, SoC)
744 */ 766 */
745 int irq; 767 int irq;
@@ -855,10 +877,20 @@ struct rt2x00_dev {
855 u8 calibration[2]; 877 u8 calibration[2];
856 878
857 /* 879 /*
880 * Association id.
881 */
882 u16 aid;
883
884 /*
858 * Beacon interval. 885 * Beacon interval.
859 */ 886 */
860 u16 beacon_int; 887 u16 beacon_int;
861 888
889 /**
890 * Timestamp of last received beacon
891 */
892 unsigned long last_beacon;
893
862 /* 894 /*
863 * Low level statistics which will have 895 * Low level statistics which will have
864 * to be kept up to date while device is running. 896 * to be kept up to date while device is running.
@@ -887,6 +919,11 @@ struct rt2x00_dev {
887 struct work_struct txdone_work; 919 struct work_struct txdone_work;
888 920
889 /* 921 /*
922 * Powersaving work
923 */
924 struct delayed_work autowakeup_work;
925
926 /*
890 * Data queue arrays for RX, TX, Beacon and ATIM. 927 * Data queue arrays for RX, TX, Beacon and ATIM.
891 */ 928 */
892 unsigned int data_queues; 929 unsigned int data_queues;
@@ -906,6 +943,11 @@ struct rt2x00_dev {
906 DECLARE_KFIFO_PTR(txstatus_fifo, u32); 943 DECLARE_KFIFO_PTR(txstatus_fifo, u32);
907 944
908 /* 945 /*
946 * Timer to ensure tx status reports are read (rt2800usb).
947 */
948 struct timer_list txstatus_timer;
949
950 /*
909 * Tasklet for processing tx status reports (rt2800pci). 951 * Tasklet for processing tx status reports (rt2800pci).
910 */ 952 */
911 struct tasklet_struct txstatus_tasklet; 953 struct tasklet_struct txstatus_tasklet;
@@ -1230,6 +1272,10 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1230 const struct ieee80211_tx_queue_params *params); 1272 const struct ieee80211_tx_queue_params *params);
1231void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw); 1273void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
1232void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop); 1274void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);
1275int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
1276int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
1277void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
1278 u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
1233 1279
1234/* 1280/*
1235 * Driver allocation handlers. 1281 * Driver allocation handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 9416e36de29e..555180d8f4aa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -100,6 +100,10 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
100 erp.basic_rates = bss_conf->basic_rates; 100 erp.basic_rates = bss_conf->basic_rates;
101 erp.beacon_int = bss_conf->beacon_int; 101 erp.beacon_int = bss_conf->beacon_int;
102 102
103 /* Update the AID, this is needed for dynamic PS support */
104 rt2x00dev->aid = bss_conf->assoc ? bss_conf->aid : 0;
105 rt2x00dev->last_beacon = bss_conf->timestamp;
106
103 /* Update global beacon interval time, this is needed for PS support */ 107 /* Update global beacon interval time, this is needed for PS support */
104 rt2x00dev->beacon_int = bss_conf->beacon_int; 108 rt2x00dev->beacon_int = bss_conf->beacon_int;
105 109
@@ -109,15 +113,6 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
109 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp, changed); 113 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp, changed);
110} 114}
111 115
112static inline
113enum antenna rt2x00lib_config_antenna_check(enum antenna current_ant,
114 enum antenna default_ant)
115{
116 if (current_ant != ANTENNA_SW_DIVERSITY)
117 return current_ant;
118 return (default_ant != ANTENNA_SW_DIVERSITY) ? default_ant : ANTENNA_B;
119}
120
121void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, 116void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
122 struct antenna_setup config) 117 struct antenna_setup config)
123{ 118{
@@ -126,19 +121,35 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
126 struct antenna_setup *active = &rt2x00dev->link.ant.active; 121 struct antenna_setup *active = &rt2x00dev->link.ant.active;
127 122
128 /* 123 /*
129 * Failsafe: Make sure we are not sending the 124 * When the caller tries to send the SW diversity,
130 * ANTENNA_SW_DIVERSITY state to the driver. 125 * we must update the ANTENNA_RX_DIVERSITY flag to
131 * If that happens, fallback to hardware defaults, 126 * enable the antenna diversity in the link tuner.
132 * or our own default. 127 *
 128 * Secondly, we must guarantee we never send the
129 * software antenna diversity command to the driver.
133 */ 130 */
134 if (!(ant->flags & ANTENNA_RX_DIVERSITY)) 131 if (!(ant->flags & ANTENNA_RX_DIVERSITY)) {
135 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx); 132 if (config.rx == ANTENNA_SW_DIVERSITY) {
136 else if (config.rx == ANTENNA_SW_DIVERSITY) 133 ant->flags |= ANTENNA_RX_DIVERSITY;
134
135 if (def->rx == ANTENNA_SW_DIVERSITY)
136 config.rx = ANTENNA_B;
137 else
138 config.rx = def->rx;
139 }
140 } else if (config.rx == ANTENNA_SW_DIVERSITY)
137 config.rx = active->rx; 141 config.rx = active->rx;
138 142
139 if (!(ant->flags & ANTENNA_TX_DIVERSITY)) 143 if (!(ant->flags & ANTENNA_TX_DIVERSITY)) {
140 config.tx = rt2x00lib_config_antenna_check(config.tx, def->tx); 144 if (config.tx == ANTENNA_SW_DIVERSITY) {
141 else if (config.tx == ANTENNA_SW_DIVERSITY) 145 ant->flags |= ANTENNA_TX_DIVERSITY;
146
147 if (def->tx == ANTENNA_SW_DIVERSITY)
148 config.tx = ANTENNA_B;
149 else
150 config.tx = def->tx;
151 }
152 } else if (config.tx == ANTENNA_SW_DIVERSITY)
142 config.tx = active->tx; 153 config.tx = active->tx;
143 154
144 /* 155 /*
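
The rewritten antenna handling folds the old _check() helper into the caller: the first time software diversity is requested, the ANTENNA_RX_DIVERSITY/ANTENNA_TX_DIVERSITY flag is raised so the link tuner starts rotating antennas, and a concrete default (ANTENNA_B when even the default is "software diversity") is handed to the driver; on later requests the currently active antenna is kept. A compact model of the RX path, with the diversity flag reduced to an int:

/* Model of the RX fallback path; only the "first request" branch of the
 * new code is shown, and the flag is reduced to an int. */
#include <stdio.h>

enum antenna { ANTENNA_SW_DIVERSITY, ANTENNA_A, ANTENNA_B };

static enum antenna resolve_rx(enum antenna requested, enum antenna def,
                               int *sw_diversity)
{
        if (requested != ANTENNA_SW_DIVERSITY)
                return requested;

        *sw_diversity = 1;  /* link tuner will rotate antennas in software */
        return (def == ANTENNA_SW_DIVERSITY) ? ANTENNA_B : def;
}

int main(void)
{
        int sw = 0;
        enum antenna rx = resolve_rx(ANTENNA_SW_DIVERSITY,
                                     ANTENNA_SW_DIVERSITY, &sw);

        printf("rx antenna %d, software diversity %d\n", rx, sw);  /* 2, 1 */
        return 0;
}
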
@@ -163,12 +174,43 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
163 rt2x00queue_start_queue(rt2x00dev->rx); 174 rt2x00queue_start_queue(rt2x00dev->rx);
164} 175}
165 176
177static u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
178 struct ieee80211_conf *conf)
179{
180 struct hw_mode_spec *spec = &rt2x00dev->spec;
181 int center_channel;
182 u16 i;
183
184 /*
185 * Initialize center channel to current channel.
186 */
187 center_channel = spec->channels[conf->channel->hw_value].channel;
188
189 /*
190 * Adjust center channel to HT40+ and HT40- operation.
191 */
192 if (conf_is_ht40_plus(conf))
193 center_channel += 2;
194 else if (conf_is_ht40_minus(conf))
195 center_channel -= (center_channel == 14) ? 1 : 2;
196
197 for (i = 0; i < spec->num_channels; i++)
198 if (spec->channels[i].channel == center_channel)
199 return i;
200
201 WARN_ON(1);
202 return conf->channel->hw_value;
203}
204
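
rt2x00ht_center_channel() shifts the configured primary channel to the centre of the 40 MHz pair: +2 channel numbers for HT40+, -2 for HT40-, with channel 14 special-cased to -1 because of its irregular 2.4 GHz spacing, and then maps the result back to a hw_value index. A runnable model of just the arithmetic (the channel-table lookup is left out):

/* Model of the centre-channel arithmetic only; mapping the result back
 * to a hw_value index via the channel table is omitted. */
#include <stdio.h>

static int center_channel(int primary, int ht40_plus)
{
        int center = primary;

        if (ht40_plus)
                center += 2;                            /* secondary pair above  */
        else
                center -= (center == 14) ? 1 : 2;       /* channel 14 is special */

        return center;
}

int main(void)
{
        printf("HT40+ on channel 36 -> center %d\n", center_channel(36, 1)); /* 38 */
        printf("HT40- on channel 11 -> center %d\n", center_channel(11, 0)); /* 9  */
        return 0;
}
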
166void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 205void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
167 struct ieee80211_conf *conf, 206 struct ieee80211_conf *conf,
168 unsigned int ieee80211_flags) 207 unsigned int ieee80211_flags)
169{ 208{
170 struct rt2x00lib_conf libconf; 209 struct rt2x00lib_conf libconf;
171 u16 hw_value; 210 u16 hw_value;
211 u16 autowake_timeout;
212 u16 beacon_int;
213 u16 beacon_diff;
172 214
173 memset(&libconf, 0, sizeof(libconf)); 215 memset(&libconf, 0, sizeof(libconf));
174 216
@@ -176,10 +218,10 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
176 218
177 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { 219 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
178 if (conf_is_ht40(conf)) { 220 if (conf_is_ht40(conf)) {
179 __set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); 221 set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
180 hw_value = rt2x00ht_center_channel(rt2x00dev, conf); 222 hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
181 } else { 223 } else {
182 __clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); 224 clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
183 hw_value = conf->channel->hw_value; 225 hw_value = conf->channel->hw_value;
184 } 226 }
185 227
@@ -192,6 +234,10 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
192 sizeof(libconf.channel)); 234 sizeof(libconf.channel));
193 } 235 }
194 236
237 if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
238 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS))
239 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
240
195 /* 241 /*
196 * Start configuration. 242 * Start configuration.
197 */ 243 */
@@ -204,6 +250,26 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
204 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) 250 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
205 rt2x00link_reset_tuner(rt2x00dev, false); 251 rt2x00link_reset_tuner(rt2x00dev, false);
206 252
253 if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
254 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
255 (conf->flags & IEEE80211_CONF_PS)) {
256 beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
257 beacon_int = msecs_to_jiffies(rt2x00dev->beacon_int);
258
259 if (beacon_diff > beacon_int)
260 beacon_diff = 0;
261
262 autowake_timeout = (conf->max_sleep_period * beacon_int) - beacon_diff;
263 queue_delayed_work(rt2x00dev->workqueue,
264 &rt2x00dev->autowakeup_work,
265 autowake_timeout - 15);
266 }
267
268 if (conf->flags & IEEE80211_CONF_PS)
269 set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
270 else
271 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
272
207 rt2x00dev->curr_band = conf->channel->band; 273 rt2x00dev->curr_band = conf->channel->band;
208 rt2x00dev->curr_freq = conf->channel->center_freq; 274 rt2x00dev->curr_freq = conf->channel->center_freq;
209 rt2x00dev->tx_power = conf->power_level; 275 rt2x00dev->tx_power = conf->power_level;
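
The power-save path arms autowakeup_work so the device wakes just before the next buffered-traffic window: the time since the last beacon is subtracted from max_sleep_period beacon intervals (a stale timestamp is treated as zero), and the work is queued 15 jiffies early as a safety margin. The same arithmetic in plain milliseconds, with illustrative numbers only:

/* Userspace model of the autowake arithmetic; the driver works in
 * jiffies, this model uses plain milliseconds and invented numbers. */
#include <stdio.h>

int main(void)
{
        unsigned int beacon_int = 100;   /* beacon interval, ~100 ms          */
        unsigned int sleep_periods = 2;  /* conf->max_sleep_period            */
        unsigned int since_beacon = 30;  /* time since the last beacon        */
        unsigned int margin = 15;        /* wake slightly early, as above     */

        if (since_beacon > beacon_int)   /* stale beacon timestamp: ignore it */
                since_beacon = 0;

        unsigned int timeout = sleep_periods * beacon_int - since_beacon;

        printf("queue autowakeup_work in %u ms\n", timeout - margin);  /* 155 */
        return 0;
}
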
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 3f5688fbf3f7..1bb9d46077ff 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -52,7 +52,7 @@ void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
52 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 52 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
53 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 53 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
54 54
55 if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !hw_key) 55 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
56 return; 56 return;
57 57
58 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags); 58 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
80 struct ieee80211_key_conf *key = tx_info->control.hw_key; 80 struct ieee80211_key_conf *key = tx_info->control.hw_key;
81 unsigned int overhead = 0; 81 unsigned int overhead = 0;
82 82
83 if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) || !key) 83 if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
84 return overhead; 84 return overhead;
85 85
86 /* 86 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index c92db3264741..78787fcc919e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -63,7 +63,8 @@ struct rt2x00debug_intf {
63 * - driver folder 63 * - driver folder
64 * - driver file 64 * - driver file
65 * - chipset file 65 * - chipset file
66 * - device flags file 66 * - device state flags file
67 * - device capability flags file
67 * - register folder 68 * - register folder
68 * - csr offset/value files 69 * - csr offset/value files
69 * - eeprom offset/value files 70 * - eeprom offset/value files
@@ -78,6 +79,7 @@ struct rt2x00debug_intf {
78 struct dentry *driver_entry; 79 struct dentry *driver_entry;
79 struct dentry *chipset_entry; 80 struct dentry *chipset_entry;
80 struct dentry *dev_flags; 81 struct dentry *dev_flags;
82 struct dentry *cap_flags;
81 struct dentry *register_folder; 83 struct dentry *register_folder;
82 struct dentry *csr_off_entry; 84 struct dentry *csr_off_entry;
83 struct dentry *csr_val_entry; 85 struct dentry *csr_val_entry;
@@ -553,6 +555,35 @@ static const struct file_operations rt2x00debug_fop_dev_flags = {
553 .llseek = default_llseek, 555 .llseek = default_llseek,
554}; 556};
555 557
558static ssize_t rt2x00debug_read_cap_flags(struct file *file,
559 char __user *buf,
560 size_t length,
561 loff_t *offset)
562{
563 struct rt2x00debug_intf *intf = file->private_data;
564 char line[16];
565 size_t size;
566
567 if (*offset)
568 return 0;
569
570 size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->cap_flags);
571
572 if (copy_to_user(buf, line, size))
573 return -EFAULT;
574
575 *offset += size;
576 return size;
577}
578
579static const struct file_operations rt2x00debug_fop_cap_flags = {
580 .owner = THIS_MODULE,
581 .read = rt2x00debug_read_cap_flags,
582 .open = rt2x00debug_file_open,
583 .release = rt2x00debug_file_release,
584 .llseek = default_llseek,
585};
586
556static struct dentry *rt2x00debug_create_file_driver(const char *name, 587static struct dentry *rt2x00debug_create_file_driver(const char *name,
557 struct rt2x00debug_intf 588 struct rt2x00debug_intf
558 *intf, 589 *intf,
@@ -568,7 +599,6 @@ static struct dentry *rt2x00debug_create_file_driver(const char *name,
568 blob->data = data; 599 blob->data = data;
569 data += sprintf(data, "driver:\t%s\n", intf->rt2x00dev->ops->name); 600 data += sprintf(data, "driver:\t%s\n", intf->rt2x00dev->ops->name);
570 data += sprintf(data, "version:\t%s\n", DRV_VERSION); 601 data += sprintf(data, "version:\t%s\n", DRV_VERSION);
571 data += sprintf(data, "compiled:\t%s %s\n", __DATE__, __TIME__);
572 blob->size = strlen(blob->data); 602 blob->size = strlen(blob->data);
573 603
574 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob); 604 return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
@@ -653,6 +683,12 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
653 if (IS_ERR(intf->dev_flags) || !intf->dev_flags) 683 if (IS_ERR(intf->dev_flags) || !intf->dev_flags)
654 goto exit; 684 goto exit;
655 685
686 intf->cap_flags = debugfs_create_file("cap_flags", S_IRUSR,
687 intf->driver_folder, intf,
688 &rt2x00debug_fop_cap_flags);
689 if (IS_ERR(intf->cap_flags) || !intf->cap_flags)
690 goto exit;
691
656 intf->register_folder = 692 intf->register_folder =
657 debugfs_create_dir("register", intf->driver_folder); 693 debugfs_create_dir("register", intf->driver_folder);
658 if (IS_ERR(intf->register_folder) || !intf->register_folder) 694 if (IS_ERR(intf->register_folder) || !intf->register_folder)
@@ -706,7 +742,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
706 intf, &rt2x00debug_fop_queue_stats); 742 intf, &rt2x00debug_fop_queue_stats);
707 743
708#ifdef CONFIG_RT2X00_LIB_CRYPTO 744#ifdef CONFIG_RT2X00_LIB_CRYPTO
709 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) 745 if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
710 intf->crypto_stats_entry = 746 intf->crypto_stats_entry =
711 debugfs_create_file("crypto", S_IRUGO, intf->queue_folder, 747 debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
712 intf, &rt2x00debug_fop_crypto_stats); 748 intf, &rt2x00debug_fop_crypto_stats);
@@ -744,6 +780,7 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
744 debugfs_remove(intf->csr_off_entry); 780 debugfs_remove(intf->csr_off_entry);
745 debugfs_remove(intf->register_folder); 781 debugfs_remove(intf->register_folder);
746 debugfs_remove(intf->dev_flags); 782 debugfs_remove(intf->dev_flags);
783 debugfs_remove(intf->cap_flags);
747 debugfs_remove(intf->chipset_entry); 784 debugfs_remove(intf->chipset_entry);
748 debugfs_remove(intf->driver_entry); 785 debugfs_remove(intf->driver_entry);
749 debugfs_remove(intf->driver_folder); 786 debugfs_remove(intf->driver_folder);
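
The new cap_flags debugfs file mirrors the existing dev_flags file and prints the capability bitmask as a single hex word. A small host-side helper that decodes such a value into bit numbers is sketched below; mapping a bit number back to a name follows the enum rt2x00_capability_flags order from rt2x00.h. For example, a value of 0x21 would report bits 0 and 5 as set.

/* Host-side helper (not part of the patch): decode the hex word read
 * from the cap_flags debugfs file into individual bit numbers. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        unsigned long caps = argc > 1 ? strtoul(argv[1], NULL, 16) : 0;

        for (int bit = 0; bit < 32; bit++)
                if (caps & (1UL << bit))
                        printf("capability bit %d is set\n", bit);
        return 0;
}
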
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 84eb6ad36377..c018d67aab8e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -27,6 +27,7 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/log2.h>
30 31
31#include "rt2x00.h" 32#include "rt2x00.h"
32#include "rt2x00lib.h" 33#include "rt2x00lib.h"
@@ -70,6 +71,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
70 */ 71 */
71 rt2x00queue_start_queues(rt2x00dev); 72 rt2x00queue_start_queues(rt2x00dev);
72 rt2x00link_start_tuner(rt2x00dev); 73 rt2x00link_start_tuner(rt2x00dev);
74 rt2x00link_start_agc(rt2x00dev);
73 75
74 /* 76 /*
75 * Start watchdog monitoring. 77 * Start watchdog monitoring.
@@ -92,6 +94,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
92 /* 94 /*
93 * Stop all queues 95 * Stop all queues
94 */ 96 */
97 rt2x00link_stop_agc(rt2x00dev);
95 rt2x00link_stop_tuner(rt2x00dev); 98 rt2x00link_stop_tuner(rt2x00dev);
96 rt2x00queue_stop_queues(rt2x00dev); 99 rt2x00queue_stop_queues(rt2x00dev);
97 rt2x00queue_flush_queues(rt2x00dev, true); 100 rt2x00queue_flush_queues(rt2x00dev, true);
@@ -138,6 +141,16 @@ static void rt2x00lib_intf_scheduled(struct work_struct *work)
138 rt2x00dev); 141 rt2x00dev);
139} 142}
140 143
144static void rt2x00lib_autowakeup(struct work_struct *work)
145{
146 struct rt2x00_dev *rt2x00dev =
147 container_of(work, struct rt2x00_dev, autowakeup_work.work);
148
149 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
150 ERROR(rt2x00dev, "Device failed to wakeup.\n");
151 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
152}
153
141/* 154/*
142 * Interrupt context handlers. 155 * Interrupt context handlers.
143 */ 156 */
@@ -197,7 +210,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
197 * here as they will fetch the next beacon directly prior to 210 * here as they will fetch the next beacon directly prior to
198 * transmission. 211 * transmission.
199 */ 212 */
200 if (test_bit(DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, &rt2x00dev->flags)) 213 if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags))
201 return; 214 return;
202 215
203 /* fetch next beacon */ 216 /* fetch next beacon */
@@ -222,7 +235,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
222void rt2x00lib_dmastart(struct queue_entry *entry) 235void rt2x00lib_dmastart(struct queue_entry *entry)
223{ 236{
224 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 237 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
225 rt2x00queue_index_inc(entry->queue, Q_INDEX); 238 rt2x00queue_index_inc(entry, Q_INDEX);
226} 239}
227EXPORT_SYMBOL_GPL(rt2x00lib_dmastart); 240EXPORT_SYMBOL_GPL(rt2x00lib_dmastart);
228 241
@@ -230,7 +243,7 @@ void rt2x00lib_dmadone(struct queue_entry *entry)
230{ 243{
231 set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags); 244 set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags);
232 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 245 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
233 rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE); 246 rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE);
234} 247}
235EXPORT_SYMBOL_GPL(rt2x00lib_dmadone); 248EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
236 249
@@ -268,7 +281,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
268 /* 281 /*
 269 	 * Remove L2 padding which was added during TX 282 	 * Remove L2 padding which was added during TX
270 */ 283 */
271 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) 284 if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
272 rt2x00queue_remove_l2pad(entry->skb, header_length); 285 rt2x00queue_remove_l2pad(entry->skb, header_length);
273 286
274 /* 287 /*
@@ -277,7 +290,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
 277 	 * mac80211 will expect the same data to be present in the 290 	 * mac80211 will expect the same data to be present in the
278 * frame as it was passed to us. 291 * frame as it was passed to us.
279 */ 292 */
280 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) 293 if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
281 rt2x00crypto_tx_insert_iv(entry->skb, header_length); 294 rt2x00crypto_tx_insert_iv(entry->skb, header_length);
282 295
283 /* 296 /*
@@ -350,10 +363,14 @@ void rt2x00lib_txdone(struct queue_entry *entry,
350 * which would allow the rc algorithm to better decide on 363 * which would allow the rc algorithm to better decide on
351 * which rates are suitable. 364 * which rates are suitable.
352 */ 365 */
353 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 366 if (test_bit(TXDONE_AMPDU, &txdesc->flags) ||
367 tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
354 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 368 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
355 tx_info->status.ampdu_len = 1; 369 tx_info->status.ampdu_len = 1;
356 tx_info->status.ampdu_ack_len = success ? 1 : 0; 370 tx_info->status.ampdu_ack_len = success ? 1 : 0;
371
372 if (!success)
373 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
357 } 374 }
358 375
359 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 376 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -370,7 +387,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
370 * send the status report back. 387 * send the status report back.
371 */ 388 */
372 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { 389 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
373 if (test_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags)) 390 if (test_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags))
374 ieee80211_tx_status(rt2x00dev->hw, entry->skb); 391 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
375 else 392 else
376 ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); 393 ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
@@ -385,7 +402,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
385 402
386 rt2x00dev->ops->lib->clear_entry(entry); 403 rt2x00dev->ops->lib->clear_entry(entry);
387 404
388 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 405 rt2x00queue_index_inc(entry, Q_INDEX_DONE);
389 406
390 /* 407 /*
391 * If the data queue was below the threshold before the txdone 408 * If the data queue was below the threshold before the txdone
@@ -409,6 +426,77 @@ void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status)
409} 426}
410EXPORT_SYMBOL_GPL(rt2x00lib_txdone_noinfo); 427EXPORT_SYMBOL_GPL(rt2x00lib_txdone_noinfo);
411 428
429static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie)
430{
431 struct ieee80211_mgmt *mgmt = (void *)data;
432 u8 *pos, *end;
433
434 pos = (u8 *)mgmt->u.beacon.variable;
435 end = data + len;
436 while (pos < end) {
437 if (pos + 2 + pos[1] > end)
438 return NULL;
439
440 if (pos[0] == ie)
441 return pos;
442
443 pos += 2 + pos[1];
444 }
445
446 return NULL;
447}
448
449static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
450 struct sk_buff *skb,
451 struct rxdone_entry_desc *rxdesc)
452{
453 struct ieee80211_hdr *hdr = (void *) skb->data;
454 struct ieee80211_tim_ie *tim_ie;
455 u8 *tim;
456 u8 tim_len;
457 bool cam;
458
459 /* If this is not a beacon, or if mac80211 has no powersaving
460 * configured, or if the device is already in powersaving mode
461 * we can exit now. */
462 if (likely(!ieee80211_is_beacon(hdr->frame_control) ||
463 !(rt2x00dev->hw->conf.flags & IEEE80211_CONF_PS)))
464 return;
465
466 /* min. beacon length + FCS_LEN */
467 if (skb->len <= 40 + FCS_LEN)
468 return;
469
470 /* and only beacons from the associated BSSID, please */
471 if (!(rxdesc->dev_flags & RXDONE_MY_BSS) ||
472 !rt2x00dev->aid)
473 return;
474
475 rt2x00dev->last_beacon = jiffies;
476
477 tim = rt2x00lib_find_ie(skb->data, skb->len - FCS_LEN, WLAN_EID_TIM);
478 if (!tim)
479 return;
480
481 if (tim[1] < sizeof(*tim_ie))
482 return;
483
484 tim_len = tim[1];
485 tim_ie = (struct ieee80211_tim_ie *) &tim[2];
486
 487 	/* Check whether the PHY can be turned off again. */
488
489 /* 1. What about buffered unicast traffic for our AID? */
490 cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid);
491
492 /* 2. Maybe the AP wants to send multicast/broadcast data? */
493 cam |= (tim_ie->bitmap_ctrl & 0x01);
494
495 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
496 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
497 IEEE80211_CONF_CHANGE_PS);
498}
499
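For reference, the AID lookup that ieee80211_check_tim() performs on the (possibly partial) virtual bitmap is roughly the following; this is a paraphrase of the helper in include/linux/ieee80211.h, shown only to make the "buffered unicast traffic" check above easier to follow:

    /* Simplified restatement of ieee80211_check_tim(): find the bit
     * belonging to our association ID inside the partial virtual
     * bitmap carried by the TIM element. */
    static bool example_check_tim(const struct ieee80211_tim_ie *tim,
                                  u8 tim_len, u16 aid)
    {
            u8 mask, index, index_n1, index_n2;

            if (!tim || tim_len < sizeof(*tim))
                    return false;

            aid &= 0x3fff;
            index = aid / 8;
            mask = 1 << (aid & 7);

            /* Bits 1-7 of bitmap_ctrl encode the bitmap offset (N1);
             * bit 0 is the multicast/broadcast indication tested above. */
            index_n1 = tim->bitmap_ctrl & 0xfe;
            index_n2 = tim_len + index_n1 - 4;

            if (index < index_n1 || index > index_n2)
                    return false;

            return !!(tim->virtual_map[index - index_n1] & mask);
    }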
412static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, 500static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
413 struct rxdone_entry_desc *rxdesc) 501 struct rxdone_entry_desc *rxdesc)
414{ 502{
@@ -511,8 +599,6 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
511 (rxdesc.size > header_length) && 599 (rxdesc.size > header_length) &&
512 (rxdesc.dev_flags & RXDONE_L2PAD)) 600 (rxdesc.dev_flags & RXDONE_L2PAD))
513 rt2x00queue_remove_l2pad(entry->skb, header_length); 601 rt2x00queue_remove_l2pad(entry->skb, header_length);
514 else
515 rt2x00queue_align_payload(entry->skb, header_length);
516 602
517 /* Trim buffer to correct size */ 603 /* Trim buffer to correct size */
518 skb_trim(entry->skb, rxdesc.size); 604 skb_trim(entry->skb, rxdesc.size);
@@ -526,6 +612,12 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
526 rxdesc.flags |= RX_FLAG_HT; 612 rxdesc.flags |= RX_FLAG_HT;
527 613
528 /* 614 /*
615 * Check if this is a beacon, and more frames have been
616 * buffered while we were in powersaving mode.
617 */
618 rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc);
619
620 /*
529 * Update extra components 621 * Update extra components
530 */ 622 */
531 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); 623 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
@@ -554,7 +646,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
554 646
555submit_entry: 647submit_entry:
556 entry->flags = 0; 648 entry->flags = 0;
557 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 649 rt2x00queue_index_inc(entry, Q_INDEX_DONE);
558 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && 650 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
559 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 651 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
560 rt2x00dev->ops->lib->clear_entry(entry); 652 rt2x00dev->ops->lib->clear_entry(entry);
@@ -801,23 +893,28 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
801 /* 893 /*
802 * Take TX headroom required for alignment into account. 894 * Take TX headroom required for alignment into account.
803 */ 895 */
804 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) 896 if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
805 rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE; 897 rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
806 else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) 898 else if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
807 rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE; 899 rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
808 900
809 /* 901 /*
810 * Allocate tx status FIFO for driver use. 902 * Allocate tx status FIFO for driver use.
811 */ 903 */
812 if (test_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags)) { 904 if (test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags)) {
813 /* 905 /*
814 * Allocate txstatus fifo and tasklet, we use a size of 512 906 * Allocate the txstatus fifo. In the worst case the tx
815 * for the kfifo which is big enough to store 512/4=128 tx 907 * status fifo has to hold the tx status of all entries
816 * status reports. In the worst case (tx status for all tx 908 * in all tx queues. Hence, calculate the kfifo size as
817 * queues gets reported before we've got a chance to handle 909 * tx_queues * entry_num and round up to the nearest
818 * them) 24*4=384 tx status reports need to be cached. 910 * power of 2.
819 */ 911 */
820 status = kfifo_alloc(&rt2x00dev->txstatus_fifo, 512, 912 int kfifo_size =
913 roundup_pow_of_two(rt2x00dev->ops->tx_queues *
914 rt2x00dev->ops->tx->entry_num *
915 sizeof(u32));
916
917 status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size,
821 GFP_KERNEL); 918 GFP_KERNEL);
822 if (status) 919 if (status)
823 return status; 920 return status;
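As a concrete example of the new sizing rule: for a hypothetical chipset with 4 TX queues of 64 entries each (illustrative numbers, not taken from this patch), the fifo must hold 4 * 64 status words of 4 bytes, i.e. 1024 bytes, which roundup_pow_of_two() leaves unchanged:

    #include <linux/log2.h>
    #include <linux/types.h>

    /* Illustrative values; the real ones come from
     * rt2x00dev->ops->tx_queues and rt2x00dev->ops->tx->entry_num. */
    #define EX_TX_QUEUES    4
    #define EX_ENTRY_NUM    64

    static unsigned int example_txstatus_fifo_size(void)
    {
            /* 4 * 64 * 4 = 1024 bytes, already a power of two. */
            return roundup_pow_of_two(EX_TX_QUEUES * EX_ENTRY_NUM *
                                      sizeof(u32));
    }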
@@ -1007,6 +1104,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1007 } 1104 }
1008 1105
1009 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1106 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1107 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
1010 1108
1011 /* 1109 /*
1012 * Let the driver probe the device to detect the capabilities. 1110 * Let the driver probe the device to detect the capabilities.
@@ -1063,6 +1161,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1063 */ 1161 */
1064 cancel_work_sync(&rt2x00dev->intf_work); 1162 cancel_work_sync(&rt2x00dev->intf_work);
1065 if (rt2x00_is_usb(rt2x00dev)) { 1163 if (rt2x00_is_usb(rt2x00dev)) {
1164 del_timer_sync(&rt2x00dev->txstatus_timer);
1066 cancel_work_sync(&rt2x00dev->rxdone_work); 1165 cancel_work_sync(&rt2x00dev->rxdone_work);
1067 cancel_work_sync(&rt2x00dev->txdone_work); 1166 cancel_work_sync(&rt2x00dev->txdone_work);
1068 } 1167 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index be0ff78c1b16..f316aad30612 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -99,7 +99,7 @@ int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev)
99{ 99{
100 int retval; 100 int retval;
101 101
102 if (!test_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags)) 102 if (!test_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags))
103 return 0; 103 return 0;
104 104
105 if (!rt2x00dev->fw) { 105 if (!rt2x00dev->fw) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
deleted file mode 100644
index ae1219dffaae..000000000000
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00lib
23 Abstract: rt2x00 HT specific routines.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28
29#include "rt2x00.h"
30#include "rt2x00lib.h"
31
32void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
33 struct txentry_desc *txdesc,
34 const struct rt2x00_rate *hwrate)
35{
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
37 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
38 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
39
40 if (tx_info->control.sta)
41 txdesc->u.ht.mpdu_density =
42 tx_info->control.sta->ht_cap.ampdu_density;
43
44 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
45
46 txdesc->u.ht.stbc =
47 (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
48
49 /*
50 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
51 * mcs rate to be used
52 */
53 if (txrate->flags & IEEE80211_TX_RC_MCS) {
54 txdesc->u.ht.mcs = txrate->idx;
55
56 /*
57 * MIMO PS should be set to 1 for STA's using dynamic SM PS
58 * when using more then one tx stream (>MCS7).
59 */
60 if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
61 ((tx_info->control.sta->ht_cap.cap &
62 IEEE80211_HT_CAP_SM_PS) >>
63 IEEE80211_HT_CAP_SM_PS_SHIFT) ==
64 WLAN_HT_CAP_SM_PS_DYNAMIC)
65 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
66 } else {
67 txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
68 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
69 txdesc->u.ht.mcs |= 0x08;
70 }
71
72 /*
73 * This frame is eligible for an AMPDU, however, don't aggregate
74 * frames that are intended to probe a specific tx rate.
75 */
76 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
77 !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
78 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
79
80 /*
81 * Set 40Mhz mode if necessary (for legacy rates this will
82 * duplicate the frame to both channels).
83 */
84 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
85 txrate->flags & IEEE80211_TX_RC_DUP_DATA)
86 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
87 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
88 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
89
90 /*
91 * Determine IFS values
92 * - Use TXOP_BACKOFF for management frames
93 * - Use TXOP_SIFS for fragment bursts
94 * - Use TXOP_HTTXOP for everything else
95 *
96 * Note: rt2800 devices won't use CTS protection (if used)
97 * for frames not transmitted with TXOP_HTTXOP
98 */
99 if (ieee80211_is_mgmt(hdr->frame_control))
100 txdesc->u.ht.txop = TXOP_BACKOFF;
101 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
102 txdesc->u.ht.txop = TXOP_SIFS;
103 else
104 txdesc->u.ht.txop = TXOP_HTTXOP;
105}
106
107u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
108 struct ieee80211_conf *conf)
109{
110 struct hw_mode_spec *spec = &rt2x00dev->spec;
111 int center_channel;
112 u16 i;
113
114 /*
115 * Initialize center channel to current channel.
116 */
117 center_channel = spec->channels[conf->channel->hw_value].channel;
118
119 /*
120 * Adjust center channel to HT40+ and HT40- operation.
121 */
122 if (conf_is_ht40_plus(conf))
123 center_channel += 2;
124 else if (conf_is_ht40_minus(conf))
125 center_channel -= (center_channel == 14) ? 1 : 2;
126
127 for (i = 0; i < spec->num_channels; i++)
128 if (spec->channels[i].channel == center_channel)
129 return i;
130
131 WARN_ON(1);
132 return conf->channel->hw_value;
133}
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 2d94cbaf5f4a..322cc4f3de5d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -32,6 +32,7 @@
32 */ 32 */
33#define WATCHDOG_INTERVAL round_jiffies_relative(HZ) 33#define WATCHDOG_INTERVAL round_jiffies_relative(HZ)
34#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ) 34#define LINK_TUNE_INTERVAL round_jiffies_relative(HZ)
35#define AGC_INTERVAL round_jiffies_relative(4 * HZ)
35 36
36/* 37/*
37 * rt2x00_rate: Per rate device information 38 * rt2x00_rate: Per rate device information
@@ -119,16 +120,6 @@ void rt2x00queue_free_skb(struct queue_entry *entry);
119void rt2x00queue_align_frame(struct sk_buff *skb); 120void rt2x00queue_align_frame(struct sk_buff *skb);
120 121
121/** 122/**
122 * rt2x00queue_align_payload - Align 802.11 payload to 4-byte boundary
123 * @skb: The skb to align
124 * @header_length: Length of 802.11 header
125 *
126 * Align the 802.11 payload to a 4-byte boundary, this could
127 * mean the header is not aligned properly though.
128 */
129void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length);
130
131/**
132 * rt2x00queue_insert_l2pad - Align 802.11 header & payload to 4-byte boundary 123 * rt2x00queue_insert_l2pad - Align 802.11 header & payload to 4-byte boundary
133 * @skb: The skb to align 124 * @skb: The skb to align
134 * @header_length: Length of 802.11 header 125 * @header_length: Length of 802.11 header
@@ -184,14 +175,14 @@ int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
184 175
185/** 176/**
186 * rt2x00queue_index_inc - Index incrementation function 177 * rt2x00queue_index_inc - Index incrementation function
187 * @queue: Queue (&struct data_queue) to perform the action on. 178 * @entry: Queue entry (&struct queue_entry) to perform the action on.
188 * @index: Index type (&enum queue_index) to perform the action on. 179 * @index: Index type (&enum queue_index) to perform the action on.
189 * 180 *
190 * This function will increase the requested index on the queue, 181 * This function will increase the requested index on the entry's queue,
191 * it will grab the appropriate locks and handle queue overflow events by 182 * it will grab the appropriate locks and handle queue overflow events by
192 * resetting the index to the start of the queue. 183 * resetting the index to the start of the queue.
193 */ 184 */
194void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index); 185void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index);
195 186
196/** 187/**
197 * rt2x00queue_init_queues - Initialize all data queues 188 * rt2x00queue_init_queues - Initialize all data queues
@@ -281,6 +272,18 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev);
281void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev); 272void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev);
282 273
283/** 274/**
275 * rt2x00link_start_agc - Start periodic gain calibration
276 * @rt2x00dev: Pointer to &struct rt2x00_dev.
277 */
278void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev);
279
280/**
281 * rt2x00link_stop_agc - Stop periodic gain calibration
282 * @rt2x00dev: Pointer to &struct rt2x00_dev.
283 */
284void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev);
285
286/**
284 * rt2x00link_register - Initialize link tuning & watchdog functionality 287 * rt2x00link_register - Initialize link tuning & watchdog functionality
285 * @rt2x00dev: Pointer to &struct rt2x00_dev. 288 * @rt2x00dev: Pointer to &struct rt2x00_dev.
286 * 289 *
@@ -385,41 +388,17 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
385#endif /* CONFIG_RT2X00_LIB_CRYPTO */ 388#endif /* CONFIG_RT2X00_LIB_CRYPTO */
386 389
387/* 390/*
388 * HT handlers.
389 */
390#ifdef CONFIG_RT2X00_LIB_HT
391void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
392 struct txentry_desc *txdesc,
393 const struct rt2x00_rate *hwrate);
394
395u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
396 struct ieee80211_conf *conf);
397#else
398static inline void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
399 struct txentry_desc *txdesc,
400 const struct rt2x00_rate *hwrate)
401{
402}
403
404static inline u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
405 struct ieee80211_conf *conf)
406{
407 return conf->channel->hw_value;
408}
409#endif /* CONFIG_RT2X00_LIB_HT */
410
411/*
412 * RFkill handlers. 391 * RFkill handlers.
413 */ 392 */
414static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) 393static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
415{ 394{
416 if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) 395 if (test_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags))
417 wiphy_rfkill_start_polling(rt2x00dev->hw->wiphy); 396 wiphy_rfkill_start_polling(rt2x00dev->hw->wiphy);
418} 397}
419 398
420static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) 399static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
421{ 400{
422 if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) 401 if (test_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags))
423 wiphy_rfkill_stop_polling(rt2x00dev->hw->wiphy); 402 wiphy_rfkill_stop_polling(rt2x00dev->hw->wiphy);
424} 403}
425 404
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 29abfdeb0b65..ea10b0068f82 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -192,17 +192,7 @@ static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
192 /* 192 /*
193 * Determine if software diversity is enabled for 193 * Determine if software diversity is enabled for
194 * either the TX or RX antenna (or both). 194 * either the TX or RX antenna (or both).
195 * Always perform this check since within the link
196 * tuner interval the configuration might have changed.
197 */ 195 */
198 ant->flags &= ~ANTENNA_RX_DIVERSITY;
199 ant->flags &= ~ANTENNA_TX_DIVERSITY;
200
201 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
202 ant->flags |= ANTENNA_RX_DIVERSITY;
203 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
204 ant->flags |= ANTENNA_TX_DIVERSITY;
205
206 if (!(ant->flags & ANTENNA_RX_DIVERSITY) && 196 if (!(ant->flags & ANTENNA_RX_DIVERSITY) &&
207 !(ant->flags & ANTENNA_TX_DIVERSITY)) { 197 !(ant->flags & ANTENNA_TX_DIVERSITY)) {
208 ant->flags = 0; 198 ant->flags = 0;
@@ -383,7 +373,7 @@ static void rt2x00link_tuner(struct work_struct *work)
383 * do not support link tuning at all, while other devices can disable 373 * do not support link tuning at all, while other devices can disable
384 * the feature from the EEPROM. 374 * the feature from the EEPROM.
385 */ 375 */
386 if (test_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags)) 376 if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
387 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count); 377 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
388 378
389 /* 379 /*
@@ -413,12 +403,11 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
413{ 403{
414 struct link *link = &rt2x00dev->link; 404 struct link *link = &rt2x00dev->link;
415 405
416 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || 406 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
417 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags)) 407 rt2x00dev->ops->lib->watchdog)
418 return; 408 ieee80211_queue_delayed_work(rt2x00dev->hw,
419 409 &link->watchdog_work,
420 ieee80211_queue_delayed_work(rt2x00dev->hw, 410 WATCHDOG_INTERVAL);
421 &link->watchdog_work, WATCHDOG_INTERVAL);
422} 411}
423 412
424void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev) 413void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -447,8 +436,46 @@ static void rt2x00link_watchdog(struct work_struct *work)
447 WATCHDOG_INTERVAL); 436 WATCHDOG_INTERVAL);
448} 437}
449 438
439void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev)
440{
441 struct link *link = &rt2x00dev->link;
442
443 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
444 rt2x00dev->ops->lib->gain_calibration)
445 ieee80211_queue_delayed_work(rt2x00dev->hw,
446 &link->agc_work,
447 AGC_INTERVAL);
448}
449
450void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev)
451{
452 cancel_delayed_work_sync(&rt2x00dev->link.agc_work);
453}
454
455static void rt2x00link_agc(struct work_struct *work)
456{
457 struct rt2x00_dev *rt2x00dev =
458 container_of(work, struct rt2x00_dev, link.agc_work.work);
459 struct link *link = &rt2x00dev->link;
460
461 /*
 462 	 * When the radio is shutting down we should
 463 	 * immediately cease the gain calibration.
464 */
465 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
466 return;
467
468 rt2x00dev->ops->lib->gain_calibration(rt2x00dev);
469
470 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
471 ieee80211_queue_delayed_work(rt2x00dev->hw,
472 &link->agc_work,
473 AGC_INTERVAL);
474}
475
450void rt2x00link_register(struct rt2x00_dev *rt2x00dev) 476void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
451{ 477{
478 INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
452 INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog); 479 INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
453 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); 480 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
454} 481}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 661c6baad2b9..93bec140e598 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -119,7 +119,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
119 * Use the ATIM queue if appropriate and present. 119 * Use the ATIM queue if appropriate and present.
120 */ 120 */
121 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 121 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
122 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) 122 test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags))
123 qid = QID_ATIM; 123 qid = QID_ATIM;
124 124
125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); 125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
@@ -158,7 +158,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
158 return; 158 return;
159 159
160 exit_fail: 160 exit_fail:
161 ieee80211_stop_queue(rt2x00dev->hw, qid); 161 rt2x00queue_pause_queue(queue);
162 dev_kfree_skb_any(skb); 162 dev_kfree_skb_any(skb);
163} 163}
164EXPORT_SYMBOL_GPL(rt2x00mac_tx); 164EXPORT_SYMBOL_GPL(rt2x00mac_tx);
@@ -411,11 +411,11 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
 411 	 * of different types, but has no separate filter for PS Poll frames, 411 	 * of different types, but has no separate filter for PS Poll frames,
412 * FIF_CONTROL flag implies FIF_PSPOLL. 412 * FIF_CONTROL flag implies FIF_PSPOLL.
413 */ 413 */
414 if (!test_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags)) { 414 if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) {
415 if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL) 415 if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
416 *total_flags |= FIF_CONTROL | FIF_PSPOLL; 416 *total_flags |= FIF_CONTROL | FIF_PSPOLL;
417 } 417 }
418 if (!test_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags)) { 418 if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) {
419 if (*total_flags & FIF_CONTROL) 419 if (*total_flags & FIF_CONTROL)
420 *total_flags |= FIF_PSPOLL; 420 *total_flags |= FIF_PSPOLL;
421 } 421 }
@@ -496,7 +496,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
496 496
497 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 497 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
498 return 0; 498 return 0;
499 else if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) 499 else if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
500 return -EOPNOTSUPP; 500 return -EOPNOTSUPP;
501 else if (key->keylen > 32) 501 else if (key->keylen > 32)
502 return -ENOSPC; 502 return -ENOSPC;
@@ -562,7 +562,7 @@ EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
562void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw) 562void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw)
563{ 563{
564 struct rt2x00_dev *rt2x00dev = hw->priv; 564 struct rt2x00_dev *rt2x00dev = hw->priv;
565 __set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); 565 set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
566 rt2x00link_stop_tuner(rt2x00dev); 566 rt2x00link_stop_tuner(rt2x00dev);
567} 567}
568EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start); 568EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start);
@@ -570,7 +570,7 @@ EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start);
570void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw) 570void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw)
571{ 571{
572 struct rt2x00_dev *rt2x00dev = hw->priv; 572 struct rt2x00_dev *rt2x00dev = hw->priv;
573 __clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); 573 clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
574 rt2x00link_start_tuner(rt2x00dev); 574 rt2x00link_start_tuner(rt2x00dev);
575} 575}
576EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_complete); 576EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_complete);
@@ -737,3 +737,84 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
737 rt2x00queue_flush_queue(queue, drop); 737 rt2x00queue_flush_queue(queue, drop);
738} 738}
739EXPORT_SYMBOL_GPL(rt2x00mac_flush); 739EXPORT_SYMBOL_GPL(rt2x00mac_flush);
740
741int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
742{
743 struct rt2x00_dev *rt2x00dev = hw->priv;
744 struct link_ant *ant = &rt2x00dev->link.ant;
745 struct antenna_setup *def = &rt2x00dev->default_ant;
746 struct antenna_setup setup;
747
 748 	/* The antenna value is not supposed to be 0,
 749 	 * or exceed the maximum number of antennas. */
750 if (!tx_ant || (tx_ant & ~3) || !rx_ant || (rx_ant & ~3))
751 return -EINVAL;
752
 753 	/* When the client tried to configure the antenna to or from
 754 	 * diversity mode, we must reset the default antenna as well
 755 	 * as that controls the diversity switch. */
756 if (ant->flags & ANTENNA_TX_DIVERSITY && tx_ant != 3)
757 ant->flags &= ~ANTENNA_TX_DIVERSITY;
758 if (ant->flags & ANTENNA_RX_DIVERSITY && rx_ant != 3)
759 ant->flags &= ~ANTENNA_RX_DIVERSITY;
760
 761 	/* If diversity is being enabled, check if we need hardware
 762 	 * or software diversity. In the latter case, reset the value,
 763 	 * and make sure we update the antenna flags to have the
 764 	 * link tuner pick up the diversity tuning. */
765 if (tx_ant == 3 && def->tx == ANTENNA_SW_DIVERSITY) {
766 tx_ant = ANTENNA_SW_DIVERSITY;
767 ant->flags |= ANTENNA_TX_DIVERSITY;
768 }
769
770 if (rx_ant == 3 && def->rx == ANTENNA_SW_DIVERSITY) {
771 rx_ant = ANTENNA_SW_DIVERSITY;
772 ant->flags |= ANTENNA_RX_DIVERSITY;
773 }
774
775 setup.tx = tx_ant;
776 setup.rx = rx_ant;
777
778 rt2x00lib_config_antenna(rt2x00dev, setup);
779
780 return 0;
781}
782EXPORT_SYMBOL_GPL(rt2x00mac_set_antenna);
783
784int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
785{
786 struct rt2x00_dev *rt2x00dev = hw->priv;
787 struct link_ant *ant = &rt2x00dev->link.ant;
788 struct antenna_setup *active = &rt2x00dev->link.ant.active;
789
 790 	/* When software diversity is active, we must report this to the
 791 	 * client and not the current active antenna state. */
792 if (ant->flags & ANTENNA_TX_DIVERSITY)
793 *tx_ant = ANTENNA_HW_DIVERSITY;
794 else
795 *tx_ant = active->tx;
796
797 if (ant->flags & ANTENNA_RX_DIVERSITY)
798 *rx_ant = ANTENNA_HW_DIVERSITY;
799 else
800 *rx_ant = active->rx;
801
802 return 0;
803}
804EXPORT_SYMBOL_GPL(rt2x00mac_get_antenna);
805
806void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
807 u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
808{
809 struct rt2x00_dev *rt2x00dev = hw->priv;
810 struct data_queue *queue;
811
812 tx_queue_for_each(rt2x00dev, queue) {
813 *tx += queue->length;
814 *tx_max += queue->limit;
815 }
816
817 *rx = rt2x00dev->rx->length;
818 *rx_max = rt2x00dev->rx->limit;
819}
820EXPORT_SYMBOL_GPL(rt2x00mac_get_ringparam);
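These three exported handlers are meant to be plugged into a driver's mac80211 callback table; assuming the ieee80211_ops of this kernel generation (which already provides set_antenna, get_antenna and get_ringparam hooks with matching signatures), the wiring in an individual rt2x00 driver would look roughly like:

    /* Sketch: only the members relevant to this hunk are shown; a real
     * ops table also needs .tx, .start, .stop, .add_interface, .config
     * and the other mandatory callbacks. */
    static const struct ieee80211_ops example_rt2x00_mac80211_ops = {
            .set_antenna    = rt2x00mac_set_antenna,
            .get_antenna    = rt2x00mac_get_antenna,
            .get_ringparam  = rt2x00mac_get_ringparam,
    };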
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 4dd82b0b0520..17148bb24426 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -60,14 +60,15 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
60} 60}
61EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); 61EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
62 62
63void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) 63bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
64{ 64{
65 struct data_queue *queue = rt2x00dev->rx; 65 struct data_queue *queue = rt2x00dev->rx;
66 struct queue_entry *entry; 66 struct queue_entry *entry;
67 struct queue_entry_priv_pci *entry_priv; 67 struct queue_entry_priv_pci *entry_priv;
68 struct skb_frame_desc *skbdesc; 68 struct skb_frame_desc *skbdesc;
69 int max_rx = 16;
69 70
70 while (1) { 71 while (--max_rx) {
71 entry = rt2x00queue_get_entry(queue, Q_INDEX); 72 entry = rt2x00queue_get_entry(queue, Q_INDEX);
72 entry_priv = entry->priv_data; 73 entry_priv = entry->priv_data;
73 74
@@ -93,9 +94,20 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
93 */ 94 */
94 rt2x00lib_rxdone(entry); 95 rt2x00lib_rxdone(entry);
95 } 96 }
97
98 return !max_rx;
96} 99}
97EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); 100EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
98 101
102void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
103{
104 unsigned int i;
105
106 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
107 msleep(10);
108}
109EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
110
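With rt2x00pci_rxdone() now bounded to 16 frames per call and reporting whether frames are still pending, a bottom half can yield instead of monopolising the CPU. A hedged sketch of how a driver might consume the return value from a tasklet (the tasklet itself is a placeholder, not part of this patch; its tasklet_init() binding is omitted):

    static struct tasklet_struct example_rxdone_tasklet;

    static void example_rxdone_work(unsigned long data)
    {
            struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

            /* Handle up to 16 frames; if more are pending, run again
             * later instead of looping here. A real driver would
             * re-enable its RX done interrupt once this returns false. */
            if (rt2x00pci_rxdone(rt2x00dev))
                    tasklet_schedule(&example_rxdone_tasklet);
    }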
99/* 111/*
100 * Device initialization handlers. 112 * Device initialization handlers.
101 */ 113 */
@@ -239,9 +251,8 @@ exit:
239 return -ENOMEM; 251 return -ENOMEM;
240} 252}
241 253
242int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 254int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
243{ 255{
244 struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
245 struct ieee80211_hw *hw; 256 struct ieee80211_hw *hw;
246 struct rt2x00_dev *rt2x00dev; 257 struct rt2x00_dev *rt2x00dev;
247 int retval; 258 int retval;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 746ce8fe8cf4..e2c99f2b9a14 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -101,8 +101,21 @@ struct queue_entry_priv_pci {
101/** 101/**
102 * rt2x00pci_rxdone - Handle RX done events 102 * rt2x00pci_rxdone - Handle RX done events
103 * @rt2x00dev: Device pointer, see &struct rt2x00_dev. 103 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
104 *
105 * Returns true if there are still rx frames pending and false if all
106 * pending rx frames were processed.
107 */
108bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
109
110/**
111 * rt2x00pci_flush_queue - Flush data queue
112 * @queue: Data queue to stop
113 * @drop: True to drop all pending frames.
114 *
 115 * This will wait for a maximum of 100ms for the queue to
 116 * become empty.
104 */ 117 */
105void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); 118void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
106 119
107/* 120/*
108 * Device initialization handlers. 121 * Device initialization handlers.
@@ -113,7 +126,7 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
113/* 126/*
114 * PCI driver handlers. 127 * PCI driver handlers.
115 */ 128 */
116int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id); 129int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops);
117void rt2x00pci_remove(struct pci_dev *pci_dev); 130void rt2x00pci_remove(struct pci_dev *pci_dev);
118#ifdef CONFIG_PM 131#ifdef CONFIG_PM
119int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state); 132int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4358051bfe1a..ab8c16f8bcaf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -60,7 +60,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
 60 	 * at least 8 bytes available in headroom for IV/EIV 60 	 * at least 8 bytes available in headroom for IV/EIV
 61 	 * and 8 bytes for ICV data as tailroom. 61 	 * and 8 bytes for ICV data as tailroom.
62 */ 62 */
63 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 63 if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
64 head_size += 8; 64 head_size += 8;
65 tail_size += 8; 65 tail_size += 8;
66 } 66 }
@@ -86,7 +86,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
86 memset(skbdesc, 0, sizeof(*skbdesc)); 86 memset(skbdesc, 0, sizeof(*skbdesc));
87 skbdesc->entry = entry; 87 skbdesc->entry = entry;
88 88
89 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) { 89 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
90 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, 90 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
91 skb->data, 91 skb->data,
92 skb->len, 92 skb->len,
@@ -148,19 +148,6 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
148 skb_trim(skb, frame_length); 148 skb_trim(skb, frame_length);
149} 149}
150 150
151void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
152{
153 unsigned int frame_length = skb->len;
154 unsigned int align = ALIGN_SIZE(skb, header_length);
155
156 if (!align)
157 return;
158
159 skb_push(skb, align);
160 memmove(skb->data, skb->data + align, frame_length);
161 skb_trim(skb, frame_length);
162}
163
164void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) 151void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
165{ 152{
166 unsigned int payload_length = skb->len - header_length; 153 unsigned int payload_length = skb->len - header_length;
@@ -226,7 +213,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
226 213
227 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 214 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
228 215
229 if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags)) 216 if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
230 return; 217 return;
231 218
232 /* 219 /*
@@ -315,6 +302,85 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
315 } 302 }
316} 303}
317 304
305static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
306 struct txentry_desc *txdesc,
307 const struct rt2x00_rate *hwrate)
308{
309 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
310 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
311 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
312
313 if (tx_info->control.sta)
314 txdesc->u.ht.mpdu_density =
315 tx_info->control.sta->ht_cap.ampdu_density;
316
317 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
318
319 /*
320 * Only one STBC stream is supported for now.
321 */
322 if (tx_info->flags & IEEE80211_TX_CTL_STBC)
323 txdesc->u.ht.stbc = 1;
324
325 /*
326 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
327 * mcs rate to be used
328 */
329 if (txrate->flags & IEEE80211_TX_RC_MCS) {
330 txdesc->u.ht.mcs = txrate->idx;
331
332 /*
 332 		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
 333 		 * when using more than one tx stream (>MCS7).
335 */
336 if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
337 ((tx_info->control.sta->ht_cap.cap &
338 IEEE80211_HT_CAP_SM_PS) >>
339 IEEE80211_HT_CAP_SM_PS_SHIFT) ==
340 WLAN_HT_CAP_SM_PS_DYNAMIC)
341 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
342 } else {
343 txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
344 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
345 txdesc->u.ht.mcs |= 0x08;
346 }
347
348 /*
349 * This frame is eligible for an AMPDU, however, don't aggregate
350 * frames that are intended to probe a specific tx rate.
351 */
352 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
353 !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
354 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
355
356 /*
 357 	 * Set 40 MHz mode if necessary (for legacy rates this will
358 * duplicate the frame to both channels).
359 */
360 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
361 txrate->flags & IEEE80211_TX_RC_DUP_DATA)
362 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
363 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
364 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
365
366 /*
367 * Determine IFS values
368 * - Use TXOP_BACKOFF for management frames except beacons
369 * - Use TXOP_SIFS for fragment bursts
370 * - Use TXOP_HTTXOP for everything else
371 *
372 * Note: rt2800 devices won't use CTS protection (if used)
373 * for frames not transmitted with TXOP_HTTXOP
374 */
375 if (ieee80211_is_mgmt(hdr->frame_control) &&
376 !ieee80211_is_beacon(hdr->frame_control))
377 txdesc->u.ht.txop = TXOP_BACKOFF;
378 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
379 txdesc->u.ht.txop = TXOP_SIFS;
380 else
381 txdesc->u.ht.txop = TXOP_HTTXOP;
382}
383
318static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, 384static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
319 struct txentry_desc *txdesc) 385 struct txentry_desc *txdesc)
320{ 386{
@@ -409,8 +475,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
409 rt2x00crypto_create_tx_descriptor(entry, txdesc); 475 rt2x00crypto_create_tx_descriptor(entry, txdesc);
410 rt2x00queue_create_tx_descriptor_seq(entry, txdesc); 476 rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
411 477
412 if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags)) 478 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
413 rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); 479 rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
414 else 480 else
415 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); 481 rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
416} 482}
@@ -449,7 +515,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
449 /* 515 /*
450 * Map the skb to DMA. 516 * Map the skb to DMA.
451 */ 517 */
452 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) 518 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
453 rt2x00queue_map_txskb(entry); 519 rt2x00queue_map_txskb(entry);
454 520
455 return 0; 521 return 0;
@@ -495,8 +561,11 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
495 struct skb_frame_desc *skbdesc; 561 struct skb_frame_desc *skbdesc;
496 u8 rate_idx, rate_flags; 562 u8 rate_idx, rate_flags;
497 563
498 if (unlikely(rt2x00queue_full(queue))) 564 if (unlikely(rt2x00queue_full(queue))) {
565 ERROR(queue->rt2x00dev,
566 "Dropping frame due to full tx queue %d.\n", queue->qid);
499 return -ENOBUFS; 567 return -ENOBUFS;
568 }
500 569
501 if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, 570 if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
502 &entry->flags))) { 571 &entry->flags))) {
@@ -539,7 +608,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
539 */ 608 */
540 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && 609 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
541 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { 610 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
542 if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags)) 611 if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
543 rt2x00crypto_tx_copy_iv(skb, &txdesc); 612 rt2x00crypto_tx_copy_iv(skb, &txdesc);
544 else 613 else
545 rt2x00crypto_tx_remove_iv(skb, &txdesc); 614 rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -553,9 +622,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
553 * PCI and USB devices, while header alignment only is valid 622 * PCI and USB devices, while header alignment only is valid
554 * for PCI devices. 623 * for PCI devices.
555 */ 624 */
556 if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags)) 625 if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
557 rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length); 626 rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
558 else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) 627 else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
559 rt2x00queue_align_frame(entry->skb); 628 rt2x00queue_align_frame(entry->skb);
560 629
561 /* 630 /*
@@ -571,7 +640,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
571 640
572 set_bit(ENTRY_DATA_PENDING, &entry->flags); 641 set_bit(ENTRY_DATA_PENDING, &entry->flags);
573 642
574 rt2x00queue_index_inc(queue, Q_INDEX); 643 rt2x00queue_index_inc(entry, Q_INDEX);
575 rt2x00queue_write_tx_descriptor(entry, &txdesc); 644 rt2x00queue_write_tx_descriptor(entry, &txdesc);
576 rt2x00queue_kick_tx_queue(queue, &txdesc); 645 rt2x00queue_kick_tx_queue(queue, &txdesc);
577 646
@@ -660,10 +729,12 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
660 return ret; 729 return ret;
661} 730}
662 731
663void rt2x00queue_for_each_entry(struct data_queue *queue, 732bool rt2x00queue_for_each_entry(struct data_queue *queue,
664 enum queue_index start, 733 enum queue_index start,
665 enum queue_index end, 734 enum queue_index end,
666 void (*fn)(struct queue_entry *entry)) 735 void *data,
736 bool (*fn)(struct queue_entry *entry,
737 void *data))
667{ 738{
668 unsigned long irqflags; 739 unsigned long irqflags;
669 unsigned int index_start; 740 unsigned int index_start;
@@ -674,7 +745,7 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
674 ERROR(queue->rt2x00dev, 745 ERROR(queue->rt2x00dev,
675 "Entry requested from invalid index range (%d - %d)\n", 746 "Entry requested from invalid index range (%d - %d)\n",
676 start, end); 747 start, end);
677 return; 748 return true;
678 } 749 }
679 750
680 /* 751 /*
@@ -693,15 +764,23 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
693 * send out all frames in the correct order. 764 * send out all frames in the correct order.
694 */ 765 */
695 if (index_start < index_end) { 766 if (index_start < index_end) {
696 for (i = index_start; i < index_end; i++) 767 for (i = index_start; i < index_end; i++) {
697 fn(&queue->entries[i]); 768 if (fn(&queue->entries[i], data))
769 return true;
770 }
698 } else { 771 } else {
699 for (i = index_start; i < queue->limit; i++) 772 for (i = index_start; i < queue->limit; i++) {
700 fn(&queue->entries[i]); 773 if (fn(&queue->entries[i], data))
774 return true;
775 }
701 776
702 for (i = 0; i < index_end; i++) 777 for (i = 0; i < index_end; i++) {
703 fn(&queue->entries[i]); 778 if (fn(&queue->entries[i], data))
779 return true;
780 }
704 } 781 }
782
783 return false;
705} 784}
706EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); 785EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
707 786
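The iterator now threads an opaque data pointer through to the callback and supports early termination: a callback returning true stops the walk and makes rt2x00queue_for_each_entry() return true as well. A hedged usage sketch (the callback below is illustrative, not code from this patch):

    /* Count completed entries, stopping at the first one that is
     * still owned by the device. */
    static bool example_count_until_pending(struct queue_entry *entry,
                                            void *data)
    {
            unsigned int *count = data;

            if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                    return true;    /* abort the walk */

            (*count)++;
            return false;           /* keep iterating */
    }

    static unsigned int example_count_done(struct data_queue *queue)
    {
            unsigned int count = 0;

            rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
                                       &count, example_count_until_pending);
            return count;
    }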
@@ -727,8 +806,9 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
727} 806}
728EXPORT_SYMBOL_GPL(rt2x00queue_get_entry); 807EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
729 808
730void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index) 809void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
731{ 810{
811 struct data_queue *queue = entry->queue;
732 unsigned long irqflags; 812 unsigned long irqflags;
733 813
734 if (unlikely(index >= Q_INDEX_MAX)) { 814 if (unlikely(index >= Q_INDEX_MAX)) {
@@ -743,7 +823,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
743 if (queue->index[index] >= queue->limit) 823 if (queue->index[index] >= queue->limit)
744 queue->index[index] = 0; 824 queue->index[index] = 0;
745 825
746 queue->last_action[index] = jiffies; 826 entry->last_action = jiffies;
747 827
748 if (index == Q_INDEX) { 828 if (index == Q_INDEX) {
749 queue->length++; 829 queue->length++;
@@ -848,7 +928,6 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
848 928
849void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) 929void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
850{ 930{
851 unsigned int i;
852 bool started; 931 bool started;
853 bool tx_queue = 932 bool tx_queue =
854 (queue->qid == QID_AC_VO) || 933 (queue->qid == QID_AC_VO) ||
@@ -883,20 +962,12 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
883 } 962 }
884 963
885 /* 964 /*
 886 	 * Check if driver supports flushing, we can only guarantee 965 	 * Check if the driver supports flushing. If so, we can
 887 	 * full support for flushing if the driver is able 966 	 * defer the flushing to the driver. Otherwise we must use the
 888 	 * to cancel all pending frames (drop = true). 967 	 * alternative which just waits for the queue to become empty.
889 */
890 if (drop && queue->rt2x00dev->ops->lib->flush_queue)
891 queue->rt2x00dev->ops->lib->flush_queue(queue);
892
893 /*
894 * When we don't want to drop any frames, or when
895 * the driver doesn't fully flush the queue correcly,
896 * we must wait for the queue to become empty.
897 */ 968 */
898 for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++) 969 if (likely(queue->rt2x00dev->ops->lib->flush_queue))
899 msleep(10); 970 queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
900 971
901 /* 972 /*
902 * The queue flush has failed... 973 * The queue flush has failed...
@@ -969,10 +1040,8 @@ static void rt2x00queue_reset(struct data_queue *queue)
969 queue->count = 0; 1040 queue->count = 0;
970 queue->length = 0; 1041 queue->length = 0;
971 1042
972 for (i = 0; i < Q_INDEX_MAX; i++) { 1043 for (i = 0; i < Q_INDEX_MAX; i++)
973 queue->index[i] = 0; 1044 queue->index[i] = 0;
974 queue->last_action[i] = jiffies;
975 }
976 1045
977 spin_unlock_irqrestore(&queue->index_lock, irqflags); 1046 spin_unlock_irqrestore(&queue->index_lock, irqflags);
978} 1047}
@@ -1079,7 +1148,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1079 if (status) 1148 if (status)
1080 goto exit; 1149 goto exit;
1081 1150
1082 if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) { 1151 if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
1083 status = rt2x00queue_alloc_entries(rt2x00dev->atim, 1152 status = rt2x00queue_alloc_entries(rt2x00dev->atim,
1084 rt2x00dev->ops->atim); 1153 rt2x00dev->ops->atim);
1085 if (status) 1154 if (status)
@@ -1131,7 +1200,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1131 struct data_queue *queue; 1200 struct data_queue *queue;
1132 enum data_queue_qid qid; 1201 enum data_queue_qid qid;
1133 unsigned int req_atim = 1202 unsigned int req_atim =
1134 !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1203 !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
1135 1204
1136 /* 1205 /*
1137 * We need the following queues: 1206 * We need the following queues:
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 217861f8d95f..167d45873dca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -217,6 +217,7 @@ enum txdone_entry_desc_flags {
217 TXDONE_FALLBACK, 217 TXDONE_FALLBACK,
218 TXDONE_FAILURE, 218 TXDONE_FAILURE,
219 TXDONE_EXCESSIVE_RETRY, 219 TXDONE_EXCESSIVE_RETRY,
220 TXDONE_AMPDU,
220}; 221};
221 222
222/** 223/**
@@ -363,6 +364,7 @@ enum queue_entry_flags {
363 * struct queue_entry: Entry inside the &struct data_queue 364 * struct queue_entry: Entry inside the &struct data_queue
364 * 365 *
365 * @flags: Entry flags, see &enum queue_entry_flags. 366 * @flags: Entry flags, see &enum queue_entry_flags.
367 * @last_action: Timestamp of last change.
366 * @queue: The data queue (&struct data_queue) to which this entry belongs. 368 * @queue: The data queue (&struct data_queue) to which this entry belongs.
367 * @skb: The buffer which is currently being transmitted (for TX queue), 369 * @skb: The buffer which is currently being transmitted (for TX queue),
368 * or used to directly receive data in (for RX queue). 370 * or used to directly receive data in (for RX queue).
@@ -372,6 +374,7 @@ enum queue_entry_flags {
372 */ 374 */
373struct queue_entry { 375struct queue_entry {
374 unsigned long flags; 376 unsigned long flags;
377 unsigned long last_action;
375 378
376 struct data_queue *queue; 379 struct data_queue *queue;
377 380
@@ -462,7 +465,6 @@ struct data_queue {
462 unsigned short threshold; 465 unsigned short threshold;
463 unsigned short length; 466 unsigned short length;
464 unsigned short index[Q_INDEX_MAX]; 467 unsigned short index[Q_INDEX_MAX];
465 unsigned long last_action[Q_INDEX_MAX];
466 468
467 unsigned short txop; 469 unsigned short txop;
468 unsigned short aifs; 470 unsigned short aifs;
@@ -579,16 +581,22 @@ struct data_queue_desc {
579 * @queue: Pointer to @data_queue 581 * @queue: Pointer to @data_queue
580 * @start: &enum queue_index Pointer to start index 582 * @start: &enum queue_index Pointer to start index
581 * @end: &enum queue_index Pointer to end index 583 * @end: &enum queue_index Pointer to end index
584 * @data: Data to pass to the callback function
582 * @fn: The function to call for each &struct queue_entry 585 * @fn: The function to call for each &struct queue_entry
583 * 586 *
584 * This will walk through all entries in the queue, in chronological 587 * This will walk through all entries in the queue, in chronological
585 * order. This means it will start at the current @start pointer 588 * order. This means it will start at the current @start pointer
586 * and will walk through the queue until it reaches the @end pointer. 589 * and will walk through the queue until it reaches the @end pointer.
590 *
591 * If fn returns true for an entry rt2x00queue_for_each_entry will stop
592 * processing and return true as well.
587 */ 593 */
588void rt2x00queue_for_each_entry(struct data_queue *queue, 594bool rt2x00queue_for_each_entry(struct data_queue *queue,
589 enum queue_index start, 595 enum queue_index start,
590 enum queue_index end, 596 enum queue_index end,
591 void (*fn)(struct queue_entry *entry)); 597 void *data,
598 bool (*fn)(struct queue_entry *entry,
599 void *data));
592 600
593/** 601/**
594 * rt2x00queue_empty - Check if the queue is empty. 602 * rt2x00queue_empty - Check if the queue is empty.
@@ -628,22 +636,24 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
628 636
629/** 637/**
630 * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports 638 * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
631 * @queue: Queue to check. 639 * @entry: Queue entry to check.
632 */ 640 */
633static inline int rt2x00queue_status_timeout(struct data_queue *queue) 641static inline int rt2x00queue_status_timeout(struct queue_entry *entry)
634{ 642{
635 return time_after(queue->last_action[Q_INDEX_DMA_DONE], 643 if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
636 queue->last_action[Q_INDEX_DONE] + (HZ / 10)); 644 return false;
645 return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
637} 646}
638 647
639/** 648/**
640 * rt2x00queue_timeout - Check if a timeout occurred for DMA transfers 649 * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
641 * @queue: Queue to check. 650 * @entry: Queue entry to check.
642 */ 651 */
643static inline int rt2x00queue_dma_timeout(struct data_queue *queue) 652static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
644{ 653{
645 return time_after(queue->last_action[Q_INDEX], 654 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
646 queue->last_action[Q_INDEX_DMA_DONE] + (HZ / 10)); 655 return false;
656 return time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
647} 657}
648 658
649/** 659/**
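The timeout checks now operate per entry: each entry records in last_action when it last changed state and is treated as stuck after roughly 100 ms without progress, instead of comparing two queue-wide timestamps. A hedged sketch of how a watchdog might scan a queue with these helpers (the wrapper function is illustrative):

    /* Report whether any entry of the queue looks stuck, either
     * waiting for DMA completion or for its TX status report. */
    static bool example_queue_looks_stuck(struct data_queue *queue)
    {
            unsigned int i;

            for (i = 0; i < queue->limit; i++) {
                    struct queue_entry *entry = &queue->entries[i];

                    if (rt2x00queue_dma_timeout(entry) ||
                        rt2x00queue_status_timeout(entry))
                            return true;
            }

            return false;
    }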
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 36f388f97d65..8f90f6268077 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -165,6 +165,59 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
165} 165}
166EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read); 166EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);
167 167
168
169struct rt2x00_async_read_data {
170 __le32 reg;
171 struct usb_ctrlrequest cr;
172 struct rt2x00_dev *rt2x00dev;
173 bool (*callback)(struct rt2x00_dev *, int, u32);
174};
175
176static void rt2x00usb_register_read_async_cb(struct urb *urb)
177{
178 struct rt2x00_async_read_data *rd = urb->context;
179 if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
180 if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
181 kfree(rd);
182 } else
183 kfree(rd);
184}
185
186void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
187 const unsigned int offset,
188 bool (*callback)(struct rt2x00_dev*, int, u32))
189{
190 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
191 struct urb *urb;
192 struct rt2x00_async_read_data *rd;
193
194 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
195 if (!rd)
196 return;
197
198 urb = usb_alloc_urb(0, GFP_ATOMIC);
199 if (!urb) {
200 kfree(rd);
201 return;
202 }
203
204 rd->rt2x00dev = rt2x00dev;
205 rd->callback = callback;
206 rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
207 rd->cr.bRequest = USB_MULTI_READ;
208 rd->cr.wValue = 0;
209 rd->cr.wIndex = cpu_to_le16(offset);
210 rd->cr.wLength = cpu_to_le16(sizeof(u32));
211
212 usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
213 (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
214 rt2x00usb_register_read_async_cb, rd);
215 if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
216 kfree(rd);
217 usb_free_urb(urb);
218}
219EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
220
168/* 221/*
169 * TX data handlers. 222 * TX data handlers.
170 */ 223 */
@@ -212,6 +265,9 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
212 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 265 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
213 return; 266 return;
214 267
268 if (rt2x00dev->ops->lib->tx_dma_done)
269 rt2x00dev->ops->lib->tx_dma_done(entry);
270
215 /* 271 /*
216 * Report the frame as DMA done 272 * Report the frame as DMA done
217 */ 273 */
@@ -227,10 +283,12 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
227 * Schedule the delayed work for reading the TX status 283 * Schedule the delayed work for reading the TX status
228 * from the device. 284 * from the device.
229 */ 285 */
230 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); 286 if (!test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags) ||
287 !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
288 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
231} 289}
232 290
233static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) 291static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data)
234{ 292{
235 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 293 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
236 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 294 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -240,7 +298,7 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
240 298
241 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) || 299 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
242 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) 300 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
243 return; 301 return false;
244 302
245 /* 303 /*
246 * USB devices cannot blindly pass the skb->len as the 304 * USB devices cannot blindly pass the skb->len as the
@@ -261,6 +319,8 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
261 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 319 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
262 rt2x00lib_dmadone(entry); 320 rt2x00lib_dmadone(entry);
263 } 321 }
322
323 return false;
264} 324}
265 325
266/* 326/*
@@ -323,7 +383,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
323 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); 383 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
324} 384}
325 385
326static void rt2x00usb_kick_rx_entry(struct queue_entry *entry) 386static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void* data)
327{ 387{
328 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 388 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
329 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 389 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -332,7 +392,7 @@ static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
332 392
333 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || 393 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
334 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) 394 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
335 return; 395 return false;
336 396
337 rt2x00lib_dmastart(entry); 397 rt2x00lib_dmastart(entry);
338 398
@@ -348,6 +408,8 @@ static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
348 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 408 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
349 rt2x00lib_dmadone(entry); 409 rt2x00lib_dmadone(entry);
350 } 410 }
411
412 return false;
351} 413}
352 414
353void rt2x00usb_kick_queue(struct data_queue *queue) 415void rt2x00usb_kick_queue(struct data_queue *queue)
@@ -358,12 +420,18 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
358 case QID_AC_BE: 420 case QID_AC_BE:
359 case QID_AC_BK: 421 case QID_AC_BK:
360 if (!rt2x00queue_empty(queue)) 422 if (!rt2x00queue_empty(queue))
361 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, 423 rt2x00queue_for_each_entry(queue,
424 Q_INDEX_DONE,
425 Q_INDEX,
426 NULL,
362 rt2x00usb_kick_tx_entry); 427 rt2x00usb_kick_tx_entry);
363 break; 428 break;
364 case QID_RX: 429 case QID_RX:
365 if (!rt2x00queue_full(queue)) 430 if (!rt2x00queue_full(queue))
366 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, 431 rt2x00queue_for_each_entry(queue,
432 Q_INDEX_DONE,
433 Q_INDEX,
434 NULL,
367 rt2x00usb_kick_rx_entry); 435 rt2x00usb_kick_rx_entry);
368 break; 436 break;
369 default: 437 default:
@@ -372,14 +440,14 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
372} 440}
373EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue); 441EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
374 442
375static void rt2x00usb_flush_entry(struct queue_entry *entry) 443static bool rt2x00usb_flush_entry(struct queue_entry *entry, void* data)
376{ 444{
377 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 445 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
378 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 446 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
379 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; 447 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
380 448
381 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 449 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
382 return; 450 return false;
383 451
384 usb_kill_urb(entry_priv->urb); 452 usb_kill_urb(entry_priv->urb);
385 453
@@ -387,17 +455,20 @@ static void rt2x00usb_flush_entry(struct queue_entry *entry)
387 * Kill guardian urb (if required by driver). 455 * Kill guardian urb (if required by driver).
388 */ 456 */
389 if ((entry->queue->qid == QID_BEACON) && 457 if ((entry->queue->qid == QID_BEACON) &&
390 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))) 458 (test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags)))
391 usb_kill_urb(bcn_priv->guardian_urb); 459 usb_kill_urb(bcn_priv->guardian_urb);
460
461 return false;
392} 462}
393 463
394void rt2x00usb_flush_queue(struct data_queue *queue) 464void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
395{ 465{
396 struct work_struct *completion; 466 struct work_struct *completion;
397 unsigned int i; 467 unsigned int i;
398 468
399 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, 469 if (drop)
400 rt2x00usb_flush_entry); 470 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
471 rt2x00usb_flush_entry);
401 472
402 /* 473 /*
403 * Obtain the queue completion handler 474 * Obtain the queue completion handler
@@ -416,7 +487,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue)
416 return; 487 return;
417 } 488 }
418 489
419 for (i = 0; i < 20; i++) { 490 for (i = 0; i < 10; i++) {
420 /* 491 /*
421 * Check if the driver is already done, otherwise we 492 * Check if the driver is already done, otherwise we
422 * have to sleep a little while to give the driver/hw 493 * have to sleep a little while to give the driver/hw
@@ -456,15 +527,31 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
456 queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work); 527 queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
457} 528}
458 529
530static int rt2x00usb_status_timeout(struct data_queue *queue)
531{
532 struct queue_entry *entry;
533
534 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
535 return rt2x00queue_status_timeout(entry);
536}
537
538static int rt2x00usb_dma_timeout(struct data_queue *queue)
539{
540 struct queue_entry *entry;
541
542 entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
543 return rt2x00queue_dma_timeout(entry);
544}
545
459void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) 546void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
460{ 547{
461 struct data_queue *queue; 548 struct data_queue *queue;
462 549
463 tx_queue_for_each(rt2x00dev, queue) { 550 tx_queue_for_each(rt2x00dev, queue) {
464 if (!rt2x00queue_empty(queue)) { 551 if (!rt2x00queue_empty(queue)) {
465 if (rt2x00queue_dma_timeout(queue)) 552 if (rt2x00usb_dma_timeout(queue))
466 rt2x00usb_watchdog_tx_dma(queue); 553 rt2x00usb_watchdog_tx_dma(queue);
467 if (rt2x00queue_status_timeout(queue)) 554 if (rt2x00usb_status_timeout(queue))
468 rt2x00usb_watchdog_tx_status(queue); 555 rt2x00usb_watchdog_tx_status(queue);
469 } 556 }
470 } 557 }
@@ -489,7 +576,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
489 entry->flags = 0; 576 entry->flags = 0;
490 577
491 if (entry->queue->qid == QID_RX) 578 if (entry->queue->qid == QID_RX)
492 rt2x00usb_kick_rx_entry(entry); 579 rt2x00usb_kick_rx_entry(entry, NULL);
493} 580}
494EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 581EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
495 582
@@ -583,7 +670,7 @@ static int rt2x00usb_alloc_entries(struct data_queue *queue)
583 * then we are done. 670 * then we are done.
584 */ 671 */
585 if (queue->qid != QID_BEACON || 672 if (queue->qid != QID_BEACON ||
586 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) 673 !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
587 return 0; 674 return 0;
588 675
589 for (i = 0; i < queue->limit; i++) { 676 for (i = 0; i < queue->limit; i++) {
@@ -618,7 +705,7 @@ static void rt2x00usb_free_entries(struct data_queue *queue)
618 * then we are done. 705 * then we are done.
619 */ 706 */
620 if (queue->qid != QID_BEACON || 707 if (queue->qid != QID_BEACON ||
621 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) 708 !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
622 return; 709 return;
623 710
624 for (i = 0; i < queue->limit; i++) { 711 for (i = 0; i < queue->limit; i++) {
@@ -707,10 +794,9 @@ exit:
707} 794}
708 795
709int rt2x00usb_probe(struct usb_interface *usb_intf, 796int rt2x00usb_probe(struct usb_interface *usb_intf,
710 const struct usb_device_id *id) 797 const struct rt2x00_ops *ops)
711{ 798{
712 struct usb_device *usb_dev = interface_to_usbdev(usb_intf); 799 struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
713 struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
714 struct ieee80211_hw *hw; 800 struct ieee80211_hw *hw;
715 struct rt2x00_dev *rt2x00dev; 801 struct rt2x00_dev *rt2x00dev;
716 int retval; 802 int retval;
@@ -735,6 +821,7 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
735 821
736 INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone); 822 INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
737 INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone); 823 INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
824 init_timer(&rt2x00dev->txstatus_timer);
738 825
739 retval = rt2x00usb_alloc_reg(rt2x00dev); 826 retval = rt2x00usb_alloc_reg(rt2x00dev);
740 if (retval) 827 if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index e11c759ac9ed..323ca7b2b095 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -35,12 +35,6 @@
35}) 35})
36 36
37/* 37/*
38 * This variable should be used with the
39 * usb_driver structure initialization.
40 */
41#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops)
42
43/*
44 * For USB vendor requests we need to pass a timeout 38 * For USB vendor requests we need to pass a timeout
45 * time in ms, for this we use the REGISTER_TIMEOUT, 39 * time in ms, for this we use the REGISTER_TIMEOUT,
46 * however when loading firmware a higher value is 40 * however when loading firmware a higher value is
@@ -345,6 +339,23 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
345 const struct rt2x00_field32 field, 339 const struct rt2x00_field32 field,
346 u32 *reg); 340 u32 *reg);
347 341
342/**
343 * rt2x00usb_register_read_async - Asynchronously read 32bit register word
344 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
345 * @offset: Register offset
346 * @callback: Function to call when read completes.
347 *
348 * Submit a control URB to read a 32bit register. This is safe to
349 * be called from atomic context. The callback will be called
350 * when the URB completes. Otherwise the function is similar
351 * to rt2x00usb_register_read().
352 * When the callback function returns false, the memory will be cleaned up;
353 * when it returns true, the URB will be fired again.
354 */
355void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
356 const unsigned int offset,
357 bool (*callback)(struct rt2x00_dev*, int, u32));
358
348/* 359/*
349 * Radio handlers 360 * Radio handlers
350 */ 361 */
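A hedged usage sketch for the new helper: the callback decides whether the control URB is resubmitted (return true) or its context is freed (return false). EXAMPLE_STATUS_REG and the example_* names are placeholders, not registers or functions introduced by this patch:

	static bool example_status_read_cb(struct rt2x00_dev *rt2x00dev,
					   int urb_status, u32 reg)
	{
		if (urb_status)
			return false;	/* I/O error: let the helper clean up */

		/* ... consume 'reg' here, e.g. push a TX status word into a fifo ... */

		/* Return true to resubmit the URB for another read,
		 * false to release the allocated context. */
		return false;
	}

	static void example_poll_status(struct rt2x00_dev *rt2x00dev)
	{
		/* Safe from atomic context; the helper allocates with GFP_ATOMIC. */
		rt2x00usb_register_read_async(rt2x00dev, EXAMPLE_STATUS_REG,
					      example_status_read_cb);
	}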
@@ -389,11 +400,13 @@ void rt2x00usb_kick_queue(struct data_queue *queue);
389/** 400/**
390 * rt2x00usb_flush_queue - Flush data queue 401 * rt2x00usb_flush_queue - Flush data queue
391 * @queue: Data queue to stop 402 * @queue: Data queue to stop
403 * @drop: True to drop all pending frames.
392 * 404 *
393 * This will walk through all entries of the queue and kill all 405 * This will walk through all entries of the queue and will optionally
394 * URB's which were send to the device. 406 * kill all URBs which were sent to the device, or at least wait until
407 * they have been returned from the device.
395 */ 408 */
396void rt2x00usb_flush_queue(struct data_queue *queue); 409void rt2x00usb_flush_queue(struct data_queue *queue, bool drop);
397 410
398/** 411/**
399 * rt2x00usb_watchdog - Watchdog for USB communication 412 * rt2x00usb_watchdog - Watchdog for USB communication
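For callers, the new @drop argument only decides whether still-pending URBs are killed up front; the wait-for-completion loop below it runs in either case. A minimal sketch under that assumption:

	/* Interface teardown: kill whatever is still queued, then wait. */
	rt2x00usb_flush_queue(queue, true);

	/* Flush without dropping: just wait until the device has returned
	 * the frames it already owns. */
	rt2x00usb_flush_queue(queue, false);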
@@ -416,7 +429,7 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev);
416 * USB driver handlers. 429 * USB driver handlers.
417 */ 430 */
418int rt2x00usb_probe(struct usb_interface *usb_intf, 431int rt2x00usb_probe(struct usb_interface *usb_intf,
419 const struct usb_device_id *id); 432 const struct rt2x00_ops *ops);
420void rt2x00usb_disconnect(struct usb_interface *usb_intf); 433void rt2x00usb_disconnect(struct usb_interface *usb_intf);
421#ifdef CONFIG_PM 434#ifdef CONFIG_PM
422int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state); 435int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 77e8113b91e1..9d35ec16a3a5 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -683,7 +683,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
683 683
684 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529)); 684 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
685 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 685 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
686 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); 686 !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
687 687
688 /* 688 /*
689 * Configure the RX antenna. 689 * Configure the RX antenna.
@@ -811,10 +811,10 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
811 811
812 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 812 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
813 sel = antenna_sel_a; 813 sel = antenna_sel_a;
814 lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 814 lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
815 } else { 815 } else {
816 sel = antenna_sel_bg; 816 sel = antenna_sel_bg;
817 lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); 817 lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
818 } 818 }
819 819
820 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) 820 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -834,7 +834,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
834 else if (rt2x00_rf(rt2x00dev, RF2527)) 834 else if (rt2x00_rf(rt2x00dev, RF2527))
835 rt61pci_config_antenna_2x(rt2x00dev, ant); 835 rt61pci_config_antenna_2x(rt2x00dev, ant);
836 else if (rt2x00_rf(rt2x00dev, RF2529)) { 836 else if (rt2x00_rf(rt2x00dev, RF2529)) {
837 if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) 837 if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags))
838 rt61pci_config_antenna_2x(rt2x00dev, ant); 838 rt61pci_config_antenna_2x(rt2x00dev, ant);
839 else 839 else
840 rt61pci_config_antenna_2529(rt2x00dev, ant); 840 rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -848,13 +848,13 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
848 short lna_gain = 0; 848 short lna_gain = 0;
849 849
850 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) { 850 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) {
851 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) 851 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
852 lna_gain += 14; 852 lna_gain += 14;
853 853
854 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); 854 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
855 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); 855 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
856 } else { 856 } else {
857 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) 857 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
858 lna_gain += 14; 858 lna_gain += 14;
859 859
860 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); 860 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1050,14 +1050,14 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
1050 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 1050 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1051 low_bound = 0x28; 1051 low_bound = 0x28;
1052 up_bound = 0x48; 1052 up_bound = 0x48;
1053 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 1053 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
1054 low_bound += 0x10; 1054 low_bound += 0x10;
1055 up_bound += 0x10; 1055 up_bound += 0x10;
1056 } 1056 }
1057 } else { 1057 } else {
1058 low_bound = 0x20; 1058 low_bound = 0x20;
1059 up_bound = 0x40; 1059 up_bound = 0x40;
1060 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { 1060 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
1061 low_bound += 0x10; 1061 low_bound += 0x10;
1062 up_bound += 0x10; 1062 up_bound += 0x10;
1063 } 1063 }
@@ -2260,8 +2260,8 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2260 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 2260 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2261} 2261}
2262 2262
2263static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 2263static inline void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
2264 struct rt2x00_field32 irq_field) 2264 struct rt2x00_field32 irq_field)
2265{ 2265{
2266 u32 reg; 2266 u32 reg;
2267 2267
@@ -2313,8 +2313,10 @@ static void rt61pci_tbtt_tasklet(unsigned long data)
2313static void rt61pci_rxdone_tasklet(unsigned long data) 2313static void rt61pci_rxdone_tasklet(unsigned long data)
2314{ 2314{
2315 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 2315 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2316 rt2x00pci_rxdone(rt2x00dev); 2316 if (rt2x00pci_rxdone(rt2x00dev))
2317 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE); 2317 rt2x00pci_rxdone(rt2x00dev);
2318 else
2319 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
2318} 2320}
2319 2321
2320static void rt61pci_autowake_tasklet(unsigned long data) 2322static void rt61pci_autowake_tasklet(unsigned long data)
@@ -2535,7 +2537,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2535 * Determine number of antennas. 2537 * Determine number of antennas.
2536 */ 2538 */
2537 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_NUM) == 2) 2539 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_NUM) == 2)
2538 __set_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags); 2540 __set_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags);
2539 2541
2540 /* 2542 /*
2541 * Identify default antenna configuration. 2543 * Identify default antenna configuration.
@@ -2549,20 +2551,20 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2549 * Read the Frame type. 2551 * Read the Frame type.
2550 */ 2552 */
2551 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) 2553 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE))
2552 __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); 2554 __set_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags);
2553 2555
2554 /* 2556 /*
2555 * Detect if this device has a hardware controlled radio. 2557 * Detect if this device has a hardware controlled radio.
2556 */ 2558 */
2557 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 2559 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
2558 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 2560 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
2559 2561
2560 /* 2562 /*
2561 * Read frequency offset and RF programming sequence. 2563 * Read frequency offset and RF programming sequence.
2562 */ 2564 */
2563 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); 2565 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
2564 if (rt2x00_get_field16(eeprom, EEPROM_FREQ_SEQ)) 2566 if (rt2x00_get_field16(eeprom, EEPROM_FREQ_SEQ))
2565 __set_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags); 2567 __set_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags);
2566 2568
2567 rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); 2569 rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
2568 2570
@@ -2572,9 +2574,9 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2572 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 2574 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
2573 2575
2574 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A)) 2576 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
2575 __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 2577 __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
2576 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) 2578 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
2577 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); 2579 __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
2578 2580
2579 /* 2581 /*
2580 * When working with a RF2529 chip without double antenna, 2582 * When working with a RF2529 chip without double antenna,
@@ -2582,7 +2584,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2582 * eeprom word. 2584 * eeprom word.
2583 */ 2585 */
2584 if (rt2x00_rf(rt2x00dev, RF2529) && 2586 if (rt2x00_rf(rt2x00dev, RF2529) &&
2585 !test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) { 2587 !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) {
2586 rt2x00dev->default_ant.rx = 2588 rt2x00dev->default_ant.rx =
2587 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED); 2589 ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
2588 rt2x00dev->default_ant.tx = 2590 rt2x00dev->default_ant.tx =
@@ -2797,7 +2799,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2797 spec->supported_bands = SUPPORT_BAND_2GHZ; 2799 spec->supported_bands = SUPPORT_BAND_2GHZ;
2798 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2800 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2799 2801
2800 if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) { 2802 if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) {
2801 spec->num_channels = 14; 2803 spec->num_channels = 14;
2802 spec->channels = rf_vals_noseq; 2804 spec->channels = rf_vals_noseq;
2803 } else { 2805 } else {
@@ -2867,16 +2869,16 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2867 * This device has multiple filters for control frames, 2869 * This device has multiple filters for control frames,
2868 * but has no separate filter for PS Poll frames. 2870
2869 */ 2871 */
2870 __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); 2872 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
2871 2873
2872 /* 2874 /*
2873 * This device requires firmware and DMA mapped skbs. 2875 * This device requires firmware and DMA mapped skbs.
2874 */ 2876 */
2875 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2877 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
2876 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 2878 __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
2877 if (!modparam_nohwcrypt) 2879 if (!modparam_nohwcrypt)
2878 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 2880 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
2879 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 2881 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
2880 2882
2881 /* 2883 /*
2882 * Set the rssi offset. 2884 * Set the rssi offset.
@@ -2977,6 +2979,9 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2977 .get_tsf = rt61pci_get_tsf, 2979 .get_tsf = rt61pci_get_tsf,
2978 .rfkill_poll = rt2x00mac_rfkill_poll, 2980 .rfkill_poll = rt2x00mac_rfkill_poll,
2979 .flush = rt2x00mac_flush, 2981 .flush = rt2x00mac_flush,
2982 .set_antenna = rt2x00mac_set_antenna,
2983 .get_antenna = rt2x00mac_get_antenna,
2984 .get_ringparam = rt2x00mac_get_ringparam,
2980}; 2985};
2981 2986
2982static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { 2987static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
@@ -3001,6 +3006,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
3001 .start_queue = rt61pci_start_queue, 3006 .start_queue = rt61pci_start_queue,
3002 .kick_queue = rt61pci_kick_queue, 3007 .kick_queue = rt61pci_kick_queue,
3003 .stop_queue = rt61pci_stop_queue, 3008 .stop_queue = rt61pci_stop_queue,
3009 .flush_queue = rt2x00pci_flush_queue,
3004 .write_tx_desc = rt61pci_write_tx_desc, 3010 .write_tx_desc = rt61pci_write_tx_desc,
3005 .write_beacon = rt61pci_write_beacon, 3011 .write_beacon = rt61pci_write_beacon,
3006 .clear_beacon = rt61pci_clear_beacon, 3012 .clear_beacon = rt61pci_clear_beacon,
@@ -3058,11 +3064,11 @@ static const struct rt2x00_ops rt61pci_ops = {
3058 */ 3064 */
3059static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = { 3065static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
3060 /* RT2561s */ 3066 /* RT2561s */
3061 { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) }, 3067 { PCI_DEVICE(0x1814, 0x0301) },
3062 /* RT2561 v2 */ 3068 /* RT2561 v2 */
3063 { PCI_DEVICE(0x1814, 0x0302), PCI_DEVICE_DATA(&rt61pci_ops) }, 3069 { PCI_DEVICE(0x1814, 0x0302) },
3064 /* RT2661 */ 3070 /* RT2661 */
3065 { PCI_DEVICE(0x1814, 0x0401), PCI_DEVICE_DATA(&rt61pci_ops) }, 3071 { PCI_DEVICE(0x1814, 0x0401) },
3066 { 0, } 3072 { 0, }
3067}; 3073};
3068 3074
@@ -3077,10 +3083,16 @@ MODULE_FIRMWARE(FIRMWARE_RT2561s);
3077MODULE_FIRMWARE(FIRMWARE_RT2661); 3083MODULE_FIRMWARE(FIRMWARE_RT2661);
3078MODULE_LICENSE("GPL"); 3084MODULE_LICENSE("GPL");
3079 3085
3086static int rt61pci_probe(struct pci_dev *pci_dev,
3087 const struct pci_device_id *id)
3088{
3089 return rt2x00pci_probe(pci_dev, &rt61pci_ops);
3090}
3091
3080static struct pci_driver rt61pci_driver = { 3092static struct pci_driver rt61pci_driver = {
3081 .name = KBUILD_MODNAME, 3093 .name = KBUILD_MODNAME,
3082 .id_table = rt61pci_device_table, 3094 .id_table = rt61pci_device_table,
3083 .probe = rt2x00pci_probe, 3095 .probe = rt61pci_probe,
3084 .remove = __devexit_p(rt2x00pci_remove), 3096 .remove = __devexit_p(rt2x00pci_remove),
3085 .suspend = rt2x00pci_suspend, 3097 .suspend = rt2x00pci_suspend,
3086 .resume = rt2x00pci_resume, 3098 .resume = rt2x00pci_resume,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 02f1148c577e..ad20953cbf05 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -595,7 +595,7 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
595 switch (ant->rx) { 595 switch (ant->rx) {
596 case ANTENNA_HW_DIVERSITY: 596 case ANTENNA_HW_DIVERSITY:
597 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); 597 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
598 temp = !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags) 598 temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
599 && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ); 599 && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
600 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); 600 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
601 break; 601 break;
@@ -636,7 +636,7 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
636 636
637 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); 637 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
638 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 638 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
639 !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); 639 !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
640 640
641 /* 641 /*
642 * Configure the RX antenna. 642 * Configure the RX antenna.
@@ -709,10 +709,10 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
709 709
710 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 710 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
711 sel = antenna_sel_a; 711 sel = antenna_sel_a;
712 lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 712 lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
713 } else { 713 } else {
714 sel = antenna_sel_bg; 714 sel = antenna_sel_bg;
715 lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); 715 lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
716 } 716 }
717 717
718 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) 718 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
740 short lna_gain = 0; 740 short lna_gain = 0;
741 741
742 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) { 742 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) {
743 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) 743 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
744 lna_gain += 14; 744 lna_gain += 14;
745 745
746 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); 746 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
930 low_bound = 0x28; 930 low_bound = 0x28;
931 up_bound = 0x48; 931 up_bound = 0x48;
932 932
933 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 933 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
934 low_bound += 0x10; 934 low_bound += 0x10;
935 up_bound += 0x10; 935 up_bound += 0x10;
936 } 936 }
@@ -946,7 +946,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
946 up_bound = 0x1c; 946 up_bound = 0x1c;
947 } 947 }
948 948
949 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { 949 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
950 low_bound += 0x14; 950 low_bound += 0x14;
951 up_bound += 0x10; 951 up_bound += 0x10;
952 } 952 }
@@ -1661,7 +1661,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1661 } 1661 }
1662 1662
1663 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) { 1663 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1664 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 1664 if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
1665 if (lna == 3 || lna == 2) 1665 if (lna == 3 || lna == 2)
1666 offset += 10; 1666 offset += 10;
1667 } else { 1667 } else {
@@ -1899,13 +1899,13 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1899 * Read the Frame type. 1899 * Read the Frame type.
1900 */ 1900 */
1901 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) 1901 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE))
1902 __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); 1902 __set_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags);
1903 1903
1904 /* 1904 /*
1905 * Detect if this device has a hardware controlled radio. 1905
1906 */ 1906 */
1907 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1907 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1908 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1908 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
1909 1909
1910 /* 1910 /*
1911 * Read frequency offset. 1911 * Read frequency offset.
@@ -1919,8 +1919,8 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1919 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 1919 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1920 1920
1921 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) { 1921 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) {
1922 __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 1922 __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
1923 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); 1923 __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
1924 } 1924 }
1925 1925
1926 /* 1926 /*
@@ -2200,16 +2200,16 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2200 * This device has multiple filters for control frames, 2200 * This device has multiple filters for control frames,
2201 * but has no separate filter for PS Poll frames. 2201
2202 */ 2202 */
2203 __set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags); 2203 __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
2204 2204
2205 /* 2205 /*
2206 * This device requires firmware. 2206 * This device requires firmware.
2207 */ 2207 */
2208 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2208 __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
2209 if (!modparam_nohwcrypt) 2209 if (!modparam_nohwcrypt)
2210 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 2210 __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
2211 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 2211 __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
2212 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); 2212 __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
2213 2213
2214 /* 2214 /*
2215 * Set the rssi offset. 2215 * Set the rssi offset.
@@ -2311,6 +2311,9 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2311 .get_tsf = rt73usb_get_tsf, 2311 .get_tsf = rt73usb_get_tsf,
2312 .rfkill_poll = rt2x00mac_rfkill_poll, 2312 .rfkill_poll = rt2x00mac_rfkill_poll,
2313 .flush = rt2x00mac_flush, 2313 .flush = rt2x00mac_flush,
2314 .set_antenna = rt2x00mac_set_antenna,
2315 .get_antenna = rt2x00mac_get_antenna,
2316 .get_ringparam = rt2x00mac_get_ringparam,
2314}; 2317};
2315 2318
2316static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { 2319static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
@@ -2389,114 +2392,113 @@ static const struct rt2x00_ops rt73usb_ops = {
2389 */ 2392 */
2390static struct usb_device_id rt73usb_device_table[] = { 2393static struct usb_device_id rt73usb_device_table[] = {
2391 /* AboCom */ 2394 /* AboCom */
2392 { USB_DEVICE(0x07b8, 0xb21b), USB_DEVICE_DATA(&rt73usb_ops) }, 2395 { USB_DEVICE(0x07b8, 0xb21b) },
2393 { USB_DEVICE(0x07b8, 0xb21c), USB_DEVICE_DATA(&rt73usb_ops) }, 2396 { USB_DEVICE(0x07b8, 0xb21c) },
2394 { USB_DEVICE(0x07b8, 0xb21d), USB_DEVICE_DATA(&rt73usb_ops) }, 2397 { USB_DEVICE(0x07b8, 0xb21d) },
2395 { USB_DEVICE(0x07b8, 0xb21e), USB_DEVICE_DATA(&rt73usb_ops) }, 2398 { USB_DEVICE(0x07b8, 0xb21e) },
2396 { USB_DEVICE(0x07b8, 0xb21f), USB_DEVICE_DATA(&rt73usb_ops) }, 2399 { USB_DEVICE(0x07b8, 0xb21f) },
2397 /* AL */ 2400 /* AL */
2398 { USB_DEVICE(0x14b2, 0x3c10), USB_DEVICE_DATA(&rt73usb_ops) }, 2401 { USB_DEVICE(0x14b2, 0x3c10) },
2399 /* Amigo */ 2402 /* Amigo */
2400 { USB_DEVICE(0x148f, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) }, 2403 { USB_DEVICE(0x148f, 0x9021) },
2401 { USB_DEVICE(0x0eb0, 0x9021), USB_DEVICE_DATA(&rt73usb_ops) }, 2404 { USB_DEVICE(0x0eb0, 0x9021) },
2402 /* AMIT */ 2405 /* AMIT */
2403 { USB_DEVICE(0x18c5, 0x0002), USB_DEVICE_DATA(&rt73usb_ops) }, 2406 { USB_DEVICE(0x18c5, 0x0002) },
2404 /* Askey */ 2407 /* Askey */
2405 { USB_DEVICE(0x1690, 0x0722), USB_DEVICE_DATA(&rt73usb_ops) }, 2408 { USB_DEVICE(0x1690, 0x0722) },
2406 /* ASUS */ 2409 /* ASUS */
2407 { USB_DEVICE(0x0b05, 0x1723), USB_DEVICE_DATA(&rt73usb_ops) }, 2410 { USB_DEVICE(0x0b05, 0x1723) },
2408 { USB_DEVICE(0x0b05, 0x1724), USB_DEVICE_DATA(&rt73usb_ops) }, 2411 { USB_DEVICE(0x0b05, 0x1724) },
2409 /* Belkin */ 2412 /* Belkin */
2410 { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt73usb_ops) }, 2413 { USB_DEVICE(0x050d, 0x705a) },
2411 { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt73usb_ops) }, 2414 { USB_DEVICE(0x050d, 0x905b) },
2412 { USB_DEVICE(0x050d, 0x905b), USB_DEVICE_DATA(&rt73usb_ops) }, 2415 { USB_DEVICE(0x050d, 0x905c) },
2413 { USB_DEVICE(0x050d, 0x905c), USB_DEVICE_DATA(&rt73usb_ops) },
2414 /* Billionton */ 2416 /* Billionton */
2415 { USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) }, 2417 { USB_DEVICE(0x1631, 0xc019) },
2416 { USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) }, 2418 { USB_DEVICE(0x08dd, 0x0120) },
2417 /* Buffalo */ 2419 /* Buffalo */
2418 { USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) }, 2420 { USB_DEVICE(0x0411, 0x00d8) },
2419 { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) }, 2421 { USB_DEVICE(0x0411, 0x00d9) },
2420 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, 2422 { USB_DEVICE(0x0411, 0x00f4) },
2421 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) }, 2423 { USB_DEVICE(0x0411, 0x0116) },
2422 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) }, 2424 { USB_DEVICE(0x0411, 0x0119) },
2423 { USB_DEVICE(0x0411, 0x0137), USB_DEVICE_DATA(&rt73usb_ops) }, 2425 { USB_DEVICE(0x0411, 0x0137) },
2424 /* CEIVA */ 2426 /* CEIVA */
2425 { USB_DEVICE(0x178d, 0x02be), USB_DEVICE_DATA(&rt73usb_ops) }, 2427 { USB_DEVICE(0x178d, 0x02be) },
2426 /* CNet */ 2428 /* CNet */
2427 { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) }, 2429 { USB_DEVICE(0x1371, 0x9022) },
2428 { USB_DEVICE(0x1371, 0x9032), USB_DEVICE_DATA(&rt73usb_ops) }, 2430 { USB_DEVICE(0x1371, 0x9032) },
2429 /* Conceptronic */ 2431 /* Conceptronic */
2430 { USB_DEVICE(0x14b2, 0x3c22), USB_DEVICE_DATA(&rt73usb_ops) }, 2432 { USB_DEVICE(0x14b2, 0x3c22) },
2431 /* Corega */ 2433 /* Corega */
2432 { USB_DEVICE(0x07aa, 0x002e), USB_DEVICE_DATA(&rt73usb_ops) }, 2434 { USB_DEVICE(0x07aa, 0x002e) },
2433 /* D-Link */ 2435 /* D-Link */
2434 { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, 2436 { USB_DEVICE(0x07d1, 0x3c03) },
2435 { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, 2437 { USB_DEVICE(0x07d1, 0x3c04) },
2436 { USB_DEVICE(0x07d1, 0x3c06), USB_DEVICE_DATA(&rt73usb_ops) }, 2438 { USB_DEVICE(0x07d1, 0x3c06) },
2437 { USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) }, 2439 { USB_DEVICE(0x07d1, 0x3c07) },
2438 /* Edimax */ 2440 /* Edimax */
2439 { USB_DEVICE(0x7392, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) }, 2441 { USB_DEVICE(0x7392, 0x7318) },
2440 { USB_DEVICE(0x7392, 0x7618), USB_DEVICE_DATA(&rt73usb_ops) }, 2442 { USB_DEVICE(0x7392, 0x7618) },
2441 /* EnGenius */ 2443 /* EnGenius */
2442 { USB_DEVICE(0x1740, 0x3701), USB_DEVICE_DATA(&rt73usb_ops) }, 2444 { USB_DEVICE(0x1740, 0x3701) },
2443 /* Gemtek */ 2445 /* Gemtek */
2444 { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, 2446 { USB_DEVICE(0x15a9, 0x0004) },
2445 /* Gigabyte */ 2447 /* Gigabyte */
2446 { USB_DEVICE(0x1044, 0x8008), USB_DEVICE_DATA(&rt73usb_ops) }, 2448 { USB_DEVICE(0x1044, 0x8008) },
2447 { USB_DEVICE(0x1044, 0x800a), USB_DEVICE_DATA(&rt73usb_ops) }, 2449 { USB_DEVICE(0x1044, 0x800a) },
2448 /* Huawei-3Com */ 2450 /* Huawei-3Com */
2449 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, 2451 { USB_DEVICE(0x1472, 0x0009) },
2450 /* Hercules */ 2452 /* Hercules */
2451 { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) }, 2453 { USB_DEVICE(0x06f8, 0xe002) },
2452 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, 2454 { USB_DEVICE(0x06f8, 0xe010) },
2453 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, 2455 { USB_DEVICE(0x06f8, 0xe020) },
2454 /* Linksys */ 2456 /* Linksys */
2455 { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) }, 2457 { USB_DEVICE(0x13b1, 0x0020) },
2456 { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) }, 2458 { USB_DEVICE(0x13b1, 0x0023) },
2457 { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) }, 2459 { USB_DEVICE(0x13b1, 0x0028) },
2458 /* MSI */ 2460 /* MSI */
2459 { USB_DEVICE(0x0db0, 0x4600), USB_DEVICE_DATA(&rt73usb_ops) }, 2461 { USB_DEVICE(0x0db0, 0x4600) },
2460 { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) }, 2462 { USB_DEVICE(0x0db0, 0x6877) },
2461 { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) }, 2463 { USB_DEVICE(0x0db0, 0x6874) },
2462 { USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) }, 2464 { USB_DEVICE(0x0db0, 0xa861) },
2463 { USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) }, 2465 { USB_DEVICE(0x0db0, 0xa874) },
2464 /* Ovislink */ 2466 /* Ovislink */
2465 { USB_DEVICE(0x1b75, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) }, 2467 { USB_DEVICE(0x1b75, 0x7318) },
2466 /* Ralink */ 2468 /* Ralink */
2467 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, 2469 { USB_DEVICE(0x04bb, 0x093d) },
2468 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, 2470 { USB_DEVICE(0x148f, 0x2573) },
2469 { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, 2471 { USB_DEVICE(0x148f, 0x2671) },
2470 { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) }, 2472 { USB_DEVICE(0x0812, 0x3101) },
2471 /* Qcom */ 2473 /* Qcom */
2472 { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, 2474 { USB_DEVICE(0x18e8, 0x6196) },
2473 { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, 2475 { USB_DEVICE(0x18e8, 0x6229) },
2474 { USB_DEVICE(0x18e8, 0x6238), USB_DEVICE_DATA(&rt73usb_ops) }, 2476 { USB_DEVICE(0x18e8, 0x6238) },
2475 /* Samsung */ 2477 /* Samsung */
2476 { USB_DEVICE(0x04e8, 0x4471), USB_DEVICE_DATA(&rt73usb_ops) }, 2478 { USB_DEVICE(0x04e8, 0x4471) },
2477 /* Senao */ 2479 /* Senao */
2478 { USB_DEVICE(0x1740, 0x7100), USB_DEVICE_DATA(&rt73usb_ops) }, 2480 { USB_DEVICE(0x1740, 0x7100) },
2479 /* Sitecom */ 2481 /* Sitecom */
2480 { USB_DEVICE(0x0df6, 0x0024), USB_DEVICE_DATA(&rt73usb_ops) }, 2482 { USB_DEVICE(0x0df6, 0x0024) },
2481 { USB_DEVICE(0x0df6, 0x0027), USB_DEVICE_DATA(&rt73usb_ops) }, 2483 { USB_DEVICE(0x0df6, 0x0027) },
2482 { USB_DEVICE(0x0df6, 0x002f), USB_DEVICE_DATA(&rt73usb_ops) }, 2484 { USB_DEVICE(0x0df6, 0x002f) },
2483 { USB_DEVICE(0x0df6, 0x90ac), USB_DEVICE_DATA(&rt73usb_ops) }, 2485 { USB_DEVICE(0x0df6, 0x90ac) },
2484 { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) }, 2486 { USB_DEVICE(0x0df6, 0x9712) },
2485 /* Surecom */ 2487 /* Surecom */
2486 { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) }, 2488 { USB_DEVICE(0x0769, 0x31f3) },
2487 /* Tilgin */ 2489 /* Tilgin */
2488 { USB_DEVICE(0x6933, 0x5001), USB_DEVICE_DATA(&rt73usb_ops) }, 2490 { USB_DEVICE(0x6933, 0x5001) },
2489 /* Philips */ 2491 /* Philips */
2490 { USB_DEVICE(0x0471, 0x200a), USB_DEVICE_DATA(&rt73usb_ops) }, 2492 { USB_DEVICE(0x0471, 0x200a) },
2491 /* Planex */ 2493 /* Planex */
2492 { USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) }, 2494 { USB_DEVICE(0x2019, 0xab01) },
2493 { USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) }, 2495 { USB_DEVICE(0x2019, 0xab50) },
2494 /* WideTell */ 2496 /* WideTell */
2495 { USB_DEVICE(0x7167, 0x3840), USB_DEVICE_DATA(&rt73usb_ops) }, 2497 { USB_DEVICE(0x7167, 0x3840) },
2496 /* Zcom */ 2498 /* Zcom */
2497 { USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) }, 2499 { USB_DEVICE(0x0cde, 0x001c) },
2498 /* ZyXEL */ 2500 /* ZyXEL */
2499 { USB_DEVICE(0x0586, 0x3415), USB_DEVICE_DATA(&rt73usb_ops) }, 2501 { USB_DEVICE(0x0586, 0x3415) },
2500 { 0, } 2502 { 0, }
2501}; 2503};
2502 2504
@@ -2508,10 +2510,16 @@ MODULE_DEVICE_TABLE(usb, rt73usb_device_table);
2508MODULE_FIRMWARE(FIRMWARE_RT2571); 2510MODULE_FIRMWARE(FIRMWARE_RT2571);
2509MODULE_LICENSE("GPL"); 2511MODULE_LICENSE("GPL");
2510 2512
2513static int rt73usb_probe(struct usb_interface *usb_intf,
2514 const struct usb_device_id *id)
2515{
2516 return rt2x00usb_probe(usb_intf, &rt73usb_ops);
2517}
2518
2511static struct usb_driver rt73usb_driver = { 2519static struct usb_driver rt73usb_driver = {
2512 .name = KBUILD_MODNAME, 2520 .name = KBUILD_MODNAME,
2513 .id_table = rt73usb_device_table, 2521 .id_table = rt73usb_device_table,
2514 .probe = rt2x00usb_probe, 2522 .probe = rt73usb_probe,
2515 .disconnect = rt2x00usb_disconnect, 2523 .disconnect = rt2x00usb_disconnect,
2516 .suspend = rt2x00usb_suspend, 2524 .suspend = rt2x00usb_suspend,
2517 .resume = rt2x00usb_resume, 2525 .resume = rt2x00usb_resume,
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index ce49e0ce7cad..5aee8b22d74e 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -10,6 +10,17 @@ config RTL8192CE
10 10
11 If you choose to build it as a module, it will be called rtl8192ce 11 If you choose to build it as a module, it will be called rtl8192ce
12 12
13config RTL8192SE
14 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
15 depends on MAC80211 && EXPERIMENTAL
16 select FW_LOADER
17 select RTLWIFI
18 ---help---
19 This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
20 wireless network adapters.
21
22 If you choose to build it as a module, it will be called rtl8192se
23
13config RTL8192CU 24config RTL8192CU
14 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" 25 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
15 depends on MAC80211 && USB && EXPERIMENTAL 26 depends on MAC80211 && USB && EXPERIMENTAL
@@ -24,10 +35,10 @@ config RTL8192CU
24 35
25config RTLWIFI 36config RTLWIFI
26 tristate 37 tristate
27 depends on RTL8192CE || RTL8192CU 38 depends on RTL8192CE || RTL8192CU || RTL8192SE
28 default m 39 default m
29 40
30config RTL8192C_COMMON 41config RTL8192C_COMMON
31 tristate 42 tristate
32 depends on RTL8192CE || RTL8192CU 43 depends on RTL8192CE || RTL8192CU || RTL8192SE
33 default m 44 default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index ec9393f24799..7acce83c3785 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -22,5 +22,6 @@ endif
22obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/ 22obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
23obj-$(CONFIG_RTL8192CE) += rtl8192ce/ 23obj-$(CONFIG_RTL8192CE) += rtl8192ce/
24obj-$(CONFIG_RTL8192CU) += rtl8192cu/ 24obj-$(CONFIG_RTL8192CU) += rtl8192cu/
25obj-$(CONFIG_RTL8192SE) += rtl8192se/
25 26
26ccflags-y += -D__CHECK_ENDIAN__ 27ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 0d7d93e1d398..ccb6da38fe22 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -50,8 +50,9 @@
50 *3) functions called by core.c 50 *3) functions called by core.c
51 *4) wq & timer callback functions 51 *4) wq & timer callback functions
52 *5) frame process functions 52 *5) frame process functions
53 *6) sysfs functions 53 *6) IOT functions
54 *7) ... 54 *7) sysfs functions
55 *8) ...
55 */ 56 */
56 57
57/********************************************************* 58/*********************************************************
@@ -59,7 +60,7 @@
59 * mac80211 init functions 60 * mac80211 init functions
60 * 61 *
61 *********************************************************/ 62 *********************************************************/
62static struct ieee80211_channel rtl_channeltable[] = { 63static struct ieee80211_channel rtl_channeltable_2g[] = {
63 {.center_freq = 2412, .hw_value = 1,}, 64 {.center_freq = 2412, .hw_value = 1,},
64 {.center_freq = 2417, .hw_value = 2,}, 65 {.center_freq = 2417, .hw_value = 2,},
65 {.center_freq = 2422, .hw_value = 3,}, 66 {.center_freq = 2422, .hw_value = 3,},
@@ -76,7 +77,34 @@ static struct ieee80211_channel rtl_channeltable[] = {
76 {.center_freq = 2484, .hw_value = 14,}, 77 {.center_freq = 2484, .hw_value = 14,},
77}; 78};
78 79
79static struct ieee80211_rate rtl_ratetable[] = { 80static struct ieee80211_channel rtl_channeltable_5g[] = {
81 {.center_freq = 5180, .hw_value = 36,},
82 {.center_freq = 5200, .hw_value = 40,},
83 {.center_freq = 5220, .hw_value = 44,},
84 {.center_freq = 5240, .hw_value = 48,},
85 {.center_freq = 5260, .hw_value = 52,},
86 {.center_freq = 5280, .hw_value = 56,},
87 {.center_freq = 5300, .hw_value = 60,},
88 {.center_freq = 5320, .hw_value = 64,},
89 {.center_freq = 5500, .hw_value = 100,},
90 {.center_freq = 5520, .hw_value = 104,},
91 {.center_freq = 5540, .hw_value = 108,},
92 {.center_freq = 5560, .hw_value = 112,},
93 {.center_freq = 5580, .hw_value = 116,},
94 {.center_freq = 5600, .hw_value = 120,},
95 {.center_freq = 5620, .hw_value = 124,},
96 {.center_freq = 5640, .hw_value = 128,},
97 {.center_freq = 5660, .hw_value = 132,},
98 {.center_freq = 5680, .hw_value = 136,},
99 {.center_freq = 5700, .hw_value = 140,},
100 {.center_freq = 5745, .hw_value = 149,},
101 {.center_freq = 5765, .hw_value = 153,},
102 {.center_freq = 5785, .hw_value = 157,},
103 {.center_freq = 5805, .hw_value = 161,},
104 {.center_freq = 5825, .hw_value = 165,},
105};
106
107static struct ieee80211_rate rtl_ratetable_2g[] = {
80 {.bitrate = 10, .hw_value = 0x00,}, 108 {.bitrate = 10, .hw_value = 0x00,},
81 {.bitrate = 20, .hw_value = 0x01,}, 109 {.bitrate = 20, .hw_value = 0x01,},
82 {.bitrate = 55, .hw_value = 0x02,}, 110 {.bitrate = 55, .hw_value = 0x02,},
@@ -91,18 +119,57 @@ static struct ieee80211_rate rtl_ratetable[] = {
91 {.bitrate = 540, .hw_value = 0x0b,}, 119 {.bitrate = 540, .hw_value = 0x0b,},
92}; 120};
93 121
122static struct ieee80211_rate rtl_ratetable_5g[] = {
123 {.bitrate = 60, .hw_value = 0x04,},
124 {.bitrate = 90, .hw_value = 0x05,},
125 {.bitrate = 120, .hw_value = 0x06,},
126 {.bitrate = 180, .hw_value = 0x07,},
127 {.bitrate = 240, .hw_value = 0x08,},
128 {.bitrate = 360, .hw_value = 0x09,},
129 {.bitrate = 480, .hw_value = 0x0a,},
130 {.bitrate = 540, .hw_value = 0x0b,},
131};
132
94static const struct ieee80211_supported_band rtl_band_2ghz = { 133static const struct ieee80211_supported_band rtl_band_2ghz = {
95 .band = IEEE80211_BAND_2GHZ, 134 .band = IEEE80211_BAND_2GHZ,
96 135
97 .channels = rtl_channeltable, 136 .channels = rtl_channeltable_2g,
98 .n_channels = ARRAY_SIZE(rtl_channeltable), 137 .n_channels = ARRAY_SIZE(rtl_channeltable_2g),
99 138
100 .bitrates = rtl_ratetable, 139 .bitrates = rtl_ratetable_2g,
101 .n_bitrates = ARRAY_SIZE(rtl_ratetable), 140 .n_bitrates = ARRAY_SIZE(rtl_ratetable_2g),
102 141
103 .ht_cap = {0}, 142 .ht_cap = {0},
104}; 143};
105 144
145static struct ieee80211_supported_band rtl_band_5ghz = {
146 .band = IEEE80211_BAND_5GHZ,
147
148 .channels = rtl_channeltable_5g,
149 .n_channels = ARRAY_SIZE(rtl_channeltable_5g),
150
151 .bitrates = rtl_ratetable_5g,
152 .n_bitrates = ARRAY_SIZE(rtl_ratetable_5g),
153
154 .ht_cap = {0},
155};
156
157static const u8 tid_to_ac[] = {
158 2, /* IEEE80211_AC_BE */
159 3, /* IEEE80211_AC_BK */
160 3, /* IEEE80211_AC_BK */
161 2, /* IEEE80211_AC_BE */
162 1, /* IEEE80211_AC_VI */
163 1, /* IEEE80211_AC_VI */
164 0, /* IEEE80211_AC_VO */
165 0, /* IEEE80211_AC_VO */
166};
167
168u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid)
169{
170 return tid_to_ac[tid];
171}
172
106static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, 173static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
107 struct ieee80211_sta_ht_cap *ht_cap) 174 struct ieee80211_sta_ht_cap *ht_cap)
108{ 175{
@@ -115,6 +182,9 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
115 IEEE80211_HT_CAP_SGI_20 | 182 IEEE80211_HT_CAP_SGI_20 |
116 IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU; 183 IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
117 184
185 if (rtlpriv->rtlhal.disable_amsdu_8k)
186 ht_cap->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
187
118 /* 188 /*
119 *Maximum length of AMPDU that the STA can receive. 189 *Maximum length of AMPDU that the STA can receive.
120 *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) 190 *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
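As a worked example of that formula: the largest encoding, max_ampdu_length_exp = 3, gives 2^(13 + 3) - 1 = 65535 octets (the 64 KB A-MPDU limit of 802.11n), while the smallest, 0, gives 2^13 - 1 = 8191 octets.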
@@ -159,37 +229,99 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
159 229
160static void _rtl_init_mac80211(struct ieee80211_hw *hw) 230static void _rtl_init_mac80211(struct ieee80211_hw *hw)
161{ 231{
232 struct rtl_priv *rtlpriv = rtl_priv(hw);
233 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
162 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); 234 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
163 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 235 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
164 struct ieee80211_supported_band *sband; 236 struct ieee80211_supported_band *sband;
165 237
166 /* <1> use mac->bands as mem for hw->wiphy->bands */
167 sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
168 238
169 /* 239 if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY && rtlhal->bandset ==
170 * <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] 240 BAND_ON_BOTH) {
171 * to default value(1T1R) 241 /* 1: 2.4 G bands */
172 */ 242 /* <1> use mac->bands as mem for hw->wiphy->bands */
173 memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz, 243 sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
174 sizeof(struct ieee80211_supported_band)); 244
245 /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
246 * to default value(1T1R) */
247 memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
248 sizeof(struct ieee80211_supported_band));
175 249
176 /* <3> init ht cap base on ant_num */ 250 /* <3> init ht cap base on ant_num */
177 _rtl_init_hw_ht_capab(hw, &sband->ht_cap); 251 _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
178 252
179 /* <4> set mac->sband to wiphy->sband */ 253 /* <4> set mac->sband to wiphy->sband */
180 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 254 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
181 255
256 /* 2: 5 G bands */
257 /* <1> use mac->bands as mem for hw->wiphy->bands */
258 sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
259
260 /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
261 * to default value(1T1R) */
262 memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
263 sizeof(struct ieee80211_supported_band));
264
265 /* <3> init ht cap base on ant_num */
266 _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
267
268 /* <4> set mac->sband to wiphy->sband */
269 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
270 } else {
271 if (rtlhal->current_bandtype == BAND_ON_2_4G) {
272 /* <1> use mac->bands as mem for hw->wiphy->bands */
273 sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
274
275 /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
276 * to default value(1T1R) */
277 memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
278 &rtl_band_2ghz,
279 sizeof(struct ieee80211_supported_band));
280
281 /* <3> init ht cap base on ant_num */
282 _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
283
284 /* <4> set mac->sband to wiphy->sband */
285 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
286 } else if (rtlhal->current_bandtype == BAND_ON_5G) {
287 /* <1> use mac->bands as mem for hw->wiphy->bands */
288 sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
289
290 /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
291 * to default value(1T1R) */
292 memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
293 &rtl_band_5ghz,
294 sizeof(struct ieee80211_supported_band));
295
296 /* <3> init ht cap base on ant_num */
297 _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
298
299 /* <4> set mac->sband to wiphy->sband */
300 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
301 } else {
302 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
303 ("Err BAND %d\n",
304 rtlhal->current_bandtype));
305 }
306 }
182 /* <5> set hw caps */ 307 /* <5> set hw caps */
183 hw->flags = IEEE80211_HW_SIGNAL_DBM | 308 hw->flags = IEEE80211_HW_SIGNAL_DBM |
184 IEEE80211_HW_RX_INCLUDES_FCS | 309 IEEE80211_HW_RX_INCLUDES_FCS |
185 IEEE80211_HW_BEACON_FILTER | IEEE80211_HW_AMPDU_AGGREGATION | /*PS*/ 310 IEEE80211_HW_BEACON_FILTER |
186 /*IEEE80211_HW_SUPPORTS_PS | */ 311 IEEE80211_HW_AMPDU_AGGREGATION |
187 /*IEEE80211_HW_PS_NULLFUNC_STACK | */
188 /*IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
189 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0; 312 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
190 313
 314 /* swlps or hwlps has been chosen per chip in init_sw_vars */

315 if (rtlpriv->psc.swctrl_lps)
316 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
317 IEEE80211_HW_PS_NULLFUNC_STACK |
318 /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
319 0;
320
191 hw->wiphy->interface_modes = 321 hw->wiphy->interface_modes =
192 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); 322 BIT(NL80211_IFTYPE_AP) |
323 BIT(NL80211_IFTYPE_STATION) |
324 BIT(NL80211_IFTYPE_ADHOC);
193 325
194 hw->wiphy->rts_threshold = 2347; 326 hw->wiphy->rts_threshold = 2347;
195 327
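The reworked _rtl_init_mac80211() above picks which wiphy bands to register from bandset/current_bandtype and then builds hw->flags, OR-ing the power-save capability bits in only when software-controlled LPS is active. A toy sketch of that conditional flag building; the CAP_* constants are placeholders, not mac80211's real IEEE80211_HW_* values:

    #include <stdio.h>

    #define CAP_SIGNAL_DBM        (1u << 0)   /* placeholder bits */
    #define CAP_RX_INCLUDES_FCS   (1u << 1)
    #define CAP_AMPDU_AGGREGATION (1u << 2)
    #define CAP_SUPPORTS_PS       (1u << 3)
    #define CAP_PS_NULLFUNC_STACK (1u << 4)

    /* Fixed base capabilities, plus PS bits only for software-controlled LPS. */
    static unsigned int build_hw_flags(int swctrl_lps)
    {
        unsigned int flags = CAP_SIGNAL_DBM | CAP_RX_INCLUDES_FCS |
                             CAP_AMPDU_AGGREGATION;

        if (swctrl_lps)
            flags |= CAP_SUPPORTS_PS | CAP_PS_NULLFUNC_STACK;

        return flags;
    }

    int main(void)
    {
        printf("hw lps: 0x%x, sw lps: 0x%x\n",
               build_hw_flags(0), build_hw_flags(1));
        return 0;
    }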
@@ -199,9 +331,10 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
199 /* TODO: Correct this value for our hw */ 331 /* TODO: Correct this value for our hw */
200 /* TODO: define these hard code value */ 332 /* TODO: define these hard code value */
201 hw->channel_change_time = 100; 333 hw->channel_change_time = 100;
202 hw->max_listen_interval = 5; 334 hw->max_listen_interval = 10;
203 hw->max_rate_tries = 4; 335 hw->max_rate_tries = 4;
204 /* hw->max_rates = 1; */ 336 /* hw->max_rates = 1; */
337 hw->sta_data_size = sizeof(struct rtl_sta_info);
205 338
206 /* <6> mac address */ 339 /* <6> mac address */
207 if (is_valid_ether_addr(rtlefuse->dev_addr)) { 340 if (is_valid_ether_addr(rtlefuse->dev_addr)) {
@@ -230,6 +363,10 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
230 (void *)rtl_watchdog_wq_callback); 363 (void *)rtl_watchdog_wq_callback);
231 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, 364 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
232 (void *)rtl_ips_nic_off_wq_callback); 365 (void *)rtl_ips_nic_off_wq_callback);
366 INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
367 (void *)rtl_swlps_wq_callback);
368 INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
369 (void *)rtl_swlps_rfon_wq_callback);
233 370
234} 371}
235 372
@@ -241,6 +378,8 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
241 378
242 cancel_delayed_work(&rtlpriv->works.watchdog_wq); 379 cancel_delayed_work(&rtlpriv->works.watchdog_wq);
243 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); 380 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
381 cancel_delayed_work(&rtlpriv->works.ps_work);
382 cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
244} 383}
245 384
246void rtl_init_rfkill(struct ieee80211_hw *hw) 385void rtl_init_rfkill(struct ieee80211_hw *hw)
@@ -251,14 +390,16 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
251 bool blocked; 390 bool blocked;
252 u8 valid = 0; 391 u8 valid = 0;
253 392
254 radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid); 393 /*set init state to on */
394 rtlpriv->rfkill.rfkill_state = 1;
395 wiphy_rfkill_set_hw_state(hw->wiphy, 0);
255 396
256 /*set init state to that of switch */ 397 radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
257 rtlpriv->rfkill.rfkill_state = radio_state;
258 printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
259 rtlpriv->rfkill.rfkill_state ? "on" : "off");
260 398
261 if (valid) { 399 if (valid) {
400 printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
401 rtlpriv->rfkill.rfkill_state ? "on" : "off");
402
262 rtlpriv->rfkill.rfkill_state = radio_state; 403 rtlpriv->rfkill.rfkill_state = radio_state;
263 404
264 blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1; 405 blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
@@ -308,6 +449,8 @@ int rtl_init_core(struct ieee80211_hw *hw)
308 spin_lock_init(&rtlpriv->locks.rf_ps_lock); 449 spin_lock_init(&rtlpriv->locks.rf_ps_lock);
309 spin_lock_init(&rtlpriv->locks.rf_lock); 450 spin_lock_init(&rtlpriv->locks.rf_lock);
310 spin_lock_init(&rtlpriv->locks.lps_lock); 451 spin_lock_init(&rtlpriv->locks.lps_lock);
452 spin_lock_init(&rtlpriv->locks.waitq_lock);
453 spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
311 454
312 rtlmac->link_state = MAC80211_NOLINK; 455 rtlmac->link_state = MAC80211_NOLINK;
313 456
@@ -327,12 +470,6 @@ void rtl_init_rx_config(struct ieee80211_hw *hw)
327 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 470 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
328 471
329 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf)); 472 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
330 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MGT_FILTER,
331 (u8 *) (&mac->rx_mgt_filter));
332 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CTRL_FILTER,
333 (u8 *) (&mac->rx_ctrl_filter));
334 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_DATA_FILTER,
335 (u8 *) (&mac->rx_data_filter));
336} 473}
337 474
338/********************************************************* 475/*********************************************************
@@ -359,28 +496,40 @@ static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw,
359} 496}
360 497
361static void _rtl_query_shortgi(struct ieee80211_hw *hw, 498static void _rtl_query_shortgi(struct ieee80211_hw *hw,
499 struct ieee80211_sta *sta,
362 struct rtl_tcb_desc *tcb_desc, 500 struct rtl_tcb_desc *tcb_desc,
363 struct ieee80211_tx_info *info) 501 struct ieee80211_tx_info *info)
364{ 502{
365 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 503 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
366 u8 rate_flag = info->control.rates[0].flags; 504 u8 rate_flag = info->control.rates[0].flags;
367 505 u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0;
368 tcb_desc->use_shortgi = false; 506 tcb_desc->use_shortgi = false;
369 507
370 if (!mac->ht_enable) 508 if (sta == NULL)
509 return;
510
511 sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
512 sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
513
514 if (!(sta->ht_cap.ht_supported))
371 return; 515 return;
372 516
373 if (!mac->sgi_40 && !mac->sgi_20) 517 if (!sgi_40 && !sgi_20)
374 return; 518 return;
375 519
376 if ((mac->bw_40 == true) && mac->sgi_40) 520 if (mac->opmode == NL80211_IFTYPE_STATION)
521 bw_40 = mac->bw_40;
522 else if (mac->opmode == NL80211_IFTYPE_AP ||
523 mac->opmode == NL80211_IFTYPE_ADHOC)
524 bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
525
526 if ((bw_40 == true) && sgi_40)
377 tcb_desc->use_shortgi = true; 527 tcb_desc->use_shortgi = true;
378 else if ((mac->bw_40 == false) && mac->sgi_20) 528 else if ((bw_40 == false) && sgi_20)
379 tcb_desc->use_shortgi = true; 529 tcb_desc->use_shortgi = true;
380 530
381 if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI)) 531 if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI))
382 tcb_desc->use_shortgi = false; 532 tcb_desc->use_shortgi = false;
383
384} 533}
385 534
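_rtl_query_shortgi() now reads the short-GI and 20/40 capabilities from the per-station ht_cap instead of the global mac state. The decision itself reduces to: the peer must advertise SGI for the bandwidth in use, and rate control must have asked for it. A compact sketch under those assumptions (the bit values mirror the IEEE80211_HT_CAP_SGI_20/40 definitions as I read them, but treat them as placeholders):

    #include <stdbool.h>
    #include <stdio.h>

    #define HT_CAP_SGI_20 (1u << 5)
    #define HT_CAP_SGI_40 (1u << 6)
    #define RC_SHORT_GI   (1u << 0)   /* stand-in for the rate-control flag */

    /* Short GI only if the peer supports it for the bandwidth in use and
     * the rate chosen by rate control carries the short-GI flag. */
    static bool use_short_gi(unsigned int ht_cap, bool bw_40,
                             unsigned int rate_flags)
    {
        bool sgi = bw_40 ? (ht_cap & HT_CAP_SGI_40) : (ht_cap & HT_CAP_SGI_20);

        return sgi && (rate_flags & RC_SHORT_GI);
    }

    int main(void)
    {
        printf("%d %d\n",
               use_short_gi(HT_CAP_SGI_40, true, RC_SHORT_GI),   /* 1 */
               use_short_gi(HT_CAP_SGI_40, false, RC_SHORT_GI)); /* 0 */
        return 0;
    }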
386static void _rtl_query_protection_mode(struct ieee80211_hw *hw, 535static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
@@ -408,19 +557,25 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
408 tcb_desc->rts_enable = true; 557 tcb_desc->rts_enable = true;
409 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; 558 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
410 } 559 }
411
412} 560}
413 561
414static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, 562static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
563 struct ieee80211_sta *sta,
415 struct rtl_tcb_desc *tcb_desc) 564 struct rtl_tcb_desc *tcb_desc)
416{ 565{
417 struct rtl_priv *rtlpriv = rtl_priv(hw); 566 struct rtl_priv *rtlpriv = rtl_priv(hw);
418 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 567 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
568 struct rtl_sta_info *sta_entry = NULL;
569 u8 ratr_index = 7;
419 570
571 if (sta) {
572 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
573 ratr_index = sta_entry->ratr_index;
574 }
420 if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) { 575 if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
421 if (mac->opmode == NL80211_IFTYPE_STATION) 576 if (mac->opmode == NL80211_IFTYPE_STATION) {
422 tcb_desc->ratr_index = 0; 577 tcb_desc->ratr_index = 0;
423 else if (mac->opmode == NL80211_IFTYPE_ADHOC) { 578 } else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
424 if (tcb_desc->multicast || tcb_desc->broadcast) { 579 if (tcb_desc->multicast || tcb_desc->broadcast) {
425 tcb_desc->hw_rate = 580 tcb_desc->hw_rate =
426 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; 581 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
@@ -428,36 +583,61 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
428 } else { 583 } else {
429 /* TODO */ 584 /* TODO */
430 } 585 }
586 tcb_desc->ratr_index = ratr_index;
587 } else if (mac->opmode == NL80211_IFTYPE_AP) {
588 tcb_desc->ratr_index = ratr_index;
431 } 589 }
432 } 590 }
433 591
434 if (rtlpriv->dm.useramask) { 592 if (rtlpriv->dm.useramask) {
435 /* TODO we will differentiate adhoc and station future */ 593 if (mac->opmode == NL80211_IFTYPE_STATION) {
436 tcb_desc->mac_id = 0; 594 if (mac->opmode == NL80211_IFTYPE_STATION) {
437 595 tcb_desc->mac_id = 0;
438 if ((mac->mode == WIRELESS_MODE_N_24G) || 596
439 (mac->mode == WIRELESS_MODE_N_5G)) { 597 if (mac->mode == WIRELESS_MODE_N_24G)
440 tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB; 598 tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
441 } else if (mac->mode & WIRELESS_MODE_G) { 599 else if (mac->mode == WIRELESS_MODE_N_5G)
442 tcb_desc->ratr_index = RATR_INX_WIRELESS_GB; 600 tcb_desc->ratr_index = RATR_INX_WIRELESS_NG;
443 } else if (mac->mode & WIRELESS_MODE_B) { 601 else if (mac->mode & WIRELESS_MODE_G)
444 tcb_desc->ratr_index = RATR_INX_WIRELESS_B; 602 tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
603 else if (mac->mode & WIRELESS_MODE_B)
604 tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
605 else if (mac->mode & WIRELESS_MODE_A)
606 tcb_desc->ratr_index = RATR_INX_WIRELESS_G;
607 } else if (mac->opmode == NL80211_IFTYPE_AP ||
608 mac->opmode == NL80211_IFTYPE_ADHOC) {
609 if (NULL != sta) {
610 if (sta->aid > 0)
611 tcb_desc->mac_id = sta->aid + 1;
612 else
613 tcb_desc->mac_id = 1;
614 } else {
615 tcb_desc->mac_id = 0;
616 }
445 } 617 }
446 } 618 }
447 619
448} 620}
449 621
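_rtl_txrate_selectmode() now derives the rate-adaptive table index from the per-station entry and, for AP/IBSS, a hardware mac_id from the peer's AID. A rough standalone sketch of those two lookups; the RATR_* values and the aid+1 convention below are stand-ins taken from the hunk above, not authoritative register values:

    #include <stdio.h>

    enum ratr { RATR_B, RATR_GB, RATR_G, RATR_NGB, RATR_NG };   /* illustrative */
    enum mode { MODE_B, MODE_G, MODE_A, MODE_N_24G, MODE_N_5G };

    /* Map the wireless mode to a rate table index, as the useramask branch
     * above does for station mode. */
    static enum ratr ratr_index_for_mode(enum mode m)
    {
        switch (m) {
        case MODE_N_24G: return RATR_NGB;
        case MODE_N_5G:  return RATR_NG;
        case MODE_G:     return RATR_GB;
        case MODE_B:     return RATR_B;
        case MODE_A:     return RATR_G;
        }
        return RATR_B;
    }

    /* In AP/IBSS mode the hunk derives the hardware mac_id from the AID. */
    static int mac_id_from_aid(int aid)
    {
        return aid > 0 ? aid + 1 : 1;
    }

    int main(void)
    {
        printf("N 2.4G -> ratr %d, AID 3 -> mac_id %d\n",
               ratr_index_for_mode(MODE_N_24G), mac_id_from_aid(3));
        return 0;
    }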
450static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, 622static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
623 struct ieee80211_sta *sta,
451 struct rtl_tcb_desc *tcb_desc) 624 struct rtl_tcb_desc *tcb_desc)
452{ 625{
453 struct rtl_priv *rtlpriv = rtl_priv(hw); 626 struct rtl_priv *rtlpriv = rtl_priv(hw);
454 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 627 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
455 628
456 tcb_desc->packet_bw = false; 629 tcb_desc->packet_bw = false;
457 630 if (!sta)
458 if (!mac->bw_40 || !mac->ht_enable)
459 return; 631 return;
460 632 if (mac->opmode == NL80211_IFTYPE_AP ||
633 mac->opmode == NL80211_IFTYPE_ADHOC) {
634 if (!(sta->ht_cap.ht_supported) ||
635 !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
636 return;
637 } else if (mac->opmode == NL80211_IFTYPE_STATION) {
638 if (!mac->bw_40 || !(sta->ht_cap.ht_supported))
639 return;
640 }
461 if (tcb_desc->multicast || tcb_desc->broadcast) 641 if (tcb_desc->multicast || tcb_desc->broadcast)
462 return; 642 return;
463 643
@@ -484,22 +664,21 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
484 664
485void rtl_get_tcb_desc(struct ieee80211_hw *hw, 665void rtl_get_tcb_desc(struct ieee80211_hw *hw,
486 struct ieee80211_tx_info *info, 666 struct ieee80211_tx_info *info,
667 struct ieee80211_sta *sta,
487 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc) 668 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
488{ 669{
489 struct rtl_priv *rtlpriv = rtl_priv(hw); 670 struct rtl_priv *rtlpriv = rtl_priv(hw);
490 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); 671 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
491 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 672 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
492 struct ieee80211_rate *txrate; 673 struct ieee80211_rate *txrate;
493 __le16 fc = hdr->frame_control; 674 __le16 fc = hdr->frame_control;
494 675
495 memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 676 txrate = ieee80211_get_tx_rate(hw, info);
677 tcb_desc->hw_rate = txrate->hw_value;
496 678
497 if (ieee80211_is_data(fc)) { 679 if (ieee80211_is_data(fc)) {
498 txrate = ieee80211_get_tx_rate(hw, info);
499 tcb_desc->hw_rate = txrate->hw_value;
500
501 /* 680 /*
502 *we set data rate RTL_RC_CCK_RATE1M 681 *we set data rate INX 0
503 *in rtl_rc.c if skb is special data or 682 *in rtl_rc.c if skb is special data or
504 *mgt which need low data rate. 683 *mgt which need low data rate.
505 */ 684 */
@@ -508,12 +687,11 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
508 *So tcb_desc->hw_rate is just used for 687 *So tcb_desc->hw_rate is just used for
509 *special data and mgt frames 688 *special data and mgt frames
510 */ 689 */
511 if (tcb_desc->hw_rate < rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M]) { 690 if (info->control.rates[0].idx == 0 &&
691 ieee80211_is_nullfunc(fc)) {
512 tcb_desc->use_driver_rate = true; 692 tcb_desc->use_driver_rate = true;
513 tcb_desc->ratr_index = 7; 693 tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
514 694
515 tcb_desc->hw_rate =
516 rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
517 tcb_desc->disable_ratefallback = 1; 695 tcb_desc->disable_ratefallback = 1;
518 } else { 696 } else {
519 /* 697 /*
@@ -523,7 +701,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
523 *and N rate will all be controlled by FW 701 *and N rate will all be controlled by FW
524 *when tcb_desc->use_driver_rate = false 702 *when tcb_desc->use_driver_rate = false
525 */ 703 */
526 if (rtlmac->ht_enable) { 704 if (sta && (sta->ht_cap.ht_supported)) {
527 tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw); 705 tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw);
528 } else { 706 } else {
529 if (rtlmac->mode == WIRELESS_MODE_B) { 707 if (rtlmac->mode == WIRELESS_MODE_B) {
@@ -541,43 +719,25 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
541 else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) 719 else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
542 tcb_desc->broadcast = 1; 720 tcb_desc->broadcast = 1;
543 721
544 _rtl_txrate_selectmode(hw, tcb_desc); 722 _rtl_txrate_selectmode(hw, sta, tcb_desc);
545 _rtl_query_bandwidth_mode(hw, tcb_desc); 723 _rtl_query_bandwidth_mode(hw, sta, tcb_desc);
546 _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info); 724 _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
547 _rtl_query_shortgi(hw, tcb_desc, info); 725 _rtl_query_shortgi(hw, sta, tcb_desc, info);
548 _rtl_query_protection_mode(hw, tcb_desc, info); 726 _rtl_query_protection_mode(hw, tcb_desc, info);
549 } else { 727 } else {
550 tcb_desc->use_driver_rate = true; 728 tcb_desc->use_driver_rate = true;
551 tcb_desc->ratr_index = 7; 729 tcb_desc->ratr_index = RATR_INX_WIRELESS_MC;
552 tcb_desc->disable_ratefallback = 1; 730 tcb_desc->disable_ratefallback = 1;
553 tcb_desc->mac_id = 0; 731 tcb_desc->mac_id = 0;
554 732 tcb_desc->packet_bw = false;
555 tcb_desc->hw_rate = rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
556 } 733 }
557} 734}
558EXPORT_SYMBOL(rtl_get_tcb_desc); 735EXPORT_SYMBOL(rtl_get_tcb_desc);
559 736
560bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
561{
562 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
563 struct rtl_priv *rtlpriv = rtl_priv(hw);
564 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
565 __le16 fc = hdr->frame_control;
566
567 if (ieee80211_is_auth(fc)) {
568 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
569 rtl_ips_nic_on(hw);
570
571 mac->link_state = MAC80211_LINKING;
572 }
573
574 return true;
575}
576
577bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) 737bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
578{ 738{
579 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 739 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
580 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 740 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
581 struct rtl_priv *rtlpriv = rtl_priv(hw); 741 struct rtl_priv *rtlpriv = rtl_priv(hw);
582 __le16 fc = hdr->frame_control; 742 __le16 fc = hdr->frame_control;
583 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN)); 743 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
@@ -622,22 +782,20 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
622u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) 782u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
623{ 783{
624 struct rtl_priv *rtlpriv = rtl_priv(hw); 784 struct rtl_priv *rtlpriv = rtl_priv(hw);
625 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
626 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 785 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
627 __le16 fc = hdr->frame_control; 786 __le16 fc = rtl_get_fc(skb);
628 u16 ether_type; 787 u16 ether_type;
629 u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb); 788 u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
630 const struct iphdr *ip; 789 const struct iphdr *ip;
631 790
632 if (!ieee80211_is_data(fc)) 791 if (!ieee80211_is_data(fc))
633 goto end; 792 return false;
634 793
635 if (ieee80211_is_nullfunc(fc))
636 return true;
637 794
638 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + 795 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
639 SNAP_SIZE + PROTOC_TYPE_SIZE); 796 SNAP_SIZE + PROTOC_TYPE_SIZE);
640 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); 797 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
798 /* ether_type = ntohs(ether_type); */
641 799
642 if (ETH_P_IP == ether_type) { 800 if (ETH_P_IP == ether_type) {
643 if (IPPROTO_UDP == ip->protocol) { 801 if (IPPROTO_UDP == ip->protocol) {
@@ -686,7 +844,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
686 return true; 844 return true;
687 } 845 }
688 846
689end:
690 return false; 847 return false;
691} 848}
692 849
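rtl_is_special_data() now returns false directly instead of jumping to the removed end label; its job is still to spot frames that should bypass normal rate control (DHCP, ARP, EAPOL and the like) by peeking at the EtherType and, for IP, the UDP ports after the SNAP header. A simplified classification sketch using the standard EtherType and DHCP port numbers (the real function's buffer offsets are omitted):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_IP   0x0800
    #define ETH_P_ARP  0x0806
    #define ETH_P_PAE  0x888e   /* EAPOL */

    /* Frames that want a low, reliable rate: ARP, EAPOL, and DHCP
     * (UDP ports 67/68) inside IP. */
    static bool is_special_frame(uint16_t ether_type, bool is_udp,
                                 uint16_t udp_src, uint16_t udp_dst)
    {
        if (ether_type == ETH_P_ARP || ether_type == ETH_P_PAE)
            return true;

        if (ether_type == ETH_P_IP && is_udp)
            return udp_src == 67 || udp_src == 68 ||
                   udp_dst == 67 || udp_dst == 68;

        return false;
    }

    int main(void)
    {
        printf("DHCP: %d, plain UDP: %d\n",
               is_special_frame(ETH_P_IP, true, 68, 67),
               is_special_frame(ETH_P_IP, true, 5000, 5001));
        return 0;
    }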
@@ -695,61 +852,92 @@ end:
695 * functions called by core.c 852 * functions called by core.c
696 * 853 *
697 *********************************************************/ 854 *********************************************************/
698int rtl_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra, u16 tid, u16 *ssn) 855int rtl_tx_agg_start(struct ieee80211_hw *hw,
856 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
699{ 857{
700 struct rtl_priv *rtlpriv = rtl_priv(hw); 858 struct rtl_priv *rtlpriv = rtl_priv(hw);
701 struct rtl_tid_data *tid_data; 859 struct rtl_tid_data *tid_data;
702 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 860 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
861 struct rtl_sta_info *sta_entry = NULL;
703 862
704 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, 863 if (sta == NULL)
705 ("on ra = %pM tid = %d\n", ra, tid)); 864 return -EINVAL;
706 865
707 if (unlikely(tid >= MAX_TID_COUNT)) 866 if (unlikely(tid >= MAX_TID_COUNT))
708 return -EINVAL; 867 return -EINVAL;
709 868
710 if (mac->tids[tid].agg.agg_state != RTL_AGG_OFF) { 869 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
711 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 870 if (!sta_entry)
712 ("Start AGG when state is not RTL_AGG_OFF !\n"));
713 return -ENXIO; 871 return -ENXIO;
714 } 872 tid_data = &sta_entry->tids[tid];
715
716 tid_data = &mac->tids[tid];
717 *ssn = SEQ_TO_SN(tid_data->seq_number);
718 873
719 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, 874 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
720 ("HW queue is empty tid:%d\n", tid)); 875 ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
721 tid_data->agg.agg_state = RTL_AGG_ON; 876 tid_data->seq_number));
877
878 *ssn = tid_data->seq_number;
879 tid_data->agg.agg_state = RTL_AGG_START;
722 880
723 ieee80211_start_tx_ba_cb_irqsafe(mac->vif, ra, tid); 881 ieee80211_start_tx_ba_cb_irqsafe(mac->vif, sta->addr, tid);
724 882
725 return 0; 883 return 0;
726} 884}
727 885
728int rtl_tx_agg_stop(struct ieee80211_hw *hw, const u8 * ra, u16 tid) 886int rtl_tx_agg_stop(struct ieee80211_hw *hw,
887 struct ieee80211_sta *sta, u16 tid)
729{ 888{
730 int ssn = -1;
731 struct rtl_priv *rtlpriv = rtl_priv(hw); 889 struct rtl_priv *rtlpriv = rtl_priv(hw);
732 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 890 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
733 struct rtl_tid_data *tid_data; 891 struct rtl_tid_data *tid_data;
892 struct rtl_sta_info *sta_entry = NULL;
893
894 if (sta == NULL)
895 return -EINVAL;
734 896
735 if (!ra) { 897 if (!sta->addr) {
736 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("ra = NULL\n")); 898 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
737 return -EINVAL; 899 return -EINVAL;
738 } 900 }
739 901
902 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
903 ("on ra = %pM tid = %d\n", sta->addr, tid));
904
740 if (unlikely(tid >= MAX_TID_COUNT)) 905 if (unlikely(tid >= MAX_TID_COUNT))
741 return -EINVAL; 906 return -EINVAL;
742 907
743 if (mac->tids[tid].agg.agg_state != RTL_AGG_ON) 908 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
744 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 909 tid_data = &sta_entry->tids[tid];
745 ("Stopping AGG while state not ON or starting\n")); 910 sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
911
912 ieee80211_stop_tx_ba_cb_irqsafe(mac->vif, sta->addr, tid);
746 913
747 tid_data = &mac->tids[tid]; 914 return 0;
748 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 915}
749 916
750 mac->tids[tid].agg.agg_state = RTL_AGG_OFF; 917int rtl_tx_agg_oper(struct ieee80211_hw *hw,
918 struct ieee80211_sta *sta, u16 tid)
919{
920 struct rtl_priv *rtlpriv = rtl_priv(hw);
921 struct rtl_tid_data *tid_data;
922 struct rtl_sta_info *sta_entry = NULL;
751 923
752 ieee80211_stop_tx_ba_cb_irqsafe(mac->vif, ra, tid); 924 if (sta == NULL)
925 return -EINVAL;
926
927 if (!sta->addr) {
928 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
929 return -EINVAL;
930 }
931
932 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
933 ("on ra = %pM tid = %d\n", sta->addr, tid));
934
935 if (unlikely(tid >= MAX_TID_COUNT))
936 return -EINVAL;
937
938 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
939 tid_data = &sta_entry->tids[tid];
940 sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
753 941
754 return 0; 942 return 0;
755} 943}
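The three aggregation callbacks above move a TID through a small state machine keyed off the per-station tids[] array: start hands the starting sequence number back to mac80211 and marks the TID RTL_AGG_START, the oper callback promotes it to operational, and stop tears it down. A stripped-down sketch of that flow with illustrative state names:

    #include <stdio.h>

    enum agg_state { AGG_STOP, AGG_START, AGG_OPERATIONAL };

    struct tid_data {
        enum agg_state state;
        unsigned short seq_number;
    };

    /* start: report the starting sequence number and mark the TID pending */
    static int agg_start(struct tid_data *tid, unsigned short *ssn)
    {
        *ssn = tid->seq_number;
        tid->state = AGG_START;
        return 0;
    }

    static void agg_oper(struct tid_data *tid) { tid->state = AGG_OPERATIONAL; }
    static void agg_stop(struct tid_data *tid) { tid->state = AGG_STOP; }

    int main(void)
    {
        struct tid_data tid = { AGG_STOP, 42 };
        unsigned short ssn;

        agg_start(&tid, &ssn);
        agg_oper(&tid);
        printf("ssn=%u state=%d\n", ssn, (int)tid.state);
        agg_stop(&tid);
        return 0;
    }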
@@ -768,18 +956,16 @@ void rtl_watchdog_wq_callback(void *data)
768 struct rtl_priv *rtlpriv = rtl_priv(hw); 956 struct rtl_priv *rtlpriv = rtl_priv(hw);
769 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 957 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
770 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 958 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
771
772 bool busytraffic = false; 959 bool busytraffic = false;
773 bool higher_busytraffic = false; 960 bool higher_busytraffic = false;
774 bool higher_busyrxtraffic = false; 961 bool higher_busyrxtraffic = false;
775 bool higher_busytxtraffic = false; 962 u8 idx, tid;
776
777 u8 idx = 0;
778 u32 rx_cnt_inp4eriod = 0; 963 u32 rx_cnt_inp4eriod = 0;
779 u32 tx_cnt_inp4eriod = 0; 964 u32 tx_cnt_inp4eriod = 0;
780 u32 aver_rx_cnt_inperiod = 0; 965 u32 aver_rx_cnt_inperiod = 0;
781 u32 aver_tx_cnt_inperiod = 0; 966 u32 aver_tx_cnt_inperiod = 0;
782 967 u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0};
968 u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0};
783 bool enter_ps = false; 969 bool enter_ps = false;
784 970
785 if (is_hal_stop(rtlhal)) 971 if (is_hal_stop(rtlhal))
@@ -793,9 +979,6 @@ void rtl_watchdog_wq_callback(void *data)
793 mac->cnt_after_linked = 0; 979 mac->cnt_after_linked = 0;
794 } 980 }
795 981
796 /* <2> DM */
797 rtlpriv->cfg->ops->dm_watchdog(hw);
798
799 /* 982 /*
800 *<3> to check if traffic busy, if 983 *<3> to check if traffic busy, if
801 * busytraffic we don't change channel 984 * busytraffic we don't change channel
@@ -834,8 +1017,27 @@ void rtl_watchdog_wq_callback(void *data)
834 /* Extremely high Rx data. */ 1017 /* Extremely high Rx data. */
835 if (aver_rx_cnt_inperiod > 5000) 1018 if (aver_rx_cnt_inperiod > 5000)
836 higher_busyrxtraffic = true; 1019 higher_busyrxtraffic = true;
1020 }
1021
1022 /* check every tid's tx traffic */
1023 for (tid = 0; tid <= 7; tid++) {
1024 for (idx = 0; idx <= 2; idx++)
1025 rtlpriv->link_info.tidtx_in4period[tid][idx] =
1026 rtlpriv->link_info.tidtx_in4period[tid]
1027 [idx + 1];
1028 rtlpriv->link_info.tidtx_in4period[tid][3] =
1029 rtlpriv->link_info.tidtx_inperiod[tid];
1030
1031 for (idx = 0; idx <= 3; idx++)
1032 tidtx_inp4eriod[tid] +=
1033 rtlpriv->link_info.tidtx_in4period[tid][idx];
1034 aver_tidtx_inperiod[tid] = tidtx_inp4eriod[tid] / 4;
1035 if (aver_tidtx_inperiod[tid] > 5000)
1036 rtlpriv->link_info.higher_busytxtraffic[tid] =
1037 true;
837 else 1038 else
838 higher_busytxtraffic = false; 1039 rtlpriv->link_info.higher_busytxtraffic[tid] =
1040 false;
839 } 1041 }
840 1042
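The per-TID loop above keeps a four-period window of transmit counts, averages it, and flags the TID busy when the average exceeds 5000 frames per period. The same bookkeeping in standalone form (window size and threshold follow the hunk; the struct is invented for the sketch):

    #include <stdio.h>

    #define WIN            4
    #define BUSY_THRESHOLD 5000u

    struct tid_traffic {
        unsigned int window[WIN];   /* last WIN per-period tx counts */
        int busy;
    };

    static void tid_traffic_update(struct tid_traffic *t, unsigned int tx_now)
    {
        unsigned int sum = 0;

        for (int i = 0; i < WIN - 1; i++)   /* shift the window */
            t->window[i] = t->window[i + 1];
        t->window[WIN - 1] = tx_now;

        for (int i = 0; i < WIN; i++)
            sum += t->window[i];

        t->busy = (sum / WIN) > BUSY_THRESHOLD;
    }

    int main(void)
    {
        struct tid_traffic t = { {0}, 0 };

        for (int period = 0; period < 6; period++) {
            tid_traffic_update(&t, 6000);
            printf("period %d busy=%d\n", period, t.busy);
        }
        return 0;
    }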
841 if (((rtlpriv->link_info.num_rx_inperiod + 1043 if (((rtlpriv->link_info.num_rx_inperiod +
@@ -854,11 +1056,15 @@ void rtl_watchdog_wq_callback(void *data)
854 1056
855 rtlpriv->link_info.num_rx_inperiod = 0; 1057 rtlpriv->link_info.num_rx_inperiod = 0;
856 rtlpriv->link_info.num_tx_inperiod = 0; 1058 rtlpriv->link_info.num_tx_inperiod = 0;
1059 for (tid = 0; tid <= 7; tid++)
1060 rtlpriv->link_info.tidtx_inperiod[tid] = 0;
857 1061
858 rtlpriv->link_info.busytraffic = busytraffic; 1062 rtlpriv->link_info.busytraffic = busytraffic;
859 rtlpriv->link_info.higher_busytraffic = higher_busytraffic; 1063 rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
860 rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic; 1064 rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
861 1065
1066 /* <3> DM */
1067 rtlpriv->cfg->ops->dm_watchdog(hw);
862} 1068}
863 1069
864void rtl_watch_dog_timer_callback(unsigned long data) 1070void rtl_watch_dog_timer_callback(unsigned long data)
@@ -875,6 +1081,268 @@ void rtl_watch_dog_timer_callback(unsigned long data)
875 1081
876/********************************************************* 1082/*********************************************************
877 * 1083 *
1084 * frame process functions
1085 *
1086 *********************************************************/
1087u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie)
1088{
1089 struct ieee80211_mgmt *mgmt = (void *)data;
1090 u8 *pos, *end;
1091
1092 pos = (u8 *)mgmt->u.beacon.variable;
1093 end = data + len;
1094 while (pos < end) {
1095 if (pos + 2 + pos[1] > end)
1096 return NULL;
1097
1098 if (pos[0] == ie)
1099 return pos;
1100
1101 pos += 2 + pos[1];
1102 }
1103 return NULL;
1104}
1105
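rtl_find_ie() walks the variable part of a beacon as a flat ID/length/data list. The same walk over a bare IE buffer, with the bounds check kept ahead of every access (no 802.11 header handling here):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static const uint8_t *find_ie(const uint8_t *buf, size_t len, uint8_t id)
    {
        const uint8_t *pos = buf, *end = buf + len;

        while (pos + 2 <= end) {
            uint8_t elen = pos[1];

            if (pos + 2 + elen > end)   /* truncated element */
                return NULL;
            if (pos[0] == id)
                return pos;
            pos += 2 + elen;
        }
        return NULL;
    }

    int main(void)
    {
        /* SSID (id 0) "ab", then DS Parameter Set (id 3) with channel 6 */
        const uint8_t ies[] = { 0x00, 0x02, 'a', 'b', 0x03, 0x01, 0x06 };
        const uint8_t *ds = find_ie(ies, sizeof(ies), 3);

        printf("channel %u\n", ds ? ds[2] : 0);
        return 0;
    }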
1106/* when we use 2 rx ants we send IEEE80211_SMPS_OFF */
1107/* when we use 1 rx ant we send IEEE80211_SMPS_STATIC */
1108static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
1109 enum ieee80211_smps_mode smps, u8 *da, u8 *bssid)
1110{
1111 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1112 struct sk_buff *skb;
1113 struct ieee80211_mgmt *action_frame;
1114
1115 /* 27 = header + category + action + smps mode */
1116 skb = dev_alloc_skb(27 + hw->extra_tx_headroom);
1117 if (!skb)
1118 return NULL;
1119
1120 skb_reserve(skb, hw->extra_tx_headroom);
1121 action_frame = (void *)skb_put(skb, 27);
1122 memset(action_frame, 0, 27);
1123 memcpy(action_frame->da, da, ETH_ALEN);
1124 memcpy(action_frame->sa, rtlefuse->dev_addr, ETH_ALEN);
1125 memcpy(action_frame->bssid, bssid, ETH_ALEN);
1126 action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1127 IEEE80211_STYPE_ACTION);
1128 action_frame->u.action.category = WLAN_CATEGORY_HT;
1129 action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
1130 switch (smps) {
1131 case IEEE80211_SMPS_AUTOMATIC:/* 0 */
1132 case IEEE80211_SMPS_NUM_MODES:/* 4 */
1133 WARN_ON(1);
1134 case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
1135 action_frame->u.action.u.ht_smps.smps_control =
1136 WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
1137 break;
1138 case IEEE80211_SMPS_STATIC:/* 2 */ /*MIMO_PS_STATIC*/
1139 action_frame->u.action.u.ht_smps.smps_control =
1140 WLAN_HT_SMPS_CONTROL_STATIC;/* 1 */
1141 break;
1142 case IEEE80211_SMPS_DYNAMIC:/* 3 */ /*MIMO_PS_DYNAMIC*/
1143 action_frame->u.action.u.ht_smps.smps_control =
1144 WLAN_HT_SMPS_CONTROL_DYNAMIC;/* 3 */
1145 break;
1146 }
1147
1148 return skb;
1149}
1150
1151int rtl_send_smps_action(struct ieee80211_hw *hw,
1152 struct ieee80211_sta *sta, u8 *da, u8 *bssid,
1153 enum ieee80211_smps_mode smps)
1154{
1155 struct rtl_priv *rtlpriv = rtl_priv(hw);
1156 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1157 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1158 struct sk_buff *skb = rtl_make_smps_action(hw, smps, da, bssid);
1159 struct rtl_tcb_desc tcb_desc;
1160 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1161
1162 if (rtlpriv->mac80211.act_scanning)
1163 goto err_free;
1164
1165 if (!sta)
1166 goto err_free;
1167
1168 if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
1169 goto err_free;
1170
1171 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
1172 goto err_free;
1173
1174 /* this is a type = mgmt * stype = action frame */
1175 if (skb) {
1176 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1177 struct rtl_sta_info *sta_entry =
1178 (struct rtl_sta_info *) sta->drv_priv;
1179 sta_entry->mimo_ps = smps;
1180 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
1181
1182 info->control.rates[0].idx = 0;
1183 info->control.sta = sta;
1184 info->band = hw->conf.channel->band;
1185 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
1186 }
1187err_free:
1188 return 0;
1189}
1190
1191/*********************************************************
1192 *
1193 * IOT functions
1194 *
1195 *********************************************************/
1196static bool rtl_chk_vendor_ouisub(struct ieee80211_hw *hw,
1197 struct octet_string vendor_ie)
1198{
1199 struct rtl_priv *rtlpriv = rtl_priv(hw);
1200 bool matched = false;
1201 static u8 athcap_1[] = { 0x00, 0x03, 0x7F };
1202 static u8 athcap_2[] = { 0x00, 0x13, 0x74 };
1203 static u8 broadcap_1[] = { 0x00, 0x10, 0x18 };
1204 static u8 broadcap_2[] = { 0x00, 0x0a, 0xf7 };
1205 static u8 broadcap_3[] = { 0x00, 0x05, 0xb5 };
1206 static u8 racap[] = { 0x00, 0x0c, 0x43 };
1207 static u8 ciscocap[] = { 0x00, 0x40, 0x96 };
1208 static u8 marvcap[] = { 0x00, 0x50, 0x43 };
1209
1210 if (memcmp(vendor_ie.octet, athcap_1, 3) == 0 ||
1211 memcmp(vendor_ie.octet, athcap_2, 3) == 0) {
1212 rtlpriv->mac80211.vendor = PEER_ATH;
1213 matched = true;
1214 } else if (memcmp(vendor_ie.octet, broadcap_1, 3) == 0 ||
1215 memcmp(vendor_ie.octet, broadcap_2, 3) == 0 ||
1216 memcmp(vendor_ie.octet, broadcap_3, 3) == 0) {
1217 rtlpriv->mac80211.vendor = PEER_BROAD;
1218 matched = true;
1219 } else if (memcmp(vendor_ie.octet, racap, 3) == 0) {
1220 rtlpriv->mac80211.vendor = PEER_RAL;
1221 matched = true;
1222 } else if (memcmp(vendor_ie.octet, ciscocap, 3) == 0) {
1223 rtlpriv->mac80211.vendor = PEER_CISCO;
1224 matched = true;
1225 } else if (memcmp(vendor_ie.octet, marvcap, 3) == 0) {
1226 rtlpriv->mac80211.vendor = PEER_MARV;
1227 matched = true;
1228 }
1229
1230 return matched;
1231}
1232
1233static bool rtl_find_221_ie(struct ieee80211_hw *hw, u8 *data,
1234 unsigned int len)
1235{
1236 struct ieee80211_mgmt *mgmt = (void *)data;
1237 struct octet_string vendor_ie;
1238 u8 *pos, *end;
1239
1240 pos = (u8 *)mgmt->u.beacon.variable;
1241 end = data + len;
1242 while (pos < end) {
1243 if (pos[0] == 221) {
1244 vendor_ie.length = pos[1];
1245 vendor_ie.octet = &pos[2];
1246 if (rtl_chk_vendor_ouisub(hw, vendor_ie))
1247 return true;
1248 }
1249
1250 if (pos + 2 + pos[1] > end)
1251 return false;
1252
1253 pos += 2 + pos[1];
1254 }
1255 return false;
1256}
1257
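rtl_find_221_ie() scans the beacon IEs for vendor-specific elements (ID 221) and hands the body to rtl_chk_vendor_ouisub(), which compares the first three octets against known OUIs. A standalone equivalent over a raw IE buffer, with the length check done before the body is touched:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static bool has_vendor_oui(const uint8_t *ies, size_t len,
                               const uint8_t oui[3])
    {
        const uint8_t *pos = ies, *end = ies + len;

        while (pos + 2 <= end && pos + 2 + pos[1] <= end) {
            if (pos[0] == 221 && pos[1] >= 3 &&
                memcmp(&pos[2], oui, 3) == 0)
                return true;
            pos += 2 + pos[1];
        }
        return false;
    }

    int main(void)
    {
        static const uint8_t atheros_oui[3] = { 0x00, 0x03, 0x7f };
        const uint8_t ies[] = { 221, 4, 0x00, 0x03, 0x7f, 0x01 };

        printf("%d\n", has_vendor_oui(ies, sizeof(ies), atheros_oui));
        return 0;
    }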
1258void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
1259{
1260 struct rtl_priv *rtlpriv = rtl_priv(hw);
1261 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1262 struct ieee80211_hdr *hdr = (void *)data;
1263 u32 vendor = PEER_UNKNOWN;
1264
1265 static u8 ap3_1[3] = { 0x00, 0x14, 0xbf };
1266 static u8 ap3_2[3] = { 0x00, 0x1a, 0x70 };
1267 static u8 ap3_3[3] = { 0x00, 0x1d, 0x7e };
1268 static u8 ap4_1[3] = { 0x00, 0x90, 0xcc };
1269 static u8 ap4_2[3] = { 0x00, 0x0e, 0x2e };
1270 static u8 ap4_3[3] = { 0x00, 0x18, 0x02 };
1271 static u8 ap4_4[3] = { 0x00, 0x17, 0x3f };
1272 static u8 ap4_5[3] = { 0x00, 0x1c, 0xdf };
1273 static u8 ap5_1[3] = { 0x00, 0x1c, 0xf0 };
1274 static u8 ap5_2[3] = { 0x00, 0x21, 0x91 };
1275 static u8 ap5_3[3] = { 0x00, 0x24, 0x01 };
1276 static u8 ap5_4[3] = { 0x00, 0x15, 0xe9 };
1277 static u8 ap5_5[3] = { 0x00, 0x17, 0x9A };
1278 static u8 ap5_6[3] = { 0x00, 0x18, 0xE7 };
1279 static u8 ap6_1[3] = { 0x00, 0x17, 0x94 };
1280 static u8 ap7_1[3] = { 0x00, 0x14, 0xa4 };
1281
1282 if (mac->opmode != NL80211_IFTYPE_STATION)
1283 return;
1284
1285 if (mac->link_state == MAC80211_NOLINK) {
1286 mac->vendor = PEER_UNKNOWN;
1287 return;
1288 }
1289
1290 if (mac->cnt_after_linked > 2)
1291 return;
1292
1293 /* check if this really is a beacon */
1294 if (!ieee80211_is_beacon(hdr->frame_control))
1295 return;
1296
1297 /* min. beacon length + FCS_LEN */
1298 if (len <= 40 + FCS_LEN)
1299 return;
1300
1301 /* and only beacons from the associated BSSID, please */
1302 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
1303 return;
1304
1305 if (rtl_find_221_ie(hw, data, len))
1306 vendor = mac->vendor;
1307
1308 if ((memcmp(mac->bssid, ap5_1, 3) == 0) ||
1309 (memcmp(mac->bssid, ap5_2, 3) == 0) ||
1310 (memcmp(mac->bssid, ap5_3, 3) == 0) ||
1311 (memcmp(mac->bssid, ap5_4, 3) == 0) ||
1312 (memcmp(mac->bssid, ap5_5, 3) == 0) ||
1313 (memcmp(mac->bssid, ap5_6, 3) == 0) ||
1314 vendor == PEER_ATH) {
1315 vendor = PEER_ATH;
1316 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>ath find\n"));
1317 } else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
1318 (memcmp(mac->bssid, ap4_5, 3) == 0) ||
1319 (memcmp(mac->bssid, ap4_1, 3) == 0) ||
1320 (memcmp(mac->bssid, ap4_2, 3) == 0) ||
1321 (memcmp(mac->bssid, ap4_3, 3) == 0) ||
1322 vendor == PEER_RAL) {
 1323 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>ral find\n"));
1324 vendor = PEER_RAL;
1325 } else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
1326 vendor == PEER_CISCO) {
1327 vendor = PEER_CISCO;
1328 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>cisco find\n"));
1329 } else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
1330 (memcmp(mac->bssid, ap3_2, 3) == 0) ||
1331 (memcmp(mac->bssid, ap3_3, 3) == 0) ||
1332 vendor == PEER_BROAD) {
1333 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>broad find\n"));
1334 vendor = PEER_BROAD;
1335 } else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
1336 vendor == PEER_MARV) {
1337 vendor = PEER_MARV;
1338 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("=>marv find\n"));
1339 }
1340
1341 mac->vendor = vendor;
1342}
1343
1344/*********************************************************
1345 *
878 * sysfs functions 1346 * sysfs functions
879 * 1347 *
880 *********************************************************/ 1348 *********************************************************/
@@ -940,12 +1408,13 @@ static int __init rtl_core_module_init(void)
940 if (rtl_rate_control_register()) 1408 if (rtl_rate_control_register())
941 printk(KERN_ERR "rtlwifi: Unable to register rtl_rc," 1409 printk(KERN_ERR "rtlwifi: Unable to register rtl_rc,"
942 "use default RC !!\n"); 1410 "use default RC !!\n");
1411
943 return 0; 1412 return 0;
944} 1413}
945 1414
946static void __exit rtl_core_module_exit(void) 1415static void __exit rtl_core_module_exit(void)
947{ 1416{
948 /*RC*/ 1417 /*RC*/
949 rtl_rate_control_unregister(); 1418 rtl_rate_control_unregister();
950} 1419}
951 1420
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 043045342bc7..a91f3eee59c8 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -24,13 +24,26 @@
24 * Hsinchu 300, Taiwan. 24 * Hsinchu 300, Taiwan.
25 * 25 *
26 * Larry Finger <Larry.Finger@lwfinger.net> 26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
27 *****************************************************************************/ 28 *****************************************************************************/
28 29
29#ifndef __RTL_BASE_H__ 30#ifndef __RTL_BASE_H__
30#define __RTL_BASE_H__ 31#define __RTL_BASE_H__
31 32
33enum ap_peer {
34 PEER_UNKNOWN = 0,
35 PEER_RTL = 1,
36 PEER_RTL_92SE = 2,
37 PEER_BROAD = 3,
38 PEER_RAL = 4,
39 PEER_ATH = 5,
40 PEER_CISCO = 6,
41 PEER_MARV = 7,
42 PEER_AIRGO = 9,
43 PEER_MAX = 10,
44} ;
45
32#define RTL_DUMMY_OFFSET 0 46#define RTL_DUMMY_OFFSET 0
33#define RTL_RX_DESC_SIZE 24
34#define RTL_DUMMY_UNIT 8 47#define RTL_DUMMY_UNIT 8
35#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT) 48#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
36#define RTL_TX_DESC_SIZE 32 49#define RTL_TX_DESC_SIZE 32
@@ -53,6 +66,14 @@
53#define FRAME_OFFSET_SEQUENCE 22 66#define FRAME_OFFSET_SEQUENCE 22
54#define FRAME_OFFSET_ADDRESS4 24 67#define FRAME_OFFSET_ADDRESS4 24
55 68
69#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
70 WRITEEF2BYTE(_hdr, _val)
71#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
72 WRITEEF1BYTE(_hdr, _val)
73#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
74 SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
75#define SET_80211_HDR_TO_DS(_hdr, _val) \
76 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
56 77
57#define SET_80211_PS_POLL_AID(_hdr, _val) \ 78#define SET_80211_PS_POLL_AID(_hdr, _val) \
58 (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val)) 79 (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
@@ -64,11 +85,27 @@
64#define SET_80211_HDR_DURATION(_hdr, _val) \ 85#define SET_80211_HDR_DURATION(_hdr, _val) \
65 (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val)) 86 (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
66#define SET_80211_HDR_ADDRESS1(_hdr, _val) \ 87#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
67 memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN) 88 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8 *)(_val))
68#define SET_80211_HDR_ADDRESS2(_hdr, _val) \ 89#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
69 memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN) 90 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS2, (u8 *)(_val))
70#define SET_80211_HDR_ADDRESS3(_hdr, _val) \ 91#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
71 memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN) 92 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
93#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
94 WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
95
96#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
97 WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
98#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
99 WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
100#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
101 WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
102#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
103 READEF2BYTE(((u8 *)(__phdr)) + 34)
104#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
105 WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
106#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
107 SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
108 (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
72 109
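The new SET_80211_HDR_* / SET_BEACON_PROBE_RSP_* macros above splice bit ranges into little-endian fields of a raw frame buffer (e.g. the PWR_MGNT bit at offset 12 of the frame-control word). A plain-C sketch of what such a helper has to do, working byte-by-byte so it stays endian- and alignment-safe; the function name is mine, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /* Write 'nbits' bits of 'val' at bit offset 'shift' into a little-endian
     * 16-bit field stored at p[0]..p[1]. */
    static void set_bits_le16(uint8_t *p, unsigned int shift,
                              unsigned int nbits, uint16_t val)
    {
        uint16_t v = (uint16_t)(p[0] | (p[1] << 8));
        uint16_t mask = (uint16_t)(((1u << nbits) - 1) << shift);

        v = (uint16_t)((v & ~mask) | ((val << shift) & mask));
        p[0] = (uint8_t)(v & 0xff);
        p[1] = (uint8_t)(v >> 8);
    }

    int main(void)
    {
        uint8_t fc[2] = { 0x00, 0x00 };

        set_bits_le16(fc, 12, 1, 1);          /* e.g. the PWR_MGNT bit */
        printf("%02x %02x\n", fc[0], fc[1]);  /* prints: 00 10 */
        return 0;
    }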
73int rtl_init_core(struct ieee80211_hw *hw); 110int rtl_init_core(struct ieee80211_hw *hw);
74void rtl_deinit_core(struct ieee80211_hw *hw); 111void rtl_deinit_core(struct ieee80211_hw *hw);
@@ -80,18 +117,27 @@ void rtl_watch_dog_timer_callback(unsigned long data);
80void rtl_deinit_deferred_work(struct ieee80211_hw *hw); 117void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
81 118
82bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 119bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
83bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
84u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 120u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
85 121
86void rtl_watch_dog_timer_callback(unsigned long data); 122void rtl_watch_dog_timer_callback(unsigned long data);
87int rtl_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra, 123int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
88 u16 tid, u16 *ssn); 124 u16 tid, u16 *ssn);
89int rtl_tx_agg_stop(struct ieee80211_hw *hw, const u8 *ra, u16 tid); 125int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
126 u16 tid);
127int rtl_tx_agg_oper(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
128 u16 tid);
90void rtl_watchdog_wq_callback(void *data); 129void rtl_watchdog_wq_callback(void *data);
91 130
92void rtl_get_tcb_desc(struct ieee80211_hw *hw, 131void rtl_get_tcb_desc(struct ieee80211_hw *hw,
93 struct ieee80211_tx_info *info, 132 struct ieee80211_tx_info *info,
133 struct ieee80211_sta *sta,
94 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc); 134 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
95 135
136int rtl_send_smps_action(struct ieee80211_hw *hw,
137 struct ieee80211_sta *sta, u8 *da, u8 *bssid,
138 enum ieee80211_smps_mode smps);
139u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
140void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
141u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid);
96extern struct attribute_group rtl_attribute_group; 142extern struct attribute_group rtl_attribute_group;
97#endif 143#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 52c9c1367cac..7295af0536b7 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -23,6 +23,8 @@
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, 23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan. 24 * Hsinchu 300, Taiwan.
25 * 25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
26 *****************************************************************************/ 28 *****************************************************************************/
27 29
28#include "wifi.h" 30#include "wifi.h"
@@ -49,7 +51,7 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
49 u32 target_content = 0; 51 u32 target_content = 0;
50 u8 entry_i; 52 u8 entry_i;
51 53
52 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 54 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
53 ("key_cont_128:\n %x:%x:%x:%x:%x:%x\n", 55 ("key_cont_128:\n %x:%x:%x:%x:%x:%x\n",
54 key_cont_128[0], key_cont_128[1], 56 key_cont_128[0], key_cont_128[1],
55 key_cont_128[2], key_cont_128[3], 57 key_cont_128[2], key_cont_128[3],
@@ -68,15 +70,13 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
68 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], 70 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
69 target_command); 71 target_command);
70 72
71 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 73 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
72 ("rtl_cam_program_entry(): " 74 ("WRITE %x: %x\n",
73 "WRITE %x: %x\n",
74 rtlpriv->cfg->maps[WCAMI], target_content)); 75 rtlpriv->cfg->maps[WCAMI], target_content));
75 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 76 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
76 ("The Key ID is %d\n", entry_no)); 77 ("The Key ID is %d\n", entry_no));
77 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 78 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
78 ("rtl_cam_program_entry(): " 79 ("WRITE %x: %x\n",
79 "WRITE %x: %x\n",
80 rtlpriv->cfg->maps[RWCAM], target_command)); 80 rtlpriv->cfg->maps[RWCAM], target_command));
81 81
82 } else if (entry_i == 1) { 82 } else if (entry_i == 1) {
@@ -91,12 +91,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
91 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], 91 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
92 target_command); 92 target_command);
93 93
94 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 94 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
95 ("rtl_cam_program_entry(): WRITE A4: %x\n", 95 ("WRITE A4: %x\n", target_content));
96 target_content)); 96 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
97 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 97 ("WRITE A0: %x\n", target_command));
98 ("rtl_cam_program_entry(): WRITE A0: %x\n",
99 target_command));
100 98
101 } else { 99 } else {
102 100
@@ -113,16 +111,14 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
113 target_command); 111 target_command);
114 udelay(100); 112 udelay(100);
115 113
116 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 114 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
117 ("rtl_cam_program_entry(): WRITE A4: %x\n", 115 ("WRITE A4: %x\n", target_content));
118 target_content)); 116 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
119 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 117 ("WRITE A0: %x\n", target_command));
120 ("rtl_cam_program_entry(): WRITE A0: %x\n",
121 target_command));
122 } 118 }
123 } 119 }
124 120
125 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 121 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
126 ("after set key, usconfig:%x\n", us_config)); 122 ("after set key, usconfig:%x\n", us_config));
127} 123}
128 124
@@ -289,3 +285,71 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
289 285
290} 286}
291EXPORT_SYMBOL(rtl_cam_empty_entry); 287EXPORT_SYMBOL(rtl_cam_empty_entry);
288
289u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
290{
291 struct rtl_priv *rtlpriv = rtl_priv(hw);
292 u32 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> 4;
293 u8 entry_idx = 0;
294 u8 i, *addr;
295
296 if (NULL == sta_addr) {
297 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
298 ("sta_addr is NULL.\n"));
299 return TOTAL_CAM_ENTRY;
300 }
301 /* Does STA already exist? */
302 for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
303 addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
304 if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
305 return i;
306 }
307 /* Get a free CAM entry. */
308 for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
309 if ((bitmap & BIT(0)) == 0) {
310 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
311 ("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
312 rtlpriv->sec.hwsec_cam_bitmap, entry_idx));
313 rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
314 memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
315 sta_addr, ETH_ALEN);
316 return entry_idx;
317 }
318 bitmap = bitmap >> 1;
319 }
320 return TOTAL_CAM_ENTRY;
321}
322EXPORT_SYMBOL(rtl_cam_get_free_entry);
323
324void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
325{
326 struct rtl_priv *rtlpriv = rtl_priv(hw);
327 u32 bitmap;
328 u8 i, *addr;
329
330 if (NULL == sta_addr) {
331 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
332 ("sta_addr is NULL.\n"));
333 }
334
335 if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\
336 sta_addr[4]|sta_addr[5]) == 0) {
337 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
338 ("sta_addr is 00:00:00:00:00:00.\n"));
339 return;
340 }
341 /* Does STA already exist? */
342 for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
343 addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
344 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
345 if (((bitmap & BIT(0)) == BIT(0)) &&
346 (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
347 /* Remove from HW Security CAM */
348 memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
349 rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
350 printk(KERN_INFO "&&&&&&&&&del entry %d\n", i);
351 }
352 }
353 return;
354}
355EXPORT_SYMBOL(rtl_cam_del_entry);
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
index dd82f057d53d..c62da4eefc75 100644
--- a/drivers/net/wireless/rtlwifi/cam.h
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -23,12 +23,13 @@
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, 23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan. 24 * Hsinchu 300, Taiwan.
25 * 25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
26 *****************************************************************************/ 28 *****************************************************************************/
27 29
28#ifndef __RTL_CAM_H_ 30#ifndef __RTL_CAM_H_
29#define __RTL_CAM_H_ 31#define __RTL_CAM_H_
30 32
31#define TOTAL_CAM_ENTRY 32
32#define CAM_CONTENT_COUNT 8 33#define CAM_CONTENT_COUNT 8
33 34
34#define CFG_DEFAULT_KEY BIT(5) 35#define CFG_DEFAULT_KEY BIT(5)
@@ -49,5 +50,7 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
49void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index); 50void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
50void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index); 51void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
51void rtl_cam_reset_sec_info(struct ieee80211_hw *hw); 52void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
53u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr);
54void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr);
52 55
53#endif 56#endif
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index e4f4aee8f298..fc89cd8c8320 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -24,6 +24,7 @@
24 * Hsinchu 300, Taiwan. 24 * Hsinchu 300, Taiwan.
25 * 25 *
26 * Larry Finger <Larry.Finger@lwfinger.net> 26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
27 *****************************************************************************/ 28 *****************************************************************************/
28 29
29#include "wifi.h" 30#include "wifi.h"
@@ -35,7 +36,7 @@
35/*mutex for start & stop is must here. */ 36/*mutex for start & stop is must here. */
36static int rtl_op_start(struct ieee80211_hw *hw) 37static int rtl_op_start(struct ieee80211_hw *hw)
37{ 38{
38 int err = 0; 39 int err;
39 struct rtl_priv *rtlpriv = rtl_priv(hw); 40 struct rtl_priv *rtlpriv = rtl_priv(hw);
40 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 41 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
41 42
@@ -45,10 +46,8 @@ static int rtl_op_start(struct ieee80211_hw *hw)
45 return 0; 46 return 0;
46 mutex_lock(&rtlpriv->locks.conf_mutex); 47 mutex_lock(&rtlpriv->locks.conf_mutex);
47 err = rtlpriv->intf_ops->adapter_start(hw); 48 err = rtlpriv->intf_ops->adapter_start(hw);
48 if (err) 49 if (!err)
49 goto out; 50 rtl_watch_dog_timer_callback((unsigned long)hw);
50 rtl_watch_dog_timer_callback((unsigned long)hw);
51out:
52 mutex_unlock(&rtlpriv->locks.conf_mutex); 51 mutex_unlock(&rtlpriv->locks.conf_mutex);
53 return err; 52 return err;
54} 53}
@@ -72,6 +71,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
72 71
73 mac->link_state = MAC80211_NOLINK; 72 mac->link_state = MAC80211_NOLINK;
74 memset(mac->bssid, 0, 6); 73 memset(mac->bssid, 0, 6);
74 mac->vendor = PEER_UNKNOWN;
75 75
76 /*reset sec info */ 76 /*reset sec info */
77 rtl_cam_reset_sec_info(hw); 77 rtl_cam_reset_sec_info(hw);
@@ -87,6 +87,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
87 struct rtl_priv *rtlpriv = rtl_priv(hw); 87 struct rtl_priv *rtlpriv = rtl_priv(hw);
88 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 88 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
89 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 89 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
90 struct rtl_tcb_desc tcb_desc;
91 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
90 92
91 if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON)) 93 if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
92 goto err_free; 94 goto err_free;
@@ -94,8 +96,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
94 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) 96 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
95 goto err_free; 97 goto err_free;
96 98
97 99 if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
98 rtlpriv->intf_ops->adapter_tx(hw, skb); 100 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
99 101
100 return; 102 return;
101 103
@@ -136,10 +138,26 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
136 138
137 mac->link_state = MAC80211_LINKED; 139 mac->link_state = MAC80211_LINKED;
138 rtlpriv->cfg->ops->set_bcn_reg(hw); 140 rtlpriv->cfg->ops->set_bcn_reg(hw);
141 if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
142 mac->basic_rates = 0xfff;
143 else
144 mac->basic_rates = 0xff0;
145 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
146 (u8 *) (&mac->basic_rates));
147
139 break; 148 break;
140 case NL80211_IFTYPE_AP: 149 case NL80211_IFTYPE_AP:
141 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 150 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
142 ("NL80211_IFTYPE_AP\n")); 151 ("NL80211_IFTYPE_AP\n"));
152
153 mac->link_state = MAC80211_LINKED;
154 rtlpriv->cfg->ops->set_bcn_reg(hw);
155 if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
156 mac->basic_rates = 0xfff;
157 else
158 mac->basic_rates = 0xff0;
159 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
160 (u8 *) (&mac->basic_rates));
143 break; 161 break;
144 default: 162 default:
145 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 163 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -186,13 +204,12 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
186 mac->vif = NULL; 204 mac->vif = NULL;
187 mac->link_state = MAC80211_NOLINK; 205 mac->link_state = MAC80211_NOLINK;
188 memset(mac->bssid, 0, 6); 206 memset(mac->bssid, 0, 6);
207 mac->vendor = PEER_UNKNOWN;
189 mac->opmode = NL80211_IFTYPE_UNSPECIFIED; 208 mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
190 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); 209 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
191
192 mutex_unlock(&rtlpriv->locks.conf_mutex); 210 mutex_unlock(&rtlpriv->locks.conf_mutex);
193} 211}
194 212
195
196static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) 213static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
197{ 214{
198 struct rtl_priv *rtlpriv = rtl_priv(hw); 215 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -224,10 +241,25 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
224 241
225 /*For LPS */ 242 /*For LPS */
226 if (changed & IEEE80211_CONF_CHANGE_PS) { 243 if (changed & IEEE80211_CONF_CHANGE_PS) {
227 if (conf->flags & IEEE80211_CONF_PS) 244 cancel_delayed_work(&rtlpriv->works.ps_work);
228 rtl_lps_enter(hw); 245 cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
229 else 246 if (conf->flags & IEEE80211_CONF_PS) {
230 rtl_lps_leave(hw); 247 rtlpriv->psc.sw_ps_enabled = true;
248 /* sleep here is must, or we may recv the beacon and
249 * cause mac80211 into wrong ps state, this will cause
250 * power save nullfunc send fail, and further cause
251 * pkt loss, So sleep must quickly but not immediatly
252 * because that will cause nullfunc send by mac80211
253 * fail, and cause pkt loss, we have tested that 5mA
254 * is worked very well */
255 if (!rtlpriv->psc.multi_buffered)
256 queue_delayed_work(rtlpriv->works.rtl_wq,
257 &rtlpriv->works.ps_work,
258 MSECS(5));
259 } else {
260 rtl_swlps_rf_awake(hw);
261 rtlpriv->psc.sw_ps_enabled = false;
262 }
231 } 263 }
232 264
233 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { 265 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
@@ -259,7 +291,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
259 case NL80211_CHAN_NO_HT: 291 case NL80211_CHAN_NO_HT:
260 /* SC */ 292 /* SC */
261 mac->cur_40_prime_sc = 293 mac->cur_40_prime_sc =
262 PRIME_CHNL_OFFSET_DONT_CARE; 294 PRIME_CHNL_OFFSET_DONT_CARE;
263 rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20; 295 rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20;
264 mac->bw_40 = false; 296 mac->bw_40 = false;
265 break; 297 break;
@@ -267,7 +299,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
267 /* SC */ 299 /* SC */
268 mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER; 300 mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER;
269 rtlphy->current_chan_bw = 301 rtlphy->current_chan_bw =
270 HT_CHANNEL_WIDTH_20_40; 302 HT_CHANNEL_WIDTH_20_40;
271 mac->bw_40 = true; 303 mac->bw_40 = true;
272 304
273 /*wide channel */ 305 /*wide channel */
@@ -278,7 +310,7 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
278 /* SC */ 310 /* SC */
279 mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER; 311 mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER;
280 rtlphy->current_chan_bw = 312 rtlphy->current_chan_bw =
281 HT_CHANNEL_WIDTH_20_40; 313 HT_CHANNEL_WIDTH_20_40;
282 mac->bw_40 = true; 314 mac->bw_40 = true;
283 315
284 /*wide channel */ 316 /*wide channel */
@@ -288,16 +320,29 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
288 default: 320 default:
289 mac->bw_40 = false; 321 mac->bw_40 = false;
290 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 322 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
291 ("switch case not processed\n")); 323 ("switch case not processed\n"));
292 break; 324 break;
293 } 325 }
294 326
295 if (wide_chan <= 0) 327 if (wide_chan <= 0)
296 wide_chan = 1; 328 wide_chan = 1;
329
330 /* While scanning, before we go off-channel we may send a PS=1
331 * null frame to the AP and then quickly follow it with a PS=0
332 * null frame, but the first null may have made the AP queue
333 * many packets in its hardware TX buffer. Those packets must
334 * be transmitted before we leave the channel, so delay a bit
335 * longer to let the AP flush them before going off-channel;
336 * otherwise the AP may disassociate us or tear down the BA session.
337 */
338 if (rtlpriv->mac80211.offchan_deley) {
339 rtlpriv->mac80211.offchan_deley = false;
340 mdelay(50);
341 }
297 rtlphy->current_channel = wide_chan; 342 rtlphy->current_channel = wide_chan;
298 343
299 rtlpriv->cfg->ops->set_channel_access(hw);
300 rtlpriv->cfg->ops->switch_channel(hw); 344 rtlpriv->cfg->ops->switch_channel(hw);
345 rtlpriv->cfg->ops->set_channel_access(hw);
301 rtlpriv->cfg->ops->set_bw_mode(hw, 346 rtlpriv->cfg->ops->set_bw_mode(hw,
302 hw->conf.channel_type); 347 hw->conf.channel_type);
303 } 348 }
@@ -345,27 +390,28 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
345 } 390 }
346 } 391 }
347 392
348 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 393 /* If the SSID has not been set to the hardware, don't check
349 /* 394 * the BSSID here; this path is only for scanning while linked.
350 *TODO: BIT(5) is probe response BIT(8) is beacon 395 * The linked/no-link BSSID check is set in set_network_type. */
351 *TODO: Use define for BIT(5) and BIT(8) 396 if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
352 */ 397 (mac->link_state >= MAC80211_LINKED)) {
353 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) 398 if (mac->opmode != NL80211_IFTYPE_AP) {
354 mac->rx_mgt_filter |= (BIT(5) | BIT(8)); 399 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
355 else 400 rtlpriv->cfg->ops->set_chk_bssid(hw, false);
356 mac->rx_mgt_filter &= ~(BIT(5) | BIT(8)); 401 } else {
402 rtlpriv->cfg->ops->set_chk_bssid(hw, true);
403 }
404 }
357 } 405 }
358 406
359 if (changed_flags & FIF_CONTROL) { 407 if (changed_flags & FIF_CONTROL) {
360 if (*new_flags & FIF_CONTROL) { 408 if (*new_flags & FIF_CONTROL) {
361 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF]; 409 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
362 mac->rx_ctrl_filter |= RTL_SUPPORTED_CTRL_FILTER;
363 410
364 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 411 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
365 ("Enable receive control frame.\n")); 412 ("Enable receive control frame.\n"));
366 } else { 413 } else {
367 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF]; 414 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
368 mac->rx_ctrl_filter &= ~RTL_SUPPORTED_CTRL_FILTER;
369 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 415 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
370 ("Disable receive control frame.\n")); 416 ("Disable receive control frame.\n"));
371 } 417 }
@@ -382,14 +428,54 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
382 ("Disable receive other BSS's frame.\n")); 428 ("Disable receive other BSS's frame.\n"));
383 } 429 }
384 } 430 }
385
386 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
387 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
388 (u8 *) (&mac->rx_mgt_filter));
389 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
390 (u8 *) (&mac->rx_ctrl_filter));
391} 431}
432static int rtl_op_sta_add(struct ieee80211_hw *hw,
433 struct ieee80211_vif *vif,
434 struct ieee80211_sta *sta)
435{
436 struct rtl_priv *rtlpriv = rtl_priv(hw);
437 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
438 struct rtl_sta_info *sta_entry;
439
440 if (sta) {
441 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
442 if (rtlhal->current_bandtype == BAND_ON_2_4G) {
443 sta_entry->wireless_mode = WIRELESS_MODE_G;
444 if (sta->supp_rates[0] <= 0xf)
445 sta_entry->wireless_mode = WIRELESS_MODE_B;
446 if (sta->ht_cap.ht_supported == true)
447 sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
448 } else if (rtlhal->current_bandtype == BAND_ON_5G) {
449 sta_entry->wireless_mode = WIRELESS_MODE_A;
450 if (sta->ht_cap.ht_supported == true)
451 sta_entry->wireless_mode = WIRELESS_MODE_N_5G;
452 }
453
454 /* Sometimes mac80211 reports the wrong supp_rates for adhoc */
455 if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
456 sta_entry->wireless_mode = WIRELESS_MODE_G;
392 457
458 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
459 ("Add sta addr is "MAC_FMT"\n", MAC_ARG(sta->addr)));
460 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
461 }
462 return 0;
463}
464static int rtl_op_sta_remove(struct ieee80211_hw *hw,
465 struct ieee80211_vif *vif,
466 struct ieee80211_sta *sta)
467{
468 struct rtl_priv *rtlpriv = rtl_priv(hw);
469 struct rtl_sta_info *sta_entry;
470 if (sta) {
471 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
472 ("Remove sta addr is "MAC_FMT"\n", MAC_ARG(sta->addr)));
473 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
474 sta_entry->wireless_mode = 0;
475 sta_entry->ratr_index = 0;
476 }
477 return 0;
478}
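The sta_add/sta_remove pair above leans on mac80211's per-station private area: the driver declares how much space it needs and mac80211 carves that out of each struct ieee80211_sta. A minimal sketch of that mechanism (my_sta_info is an illustrative layout, not the driver's struct rtl_sta_info):

	#include <net/mac80211.h>

	struct my_sta_info {            /* hypothetical per-station state */
		u8 wireless_mode;
		u8 ratr_index;
	};

	/* At setup time, before ieee80211_register_hw(): */
	static void my_declare_sta_space(struct ieee80211_hw *hw)
	{
		hw->sta_data_size = sizeof(struct my_sta_info);
	}

	/* In .sta_add/.sta_remove the area is reached through drv_priv: */
	static void my_reset_sta(struct ieee80211_sta *sta)
	{
		struct my_sta_info *info = (struct my_sta_info *)sta->drv_priv;

		info->wireless_mode = 0;
		info->ratr_index = 0;
	}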
393static int _rtl_get_hal_qnum(u16 queue) 479static int _rtl_get_hal_qnum(u16 queue)
394{ 480{
395 int qnum; 481 int qnum;
@@ -446,19 +532,18 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
446 struct ieee80211_bss_conf *bss_conf, u32 changed) 532 struct ieee80211_bss_conf *bss_conf, u32 changed)
447{ 533{
448 struct rtl_priv *rtlpriv = rtl_priv(hw); 534 struct rtl_priv *rtlpriv = rtl_priv(hw);
535 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
449 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 536 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
450 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 537 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
538 struct ieee80211_sta *sta = NULL;
451 539
452 mutex_lock(&rtlpriv->locks.conf_mutex); 540 mutex_lock(&rtlpriv->locks.conf_mutex);
453
454 if ((vif->type == NL80211_IFTYPE_ADHOC) || 541 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
455 (vif->type == NL80211_IFTYPE_AP) || 542 (vif->type == NL80211_IFTYPE_AP) ||
456 (vif->type == NL80211_IFTYPE_MESH_POINT)) { 543 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
457
458 if ((changed & BSS_CHANGED_BEACON) || 544 if ((changed & BSS_CHANGED_BEACON) ||
459 (changed & BSS_CHANGED_BEACON_ENABLED && 545 (changed & BSS_CHANGED_BEACON_ENABLED &&
460 bss_conf->enable_beacon)) { 546 bss_conf->enable_beacon)) {
461
462 if (mac->beacon_enabled == 0) { 547 if (mac->beacon_enabled == 0) {
463 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 548 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
464 ("BSS_CHANGED_BEACON_ENABLED\n")); 549 ("BSS_CHANGED_BEACON_ENABLED\n"));
@@ -470,8 +555,13 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
470 rtlpriv->cfg->maps 555 rtlpriv->cfg->maps
471 [RTL_IBSS_INT_MASKS], 556 [RTL_IBSS_INT_MASKS],
472 0); 557 0);
558
559 if (rtlpriv->cfg->ops->linked_set_reg)
560 rtlpriv->cfg->ops->linked_set_reg(hw);
473 } 561 }
474 } else { 562 }
563 if ((changed & BSS_CHANGED_BEACON_ENABLED &&
564 !bss_conf->enable_beacon)) {
475 if (mac->beacon_enabled == 1) { 565 if (mac->beacon_enabled == 1) {
476 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 566 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
477 ("ADHOC DISABLE BEACON\n")); 567 ("ADHOC DISABLE BEACON\n"));
@@ -482,7 +572,6 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
482 [RTL_IBSS_INT_MASKS]); 572 [RTL_IBSS_INT_MASKS]);
483 } 573 }
484 } 574 }
485
486 if (changed & BSS_CHANGED_BEACON_INT) { 575 if (changed & BSS_CHANGED_BEACON_INT) {
487 RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE, 576 RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
488 ("BSS_CHANGED_BEACON_INT\n")); 577 ("BSS_CHANGED_BEACON_INT\n"));
@@ -494,11 +583,25 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
494 /*TODO: reference to enum ieee80211_bss_change */ 583 /*TODO: reference to enum ieee80211_bss_change */
495 if (changed & BSS_CHANGED_ASSOC) { 584 if (changed & BSS_CHANGED_ASSOC) {
496 if (bss_conf->assoc) { 585 if (bss_conf->assoc) {
586 /* We should reset all security info and the CAM
587 * here, before setting the CAM after association;
588 * resetting on disassoc instead would break a
589 * TKIP->WEP transition because some flags go stale. */
590 /* reset sec info */
591 rtl_cam_reset_sec_info(hw);
592 /* Reset the CAM to fix the WEP failure seen when
593 * switching from WPA to WEP. */
594 rtl_cam_reset_all_entry(hw);
595
497 mac->link_state = MAC80211_LINKED; 596 mac->link_state = MAC80211_LINKED;
498 mac->cnt_after_linked = 0; 597 mac->cnt_after_linked = 0;
499 mac->assoc_id = bss_conf->aid; 598 mac->assoc_id = bss_conf->aid;
500 memcpy(mac->bssid, bss_conf->bssid, 6); 599 memcpy(mac->bssid, bss_conf->bssid, 6);
501 600
601 if (rtlpriv->cfg->ops->linked_set_reg)
602 rtlpriv->cfg->ops->linked_set_reg(hw);
603 if (mac->opmode == NL80211_IFTYPE_STATION && sta)
604 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
502 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 605 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
503 ("BSS_CHANGED_ASSOC\n")); 606 ("BSS_CHANGED_ASSOC\n"));
504 } else { 607 } else {
@@ -507,9 +610,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
507 610
508 mac->link_state = MAC80211_NOLINK; 611 mac->link_state = MAC80211_NOLINK;
509 memset(mac->bssid, 0, 6); 612 memset(mac->bssid, 0, 6);
510 613 mac->vendor = PEER_UNKNOWN;
511 /* reset sec info */
512 rtl_cam_reset_sec_info(hw);
513 614
514 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 615 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
515 ("BSS_CHANGED_UN_ASSOC\n")); 616 ("BSS_CHANGED_UN_ASSOC\n"));
@@ -546,14 +647,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
546 } 647 }
547 648
548 if (changed & BSS_CHANGED_HT) { 649 if (changed & BSS_CHANGED_HT) {
549 struct ieee80211_sta *sta = NULL;
550
551 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 650 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
552 ("BSS_CHANGED_HT\n")); 651 ("BSS_CHANGED_HT\n"));
553
554 rcu_read_lock(); 652 rcu_read_lock();
555 sta = ieee80211_find_sta(mac->vif, mac->bssid); 653 sta = get_sta(hw, vif, (u8 *)bss_conf->bssid);
556
557 if (sta) { 654 if (sta) {
558 if (sta->ht_cap.ampdu_density > 655 if (sta->ht_cap.ampdu_density >
559 mac->current_ampdu_density) 656 mac->current_ampdu_density)
@@ -575,9 +672,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
575 } 672 }
576 673
577 if (changed & BSS_CHANGED_BSSID) { 674 if (changed & BSS_CHANGED_BSSID) {
578 struct ieee80211_sta *sta = NULL;
579 u32 basic_rates; 675 u32 basic_rates;
580 u8 i;
581 676
582 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID, 677 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
583 (u8 *) bss_conf->bssid); 678 (u8 *) bss_conf->bssid);
@@ -585,96 +680,65 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
585 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 680 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
586 (MAC_FMT "\n", MAC_ARG(bss_conf->bssid))); 681 (MAC_FMT "\n", MAC_ARG(bss_conf->bssid)));
587 682
683 mac->vendor = PEER_UNKNOWN;
588 memcpy(mac->bssid, bss_conf->bssid, 6); 684 memcpy(mac->bssid, bss_conf->bssid, 6);
589 if (is_valid_ether_addr(bss_conf->bssid)) { 685 rtlpriv->cfg->ops->set_network_type(hw, vif->type);
590 switch (vif->type) {
591 case NL80211_IFTYPE_UNSPECIFIED:
592 break;
593 case NL80211_IFTYPE_ADHOC:
594 break;
595 case NL80211_IFTYPE_STATION:
596 break;
597 case NL80211_IFTYPE_AP:
598 break;
599 default:
600 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
601 ("switch case not process\n"));
602 break;
603 }
604 rtlpriv->cfg->ops->set_network_type(hw, vif->type);
605 } else
606 rtlpriv->cfg->ops->set_network_type(hw,
607 NL80211_IFTYPE_UNSPECIFIED);
608
609 memset(mac->mcs, 0, 16);
610 mac->ht_enable = false;
611 mac->sgi_40 = false;
612 mac->sgi_20 = false;
613
614 if (!bss_conf->use_short_slot)
615 mac->mode = WIRELESS_MODE_B;
616 else
617 mac->mode = WIRELESS_MODE_G;
618 686
619 rcu_read_lock(); 687 rcu_read_lock();
620 sta = ieee80211_find_sta(mac->vif, mac->bssid); 688 sta = get_sta(hw, vif, (u8 *)bss_conf->bssid);
689 if (!sta) {
690 rcu_read_unlock();
691 goto out;
692 }
621 693
622 if (sta) { 694 if (rtlhal->current_bandtype == BAND_ON_5G) {
623 if (sta->ht_cap.ht_supported) { 695 mac->mode = WIRELESS_MODE_A;
696 } else {
697 if (sta->supp_rates[0] <= 0xf)
698 mac->mode = WIRELESS_MODE_B;
699 else
700 mac->mode = WIRELESS_MODE_G;
701 }
702
703 if (sta->ht_cap.ht_supported) {
704 if (rtlhal->current_bandtype == BAND_ON_2_4G)
624 mac->mode = WIRELESS_MODE_N_24G; 705 mac->mode = WIRELESS_MODE_N_24G;
625 mac->ht_enable = true; 706 else
626 } 707 mac->mode = WIRELESS_MODE_N_5G;
708 }
627 709
628 if (mac->ht_enable) { 710 /* Only station mode needs this; IBSS and AP mode
629 u16 ht_cap = sta->ht_cap.cap; 711 * set it in sta_add, and sta would be NULL here. */
630 memcpy(mac->mcs, (u8 *) (&sta->ht_cap.mcs), 16); 712 if (mac->opmode == NL80211_IFTYPE_STATION) {
631 713 struct rtl_sta_info *sta_entry;
632 for (i = 0; i < 16; i++) 714 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
633 RT_TRACE(rtlpriv, COMP_MAC80211, 715 sta_entry->wireless_mode = mac->mode;
634 DBG_LOUD, ("%x ", 716 }
635 mac->mcs[i])); 717
636 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, 718 if (sta->ht_cap.ht_supported) {
637 ("\n")); 719 mac->ht_enable = true;
638 720
639 if (ht_cap & IEEE80211_HT_CAP_SGI_40) 721 /*
640 mac->sgi_40 = true; 722 * for cisco 1252 bw20 it's wrong
641 723 * if (ht_cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
642 if (ht_cap & IEEE80211_HT_CAP_SGI_20) 724 * mac->bw_40 = true;
643 mac->sgi_20 = true; 725 * }
644 726 * */
645 /*
646 * for cisco 1252 bw20 it's wrong
647 * if (ht_cap &
648 * IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
649 * mac->bw_40 = true;
650 * }
651 */
652 }
653 } 727 }
654 rcu_read_unlock();
655 728
656 /*mac80211 just give us CCK rates any time
657 *So we add G rate in basic rates when
658 not in B mode*/
659 if (changed & BSS_CHANGED_BASIC_RATES) { 729 if (changed & BSS_CHANGED_BASIC_RATES) {
660 if (mac->mode == WIRELESS_MODE_B) 730 /* for 5G must << RATE_6M_INDEX=4,
661 basic_rates = bss_conf->basic_rates | 0x00f; 731 * because 5G have no cck rate*/
732 if (rtlhal->current_bandtype == BAND_ON_5G)
733 basic_rates = sta->supp_rates[1] << 4;
662 else 734 else
663 basic_rates = bss_conf->basic_rates | 0xff0; 735 basic_rates = sta->supp_rates[0];
664
665 if (!vif)
666 goto out;
667 736
668 mac->basic_rates = basic_rates; 737 mac->basic_rates = basic_rates;
669 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, 738 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
670 (u8 *) (&basic_rates)); 739 (u8 *) (&basic_rates));
671
672 if (rtlpriv->dm.useramask)
673 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
674 else
675 rtlpriv->cfg->ops->update_rate_table(hw);
676
677 } 740 }
741 rcu_read_unlock();
678 } 742 }
679 743
680 /* 744 /*
@@ -760,16 +824,17 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
760 case IEEE80211_AMPDU_TX_START: 824 case IEEE80211_AMPDU_TX_START:
761 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 825 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
762 ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid)); 826 ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid));
763 return rtl_tx_agg_start(hw, sta->addr, tid, ssn); 827 return rtl_tx_agg_start(hw, sta, tid, ssn);
764 break; 828 break;
765 case IEEE80211_AMPDU_TX_STOP: 829 case IEEE80211_AMPDU_TX_STOP:
766 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 830 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
767 ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid)); 831 ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid));
768 return rtl_tx_agg_stop(hw, sta->addr, tid); 832 return rtl_tx_agg_stop(hw, sta, tid);
769 break; 833 break;
770 case IEEE80211_AMPDU_TX_OPERATIONAL: 834 case IEEE80211_AMPDU_TX_OPERATIONAL:
771 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 835 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
772 ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid)); 836 ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid));
837 rtl_tx_agg_oper(hw, sta, tid);
773 break; 838 break;
774 case IEEE80211_AMPDU_RX_START: 839 case IEEE80211_AMPDU_RX_START:
775 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, 840 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
@@ -799,8 +864,12 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
799 if (mac->link_state == MAC80211_LINKED) { 864 if (mac->link_state == MAC80211_LINKED) {
800 rtl_lps_leave(hw); 865 rtl_lps_leave(hw);
801 mac->link_state = MAC80211_LINKED_SCANNING; 866 mac->link_state = MAC80211_LINKED_SCANNING;
802 } else 867 } else {
803 rtl_ips_nic_on(hw); 868 rtl_ips_nic_on(hw);
869 }
870
871 /* Dual mac */
872 rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false;
804 873
805 rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY); 874 rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY);
806 rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP); 875 rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP);
@@ -812,22 +881,19 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
812 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 881 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
813 882
814 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n")); 883 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
815
816 rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
817 mac->act_scanning = false; 884 mac->act_scanning = false;
885 /* Dual mac */
886 rtlpriv->rtlhal.load_imrandiqk_setting_for2g = false;
887
818 if (mac->link_state == MAC80211_LINKED_SCANNING) { 888 if (mac->link_state == MAC80211_LINKED_SCANNING) {
819 mac->link_state = MAC80211_LINKED; 889 mac->link_state = MAC80211_LINKED;
820 890 if (mac->opmode == NL80211_IFTYPE_STATION) {
821 /* fix fwlps issue */ 891 /* fix fwlps issue */
822 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); 892 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
823 893 }
824 if (rtlpriv->dm.useramask)
825 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
826 else
827 rtlpriv->cfg->ops->update_rate_table(hw);
828
829 } 894 }
830 895
896 rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
831} 897}
832 898
833static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 899static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -858,49 +924,73 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
858 rtl_ips_nic_on(hw); 924 rtl_ips_nic_on(hw);
859 mutex_lock(&rtlpriv->locks.conf_mutex); 925 mutex_lock(&rtlpriv->locks.conf_mutex);
860 /* <1> get encryption alg */ 926 /* <1> get encryption alg */
927
861 switch (key->cipher) { 928 switch (key->cipher) {
862 case WLAN_CIPHER_SUITE_WEP40: 929 case WLAN_CIPHER_SUITE_WEP40:
863 key_type = WEP40_ENCRYPTION; 930 key_type = WEP40_ENCRYPTION;
864 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:WEP40\n")); 931 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
865 rtlpriv->sec.use_defaultkey = true;
866 break; 932 break;
867 case WLAN_CIPHER_SUITE_WEP104: 933 case WLAN_CIPHER_SUITE_WEP104:
868 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 934 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
869 ("alg:WEP104\n")); 935 ("alg:WEP104\n"));
870 key_type = WEP104_ENCRYPTION; 936 key_type = WEP104_ENCRYPTION;
871 rtlpriv->sec.use_defaultkey = true;
872 break; 937 break;
873 case WLAN_CIPHER_SUITE_TKIP: 938 case WLAN_CIPHER_SUITE_TKIP:
874 key_type = TKIP_ENCRYPTION; 939 key_type = TKIP_ENCRYPTION;
875 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:TKIP\n")); 940 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
876 if (mac->opmode == NL80211_IFTYPE_ADHOC)
877 rtlpriv->sec.use_defaultkey = true;
878 break; 941 break;
879 case WLAN_CIPHER_SUITE_CCMP: 942 case WLAN_CIPHER_SUITE_CCMP:
880 key_type = AESCCMP_ENCRYPTION; 943 key_type = AESCCMP_ENCRYPTION;
881 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:CCMP\n")); 944 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
882 if (mac->opmode == NL80211_IFTYPE_ADHOC)
883 rtlpriv->sec.use_defaultkey = true;
884 break; 945 break;
885 default: 946 default:
886 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 947 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
887 ("alg_err:%x!!!!:\n", key->cipher)); 948 ("alg_err:%x!!!!:\n", key->cipher));
888 goto out_unlock; 949 goto out_unlock;
889 } 950 }
951 if (key_type == WEP40_ENCRYPTION ||
952 key_type == WEP104_ENCRYPTION ||
953 mac->opmode == NL80211_IFTYPE_ADHOC)
954 rtlpriv->sec.use_defaultkey = true;
955
890 /* <2> get key_idx */ 956 /* <2> get key_idx */
891 key_idx = (u8) (key->keyidx); 957 key_idx = (u8) (key->keyidx);
892 if (key_idx > 3) 958 if (key_idx > 3)
893 goto out_unlock; 959 goto out_unlock;
894 /* <3> if pairwise key enable_hw_sec */ 960 /* <3> if pairwise key enable_hw_sec */
895 group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE); 961 group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
896 if ((!group_key) || (mac->opmode == NL80211_IFTYPE_ADHOC) || 962
897 rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) { 963 /* WEP is always a group key, but there are two cases:
898 if (rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION && 964 * 1) WEP only: pure WEP encryption; here
899 (key_type == WEP40_ENCRYPTION || 965 * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION
900 key_type == WEP104_ENCRYPTION)) 966 * is true and enable_hw_sec is called while the
901 wep_only = true; 967 * WEP key is being set.
902 rtlpriv->sec.pairwise_enc_algorithm = key_type; 968 * 2) WEP (group) + AES (pairwise): some APs, e.g. Cisco,
903 rtlpriv->cfg->ops->enable_hw_sec(hw); 969 * use this; here enable_hw_sec is not called while
970 * the WEP key is being set. */
971 /* We must reset sec_info after linking and before setting
972 * the key, or some flags will be wrong. */
973 if (mac->opmode == NL80211_IFTYPE_AP) {
974 if (!group_key || key_type == WEP40_ENCRYPTION ||
975 key_type == WEP104_ENCRYPTION) {
976 if (group_key)
977 wep_only = true;
978 rtlpriv->cfg->ops->enable_hw_sec(hw);
979 }
980 } else {
981 if ((!group_key) || (mac->opmode == NL80211_IFTYPE_ADHOC) ||
982 rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
983 if (rtlpriv->sec.pairwise_enc_algorithm ==
984 NO_ENCRYPTION &&
985 (key_type == WEP40_ENCRYPTION ||
986 key_type == WEP104_ENCRYPTION))
987 wep_only = true;
988 rtlpriv->sec.pairwise_enc_algorithm = key_type;
989 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
990 ("set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1"
991 " TKIP:2 AES:4 WEP104:5)\n", key_type));
992 rtlpriv->cfg->ops->enable_hw_sec(hw);
993 }
904 } 994 }
905 /* <4> set key based on cmd */ 995 /* <4> set key based on cmd */
906 switch (cmd) { 996 switch (cmd) {
@@ -932,6 +1022,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
932 if (!sta) { 1022 if (!sta) {
933 RT_ASSERT(false, ("pairwise key without " 1023 RT_ASSERT(false, ("pairwise key without "
934 "mac_addr\n")); 1024 "mac_addr\n"));
1025
935 err = -EOPNOTSUPP; 1026 err = -EOPNOTSUPP;
936 goto out_unlock; 1027 goto out_unlock;
937 } 1028 }
@@ -959,6 +1050,10 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
959 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 1050 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
960 ("disable key delete one entry\n")); 1051 ("disable key delete one entry\n"));
961 /*set local buf about wep key. */ 1052 /*set local buf about wep key. */
1053 if (mac->opmode == NL80211_IFTYPE_AP) {
1054 if (sta)
1055 rtl_cam_del_entry(hw, sta->addr);
1056 }
962 memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen); 1057 memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
963 rtlpriv->sec.key_len[key_idx] = 0; 1058 rtlpriv->sec.key_len[key_idx] = 0;
964 memcpy(mac_addr, zero_addr, ETH_ALEN); 1059 memcpy(mac_addr, zero_addr, ETH_ALEN);
@@ -1011,6 +1106,18 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
1011 mutex_unlock(&rtlpriv->locks.conf_mutex); 1106 mutex_unlock(&rtlpriv->locks.conf_mutex);
1012} 1107}
1013 1108
1109/* This function is called by mac80211 to flush the TX buffer
1110 * before a channel switch or power save; otherwise buffered
1111 * packets may be sent after we go off-channel or the RF sleeps,
1112 * which may cause the AP to disassociate us. */
1113static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
1114{
1115 struct rtl_priv *rtlpriv = rtl_priv(hw);
1116
1117 if (rtlpriv->intf_ops->flush)
1118 rtlpriv->intf_ops->flush(hw, drop);
1119}
1120
1014const struct ieee80211_ops rtl_ops = { 1121const struct ieee80211_ops rtl_ops = {
1015 .start = rtl_op_start, 1122 .start = rtl_op_start,
1016 .stop = rtl_op_stop, 1123 .stop = rtl_op_stop,
@@ -1019,6 +1126,8 @@ const struct ieee80211_ops rtl_ops = {
1019 .remove_interface = rtl_op_remove_interface, 1126 .remove_interface = rtl_op_remove_interface,
1020 .config = rtl_op_config, 1127 .config = rtl_op_config,
1021 .configure_filter = rtl_op_configure_filter, 1128 .configure_filter = rtl_op_configure_filter,
1129 .sta_add = rtl_op_sta_add,
1130 .sta_remove = rtl_op_sta_remove,
1022 .set_key = rtl_op_set_key, 1131 .set_key = rtl_op_set_key,
1023 .conf_tx = rtl_op_conf_tx, 1132 .conf_tx = rtl_op_conf_tx,
1024 .bss_info_changed = rtl_op_bss_info_changed, 1133 .bss_info_changed = rtl_op_bss_info_changed,
@@ -1030,4 +1139,5 @@ const struct ieee80211_ops rtl_ops = {
1030 .sw_scan_start = rtl_op_sw_scan_start, 1139 .sw_scan_start = rtl_op_sw_scan_start,
1031 .sw_scan_complete = rtl_op_sw_scan_complete, 1140 .sw_scan_complete = rtl_op_sw_scan_complete,
1032 .rfkill_poll = rtl_op_rfkill_poll, 1141 .rfkill_poll = rtl_op_rfkill_poll,
1142 .flush = rtl_op_flush,
1033}; 1143};
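For context, an ops table such as rtl_ops is what the driver hands to mac80211 when it allocates the hardware object; mac80211 then calls back through it (.config, .bss_info_changed, .flush, ...). A minimal sketch of that registration step, with a hypothetical ops table and private struct:

	#include <net/mac80211.h>

	struct my_priv { int placeholder; };    /* hypothetical driver state */

	static const struct ieee80211_ops my_ops = {
		/* .start, .stop, .tx, .config, ... wired up as above */
	};

	static int my_register(void)
	{
		struct ieee80211_hw *hw;
		int err;

		hw = ieee80211_alloc_hw(sizeof(struct my_priv), &my_ops);
		if (!hw)
			return -ENOMEM;
		/* fill in hw capabilities, supported bands, sta_data_size, ... */
		err = ieee80211_register_hw(hw);
		if (err)
			ieee80211_free_hw(hw);
		return err;
	}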
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 0ef31c3c6196..4b247db2861d 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -24,6 +24,7 @@
24 * Hsinchu 300, Taiwan. 24 * Hsinchu 300, Taiwan.
25 * 25 *
26 * Larry Finger <Larry.Finger@lwfinger.net> 26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
27 *****************************************************************************/ 28 *****************************************************************************/
28 29
29#ifndef __RTL_CORE_H__ 30#ifndef __RTL_CORE_H__
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 590f14f45a89..50de6f5d8a56 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -52,8 +52,6 @@ static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
52 {11, 0, 0, 28} 52 {11, 0, 0, 28}
53}; 53};
54 54
55static void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset,
56 u8 *pbuf);
57static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset, 55static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
58 u8 *value); 56 u8 *value);
59static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset, 57static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
@@ -79,7 +77,7 @@ static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
79 u8 *targetdata); 77 u8 *targetdata);
80static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw, 78static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
81 u16 efuse_addr, u8 word_en, u8 *data); 79 u16 efuse_addr, u8 word_en, u8 *data);
82static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, 80static void efuse_power_switch(struct ieee80211_hw *hw, u8 write,
83 u8 pwrstate); 81 u8 pwrstate);
84static u16 efuse_get_current_size(struct ieee80211_hw *hw); 82static u16 efuse_get_current_size(struct ieee80211_hw *hw);
85static u8 efuse_calculate_word_cnts(u8 word_en); 83static u8 efuse_calculate_word_cnts(u8 word_en);
@@ -115,8 +113,10 @@ u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
115 u8 bytetemp; 113 u8 bytetemp;
116 u8 temp; 114 u8 temp;
117 u32 k = 0; 115 u32 k = 0;
116 const u32 efuse_len =
117 rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
118 118
119 if (address < EFUSE_REAL_CONTENT_LEN) { 119 if (address < efuse_len) {
120 temp = address & 0xFF; 120 temp = address & 0xFF;
121 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, 121 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
122 temp); 122 temp);
@@ -158,11 +158,13 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
158 u8 bytetemp; 158 u8 bytetemp;
159 u8 temp; 159 u8 temp;
160 u32 k = 0; 160 u32 k = 0;
161 const u32 efuse_len =
162 rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
161 163
162 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 164 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
163 ("Addr=%x Data =%x\n", address, value)); 165 ("Addr=%x Data =%x\n", address, value));
164 166
165 if (address < EFUSE_REAL_CONTENT_LEN) { 167 if (address < efuse_len) {
166 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value); 168 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
167 169
168 temp = address & 0xFF; 170 temp = address & 0xFF;
@@ -198,7 +200,7 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
198 200
199} 201}
200 202
201static void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf) 203void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
202{ 204{
203 struct rtl_priv *rtlpriv = rtl_priv(hw); 205 struct rtl_priv *rtlpriv = rtl_priv(hw);
204 u32 value32; 206 u32 value32;
@@ -233,26 +235,45 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
233{ 235{
234 struct rtl_priv *rtlpriv = rtl_priv(hw); 236 struct rtl_priv *rtlpriv = rtl_priv(hw);
235 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 237 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
236 u8 efuse_tbl[EFUSE_MAP_LEN]; 238 u8 *efuse_tbl;
237 u8 rtemp8[1]; 239 u8 rtemp8[1];
238 u16 efuse_addr = 0; 240 u16 efuse_addr = 0;
239 u8 offset, wren; 241 u8 offset, wren;
240 u16 i; 242 u16 i;
241 u16 j; 243 u16 j;
242 u16 efuse_word[EFUSE_MAX_SECTION][EFUSE_MAX_WORD_UNIT]; 244 const u16 efuse_max_section =
245 rtlpriv->cfg->maps[EFUSE_MAX_SECTION_MAP];
246 const u32 efuse_len =
247 rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
248 u16 **efuse_word;
243 u16 efuse_utilized = 0; 249 u16 efuse_utilized = 0;
244 u8 efuse_usage; 250 u8 efuse_usage;
245 251
246 if ((_offset + _size_byte) > EFUSE_MAP_LEN) { 252 if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
247 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 253 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
248 ("read_efuse(): Invalid offset(%#x) with read " 254 ("read_efuse(): Invalid offset(%#x) with read "
249 "bytes(%#x)!!\n", _offset, _size_byte)); 255 "bytes(%#x)!!\n", _offset, _size_byte));
250 return; 256 return;
251 } 257 }
252 258
253 for (i = 0; i < EFUSE_MAX_SECTION; i++) 259 /* allocate memory for efuse_tbl and efuse_word */
260 efuse_tbl = kmalloc(rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE] *
261 sizeof(u8), GFP_ATOMIC);
262 if (!efuse_tbl)
263 return;
264 efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
265 if (!efuse_word)
266 goto done;
267 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
268 efuse_word[i] = kmalloc(efuse_max_section * sizeof(u16),
269 GFP_ATOMIC);
270 if (!efuse_word[i])
271 goto done;
272 }
273
274 for (i = 0; i < efuse_max_section; i++)
254 for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) 275 for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
255 efuse_word[i][j] = 0xFFFF; 276 efuse_word[j][i] = 0xFFFF;
256 277
257 read_efuse_byte(hw, efuse_addr, rtemp8); 278 read_efuse_byte(hw, efuse_addr, rtemp8);
258 if (*rtemp8 != 0xFF) { 279 if (*rtemp8 != 0xFF) {
@@ -262,10 +283,10 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
262 efuse_addr++; 283 efuse_addr++;
263 } 284 }
264 285
265 while ((*rtemp8 != 0xFF) && (efuse_addr < EFUSE_REAL_CONTENT_LEN)) { 286 while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_len)) {
266 offset = ((*rtemp8 >> 4) & 0x0f); 287 offset = ((*rtemp8 >> 4) & 0x0f);
267 288
268 if (offset < EFUSE_MAX_SECTION) { 289 if (offset < efuse_max_section) {
269 wren = (*rtemp8 & 0x0f); 290 wren = (*rtemp8 & 0x0f);
270 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, 291 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
271 ("offset-%d Worden=%x\n", offset, wren)); 292 ("offset-%d Worden=%x\n", offset, wren));
@@ -279,9 +300,10 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
279 read_efuse_byte(hw, efuse_addr, rtemp8); 300 read_efuse_byte(hw, efuse_addr, rtemp8);
280 efuse_addr++; 301 efuse_addr++;
281 efuse_utilized++; 302 efuse_utilized++;
282 efuse_word[offset][i] = (*rtemp8 & 0xff); 303 efuse_word[i][offset] =
304 (*rtemp8 & 0xff);
283 305
284 if (efuse_addr >= EFUSE_REAL_CONTENT_LEN) 306 if (efuse_addr >= efuse_len)
285 break; 307 break;
286 308
287 RTPRINT(rtlpriv, FEEPROM, 309 RTPRINT(rtlpriv, FEEPROM,
@@ -291,10 +313,10 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
291 read_efuse_byte(hw, efuse_addr, rtemp8); 313 read_efuse_byte(hw, efuse_addr, rtemp8);
292 efuse_addr++; 314 efuse_addr++;
293 efuse_utilized++; 315 efuse_utilized++;
294 efuse_word[offset][i] |= 316 efuse_word[i][offset] |=
295 (((u16)*rtemp8 << 8) & 0xff00); 317 (((u16)*rtemp8 << 8) & 0xff00);
296 318
297 if (efuse_addr >= EFUSE_REAL_CONTENT_LEN) 319 if (efuse_addr >= efuse_len)
298 break; 320 break;
299 } 321 }
300 322
@@ -305,18 +327,18 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
305 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, 327 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
306 ("Addr=%d\n", efuse_addr)); 328 ("Addr=%d\n", efuse_addr));
307 read_efuse_byte(hw, efuse_addr, rtemp8); 329 read_efuse_byte(hw, efuse_addr, rtemp8);
308 if (*rtemp8 != 0xFF && (efuse_addr < 512)) { 330 if (*rtemp8 != 0xFF && (efuse_addr < efuse_len)) {
309 efuse_utilized++; 331 efuse_utilized++;
310 efuse_addr++; 332 efuse_addr++;
311 } 333 }
312 } 334 }
313 335
314 for (i = 0; i < EFUSE_MAX_SECTION; i++) { 336 for (i = 0; i < efuse_max_section; i++) {
315 for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) { 337 for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
316 efuse_tbl[(i * 8) + (j * 2)] = 338 efuse_tbl[(i * 8) + (j * 2)] =
317 (efuse_word[i][j] & 0xff); 339 (efuse_word[j][i] & 0xff);
318 efuse_tbl[(i * 8) + ((j * 2) + 1)] = 340 efuse_tbl[(i * 8) + ((j * 2) + 1)] =
319 ((efuse_word[i][j] >> 8) & 0xff); 341 ((efuse_word[j][i] >> 8) & 0xff);
320 } 342 }
321 } 343 }
322 344
@@ -324,12 +346,17 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
324 pbuf[i] = efuse_tbl[_offset + i]; 346 pbuf[i] = efuse_tbl[_offset + i];
325 347
326 rtlefuse->efuse_usedbytes = efuse_utilized; 348 rtlefuse->efuse_usedbytes = efuse_utilized;
327 efuse_usage = (u8)((efuse_utilized * 100) / EFUSE_REAL_CONTENT_LEN); 349 efuse_usage = (u8) ((efuse_utilized * 100) / efuse_len);
328 rtlefuse->efuse_usedpercentage = efuse_usage; 350 rtlefuse->efuse_usedpercentage = efuse_usage;
329 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES, 351 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
330 (u8 *)&efuse_utilized); 352 (u8 *)&efuse_utilized);
331 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE, 353 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
332 (u8 *)&efuse_usage); 354 (u8 *)&efuse_usage);
355done:
356 for (i = 0; efuse_word && i < EFUSE_MAX_WORD_UNIT; i++)
357 kfree(efuse_word[i]);
358 kfree(efuse_word);
359 kfree(efuse_tbl);
333} 360}
334 361
335bool efuse_shadow_update_chk(struct ieee80211_hw *hw) 362bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
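The read_efuse() changes above swap two large on-stack arrays for heap allocations sized from the per-chip map table, all released through a single exit label. A compact sketch of that allocate/free shape (sizes and names are placeholders, not the driver's real map values):

	#include <linux/slab.h>

	static u16 **alloc_word_table(unsigned int words, unsigned int sections)
	{
		u16 **tbl;
		unsigned int i;

		/* kcalloc zeroes the row pointers, so a partial failure can
		 * be unwound safely: kfree(NULL) is a no-op. */
		tbl = kcalloc(words, sizeof(*tbl), GFP_ATOMIC);
		if (!tbl)
			return NULL;
		for (i = 0; i < words; i++) {
			tbl[i] = kmalloc(sections * sizeof(u16), GFP_ATOMIC);
			if (!tbl[i])
				goto err;
		}
		return tbl;
	err:
		while (i--)
			kfree(tbl[i]);
		kfree(tbl);
		return NULL;
	}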
@@ -338,11 +365,11 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
338 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 365 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
339 u8 section_idx, i, Base; 366 u8 section_idx, i, Base;
340 u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used; 367 u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
341 bool bwordchanged, bresult = true; 368 bool wordchanged, result = true;
342 369
343 for (section_idx = 0; section_idx < 16; section_idx++) { 370 for (section_idx = 0; section_idx < 16; section_idx++) {
344 Base = section_idx * 8; 371 Base = section_idx * 8;
345 bwordchanged = false; 372 wordchanged = false;
346 373
347 for (i = 0; i < 8; i = i + 2) { 374 for (i = 0; i < 8; i = i + 2) {
348 if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] != 375 if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
@@ -351,11 +378,11 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
351 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i + 378 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
352 1])) { 379 1])) {
353 words_need++; 380 words_need++;
354 bwordchanged = true; 381 wordchanged = true;
355 } 382 }
356 } 383 }
357 384
358 if (bwordchanged == true) 385 if (wordchanged == true)
359 hdr_num++; 386 hdr_num++;
360 } 387 }
361 388
@@ -364,14 +391,14 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
364 391
365 if ((totalbytes + efuse_used) >= 392 if ((totalbytes + efuse_used) >=
366 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) 393 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))
367 bresult = false; 394 result = false;
368 395
369 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 396 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
370 ("efuse_shadow_update_chk(): totalbytes(%#x), " 397 ("efuse_shadow_update_chk(): totalbytes(%#x), "
371 "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n", 398 "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
372 totalbytes, hdr_num, words_need, efuse_used)); 399 totalbytes, hdr_num, words_need, efuse_used));
373 400
374 return bresult; 401 return result;
375} 402}
376 403
377void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, 404void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
@@ -394,7 +421,7 @@ void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
394 else if (type == 2) 421 else if (type == 2)
395 efuse_shadow_write_2byte(hw, offset, (u16) value); 422 efuse_shadow_write_2byte(hw, offset, (u16) value);
396 else if (type == 4) 423 else if (type == 4)
397 efuse_shadow_write_4byte(hw, offset, (u32) value); 424 efuse_shadow_write_4byte(hw, offset, value);
398 425
399} 426}
400 427
@@ -478,9 +505,10 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
478 struct rtl_priv *rtlpriv = rtl_priv(hw); 505 struct rtl_priv *rtlpriv = rtl_priv(hw);
479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 506 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
480 507
481 if (rtlefuse->autoload_failflag == true) { 508 if (rtlefuse->autoload_failflag == true)
482 memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128); 509 memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF,
483 } else 510 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
511 else
484 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); 512 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
485 513
486 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], 514 memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
@@ -572,7 +600,7 @@ static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
572{ 600{
573 struct rtl_priv *rtlpriv = rtl_priv(hw); 601 struct rtl_priv *rtlpriv = rtl_priv(hw);
574 u8 tmpidx = 0; 602 u8 tmpidx = 0;
575 int bresult; 603 int result;
576 604
577 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, 605 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
578 (u8) (addr & 0xff)); 606 (u8) (addr & 0xff));
@@ -592,19 +620,18 @@ static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
592 620
593 if (tmpidx < 100) { 621 if (tmpidx < 100) {
594 *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); 622 *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
595 bresult = true; 623 result = true;
596 } else { 624 } else {
597 *data = 0xff; 625 *data = 0xff;
598 bresult = false; 626 result = false;
599 } 627 }
600 return bresult; 628 return result;
601} 629}
602 630
603static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data) 631static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
604{ 632{
605 struct rtl_priv *rtlpriv = rtl_priv(hw); 633 struct rtl_priv *rtlpriv = rtl_priv(hw);
606 u8 tmpidx = 0; 634 u8 tmpidx = 0;
607 bool bresult;
608 635
609 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, 636 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
610 ("Addr = %x Data=%x\n", addr, data)); 637 ("Addr = %x Data=%x\n", addr, data));
@@ -626,17 +653,16 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
626 } 653 }
627 654
628 if (tmpidx < 100) 655 if (tmpidx < 100)
629 bresult = true; 656 return true;
630 else
631 bresult = false;
632 657
633 return bresult; 658 return false;
634} 659}
635 660
636static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse) 661static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse)
637{ 662{
663 struct rtl_priv *rtlpriv = rtl_priv(hw);
638 efuse_power_switch(hw, false, true); 664 efuse_power_switch(hw, false, true);
639 read_efuse(hw, 0, 128, efuse); 665 read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse);
640 efuse_power_switch(hw, false, false); 666 efuse_power_switch(hw, false, false);
641} 667}
642 668
@@ -644,7 +670,7 @@ static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
644 u8 efuse_data, u8 offset, u8 *tmpdata, 670 u8 efuse_data, u8 offset, u8 *tmpdata,
645 u8 *readstate) 671 u8 *readstate)
646{ 672{
647 bool bdataempty = true; 673 bool dataempty = true;
648 u8 hoffset; 674 u8 hoffset;
649 u8 tmpidx; 675 u8 tmpidx;
650 u8 hworden; 676 u8 hworden;
@@ -660,13 +686,13 @@ static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
660 &efuse_data)) { 686 &efuse_data)) {
661 tmpdata[tmpidx] = efuse_data; 687 tmpdata[tmpidx] = efuse_data;
662 if (efuse_data != 0xff) 688 if (efuse_data != 0xff)
663 bdataempty = true; 689 dataempty = true;
664 } 690 }
665 } 691 }
666 692
667 if (bdataempty == true) 693 if (dataempty == true) {
668 *readstate = PG_STATE_DATA; 694 *readstate = PG_STATE_DATA;
669 else { 695 } else {
670 *efuse_addr = *efuse_addr + (word_cnts * 2) + 1; 696 *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
671 *readstate = PG_STATE_HEADER; 697 *readstate = PG_STATE_HEADER;
672 } 698 }
@@ -680,12 +706,9 @@ static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
680static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) 706static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
681{ 707{
682 u8 readstate = PG_STATE_HEADER; 708 u8 readstate = PG_STATE_HEADER;
683 709 bool continual = true;
684 bool bcontinual = true;
685
686 u8 efuse_data, word_cnts = 0; 710 u8 efuse_data, word_cnts = 0;
687 u16 efuse_addr = 0; 711 u16 efuse_addr = 0;
688 u8 hworden = 0;
689 u8 tmpdata[8]; 712 u8 tmpdata[8];
690 713
691 if (data == NULL) 714 if (data == NULL)
@@ -696,7 +719,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
696 memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); 719 memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
697 memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); 720 memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
698 721
699 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { 722 while (continual && (efuse_addr < EFUSE_MAX_SIZE)) {
700 if (readstate & PG_STATE_HEADER) { 723 if (readstate & PG_STATE_HEADER) {
701 if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) 724 if (efuse_one_byte_read(hw, efuse_addr, &efuse_data)
702 && (efuse_data != 0xFF)) 725 && (efuse_data != 0xFF))
@@ -705,9 +728,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
705 offset, tmpdata, 728 offset, tmpdata,
706 &readstate); 729 &readstate);
707 else 730 else
708 bcontinual = false; 731 continual = false;
709 } else if (readstate & PG_STATE_DATA) { 732 } else if (readstate & PG_STATE_DATA) {
710 efuse_word_enable_data_read(hworden, tmpdata, data); 733 efuse_word_enable_data_read(0, tmpdata, data);
711 efuse_addr = efuse_addr + (word_cnts * 2) + 1; 734 efuse_addr = efuse_addr + (word_cnts * 2) + 1;
712 readstate = PG_STATE_HEADER; 735 readstate = PG_STATE_HEADER;
713 } 736 }
@@ -725,13 +748,13 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
725} 748}
726 749
727static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, 750static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
728 u8 efuse_data, u8 offset, int *bcontinual, 751 u8 efuse_data, u8 offset, int *continual,
729 u8 *write_state, struct pgpkt_struct *target_pkt, 752 u8 *write_state, struct pgpkt_struct *target_pkt,
730 int *repeat_times, int *bresult, u8 word_en) 753 int *repeat_times, int *result, u8 word_en)
731{ 754{
732 struct rtl_priv *rtlpriv = rtl_priv(hw); 755 struct rtl_priv *rtlpriv = rtl_priv(hw);
733 struct pgpkt_struct tmp_pkt; 756 struct pgpkt_struct tmp_pkt;
734 int bdataempty = true; 757 bool dataempty = true;
735 u8 originaldata[8 * sizeof(u8)]; 758 u8 originaldata[8 * sizeof(u8)];
736 u8 badworden = 0x0F; 759 u8 badworden = 0x0F;
737 u8 match_word_en, tmp_word_en; 760 u8 match_word_en, tmp_word_en;
@@ -751,10 +774,10 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
751 u16 address = *efuse_addr + 1 + tmpindex; 774 u16 address = *efuse_addr + 1 + tmpindex;
752 if (efuse_one_byte_read(hw, address, 775 if (efuse_one_byte_read(hw, address,
753 &efuse_data) && (efuse_data != 0xFF)) 776 &efuse_data) && (efuse_data != 0xFF))
754 bdataempty = false; 777 dataempty = false;
755 } 778 }
756 779
757 if (bdataempty == false) { 780 if (dataempty == false) {
758 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; 781 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
759 *write_state = PG_STATE_HEADER; 782 *write_state = PG_STATE_HEADER;
760 } else { 783 } else {
@@ -799,24 +822,25 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
799 tmp_word_en &= (~BIT(1)); 822 tmp_word_en &= (~BIT(1));
800 823
801 if ((target_pkt->word_en & BIT(2)) ^ 824 if ((target_pkt->word_en & BIT(2)) ^
802 (match_word_en & BIT(2))) 825 (match_word_en & BIT(2)))
803 tmp_word_en &= (~BIT(2)); 826 tmp_word_en &= (~BIT(2));
804 827
805 if ((target_pkt->word_en & BIT(3)) ^ 828 if ((target_pkt->word_en & BIT(3)) ^
806 (match_word_en & BIT(3))) 829 (match_word_en & BIT(3)))
807 tmp_word_en &= (~BIT(3)); 830 tmp_word_en &= (~BIT(3));
808 831
809 if ((tmp_word_en & 0x0F) != 0x0F) { 832 if ((tmp_word_en & 0x0F) != 0x0F) {
810 *efuse_addr = efuse_get_current_size(hw); 833 *efuse_addr = efuse_get_current_size(hw);
811 target_pkt->offset = offset; 834 target_pkt->offset = offset;
812 target_pkt->word_en = tmp_word_en; 835 target_pkt->word_en = tmp_word_en;
813 } else 836 } else {
814 *bcontinual = false; 837 *continual = false;
838 }
815 *write_state = PG_STATE_HEADER; 839 *write_state = PG_STATE_HEADER;
816 *repeat_times += 1; 840 *repeat_times += 1;
817 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) { 841 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
818 *bcontinual = false; 842 *continual = false;
819 *bresult = false; 843 *result = false;
820 } 844 }
821 } else { 845 } else {
822 *efuse_addr += (2 * tmp_word_cnts) + 1; 846 *efuse_addr += (2 * tmp_word_cnts) + 1;
@@ -830,9 +854,9 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
830} 854}
831 855
832static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, 856static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
833 int *bcontinual, u8 *write_state, 857 int *continual, u8 *write_state,
834 struct pgpkt_struct target_pkt, 858 struct pgpkt_struct target_pkt,
835 int *repeat_times, int *bresult) 859 int *repeat_times, int *result)
836{ 860{
837 struct rtl_priv *rtlpriv = rtl_priv(hw); 861 struct rtl_priv *rtlpriv = rtl_priv(hw);
838 struct pgpkt_struct tmp_pkt; 862 struct pgpkt_struct tmp_pkt;
@@ -846,14 +870,14 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
846 efuse_one_byte_write(hw, *efuse_addr, pg_header); 870 efuse_one_byte_write(hw, *efuse_addr, pg_header);
847 efuse_one_byte_read(hw, *efuse_addr, &tmp_header); 871 efuse_one_byte_read(hw, *efuse_addr, &tmp_header);
848 872
849 if (tmp_header == pg_header) 873 if (tmp_header == pg_header) {
850 *write_state = PG_STATE_DATA; 874 *write_state = PG_STATE_DATA;
851 else if (tmp_header == 0xFF) { 875 } else if (tmp_header == 0xFF) {
852 *write_state = PG_STATE_HEADER; 876 *write_state = PG_STATE_HEADER;
853 *repeat_times += 1; 877 *repeat_times += 1;
854 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) { 878 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
855 *bcontinual = false; 879 *continual = false;
856 *bresult = false; 880 *result = false;
857 } 881 }
858 } else { 882 } else {
859 tmp_pkt.offset = (tmp_header >> 4) & 0x0F; 883 tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
@@ -875,17 +899,19 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
875 reorg_worden, 899 reorg_worden,
876 originaldata); 900 originaldata);
877 *efuse_addr = efuse_get_current_size(hw); 901 *efuse_addr = efuse_get_current_size(hw);
878 } else 902 } else {
879 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) 903 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2)
880 + 1; 904 + 1;
881 } else 905 }
906 } else {
882 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; 907 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
908 }
883 909
884 *write_state = PG_STATE_HEADER; 910 *write_state = PG_STATE_HEADER;
885 *repeat_times += 1; 911 *repeat_times += 1;
886 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) { 912 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
887 *bcontinual = false; 913 *continual = false;
888 *bresult = false; 914 *result = false;
889 } 915 }
890 916
891 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, 917 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
@@ -899,7 +925,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
899 struct rtl_priv *rtlpriv = rtl_priv(hw); 925 struct rtl_priv *rtlpriv = rtl_priv(hw);
900 struct pgpkt_struct target_pkt; 926 struct pgpkt_struct target_pkt;
901 u8 write_state = PG_STATE_HEADER; 927 u8 write_state = PG_STATE_HEADER;
902 int bcontinual = true, bdataempty = true, bresult = true; 928 int continual = true, dataempty = true, result = true;
903 u16 efuse_addr = 0; 929 u16 efuse_addr = 0;
904 u8 efuse_data; 930 u8 efuse_data;
905 u8 target_word_cnts = 0; 931 u8 target_word_cnts = 0;
@@ -923,11 +949,11 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
923 949
924 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n")); 950 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n"));
925 951
926 while (bcontinual && (efuse_addr < 952 while (continual && (efuse_addr <
927 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) { 953 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) {
928 954
929 if (write_state == PG_STATE_HEADER) { 955 if (write_state == PG_STATE_HEADER) {
930 bdataempty = true; 956 dataempty = true;
931 badworden = 0x0F; 957 badworden = 0x0F;
932 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, 958 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
933 ("efuse PG_STATE_HEADER\n")); 959 ("efuse PG_STATE_HEADER\n"));
@@ -936,32 +962,30 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
936 (efuse_data != 0xFF)) 962 (efuse_data != 0xFF))
937 efuse_write_data_case1(hw, &efuse_addr, 963 efuse_write_data_case1(hw, &efuse_addr,
938 efuse_data, offset, 964 efuse_data, offset,
939 &bcontinual, 965 &continual,
940 &write_state, &target_pkt, 966 &write_state, &target_pkt,
941 &repeat_times, &bresult, 967 &repeat_times, &result,
942 word_en); 968 word_en);
943 else 969 else
944 efuse_write_data_case2(hw, &efuse_addr, 970 efuse_write_data_case2(hw, &efuse_addr,
945 &bcontinual, 971 &continual,
946 &write_state, 972 &write_state,
947 target_pkt, 973 target_pkt,
948 &repeat_times, 974 &repeat_times,
949 &bresult); 975 &result);
950 976
951 } else if (write_state == PG_STATE_DATA) { 977 } else if (write_state == PG_STATE_DATA) {
952 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, 978 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
953 ("efuse PG_STATE_DATA\n")); 979 ("efuse PG_STATE_DATA\n"));
954 badworden = 0x0f;
955 badworden = 980 badworden =
956 efuse_word_enable_data_write(hw, efuse_addr + 1, 981 efuse_word_enable_data_write(hw, efuse_addr + 1,
957 target_pkt.word_en, 982 target_pkt.word_en,
958 target_pkt.data); 983 target_pkt.data);
959 984
960 if ((badworden & 0x0F) == 0x0F) { 985 if ((badworden & 0x0F) == 0x0F) {
961 bcontinual = false; 986 continual = false;
962 } else { 987 } else {
963 efuse_addr = 988 efuse_addr += (2 * target_word_cnts) + 1;
964 efuse_addr + (2 * target_word_cnts) + 1;
965 989
966 target_pkt.offset = offset; 990 target_pkt.offset = offset;
967 target_pkt.word_en = badworden; 991 target_pkt.word_en = badworden;
@@ -971,8 +995,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
971 write_state = PG_STATE_HEADER; 995 write_state = PG_STATE_HEADER;
972 repeat_times++; 996 repeat_times++;
973 if (repeat_times > EFUSE_REPEAT_THRESHOLD_) { 997 if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
974 bcontinual = false; 998 continual = false;
975 bresult = false; 999 result = false;
976 } 1000 }
977 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, 1001 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
978 ("efuse PG_STATE_HEADER-3\n")); 1002 ("efuse PG_STATE_HEADER-3\n"));
@@ -1072,13 +1096,15 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
1072 return badworden; 1096 return badworden;
1073} 1097}
1074 1098
1075static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate) 1099static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
1076{ 1100{
1077 struct rtl_priv *rtlpriv = rtl_priv(hw); 1101 struct rtl_priv *rtlpriv = rtl_priv(hw);
1102 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1078 u8 tempval; 1103 u8 tempval;
1079 u16 tmpV16; 1104 u16 tmpV16;
1080 1105
1081 if (pwrstate == true) { 1106 if (pwrstate && (rtlhal->hw_type !=
1107 HARDWARE_TYPE_RTL8192SE)) {
1082 tmpV16 = rtl_read_word(rtlpriv, 1108 tmpV16 = rtl_read_word(rtlpriv,
1083 rtlpriv->cfg->maps[SYS_ISO_CTRL]); 1109 rtlpriv->cfg->maps[SYS_ISO_CTRL]);
1084 if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) { 1110 if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
@@ -1106,20 +1132,29 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
1106 } 1132 }
1107 } 1133 }
1108 1134
1109 if (pwrstate == true) { 1135 if (pwrstate) {
1110 if (bwrite == true) { 1136 if (write) {
1111 tempval = rtl_read_byte(rtlpriv, 1137 tempval = rtl_read_byte(rtlpriv,
1112 rtlpriv->cfg->maps[EFUSE_TEST] + 1138 rtlpriv->cfg->maps[EFUSE_TEST] +
1113 3); 1139 3);
1114 tempval &= 0x0F; 1140
1115 tempval |= (VOLTAGE_V25 << 4); 1141 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) {
1142 tempval &= 0x0F;
1143 tempval |= (VOLTAGE_V25 << 4);
1144 }
1145
1116 rtl_write_byte(rtlpriv, 1146 rtl_write_byte(rtlpriv,
1117 rtlpriv->cfg->maps[EFUSE_TEST] + 3, 1147 rtlpriv->cfg->maps[EFUSE_TEST] + 3,
1118 (tempval | 0x80)); 1148 (tempval | 0x80));
1119 } 1149 }
1120 1150
1151 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
1152 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
1153 0x03);
1154 }
1155
1121 } else { 1156 } else {
1122 if (bwrite == true) { 1157 if (write) {
1123 tempval = rtl_read_byte(rtlpriv, 1158 tempval = rtl_read_byte(rtlpriv,
1124 rtlpriv->cfg->maps[EFUSE_TEST] + 1159 rtlpriv->cfg->maps[EFUSE_TEST] +
1125 3); 1160 3);
@@ -1128,18 +1163,23 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
1128 (tempval & 0x7F)); 1163 (tempval & 0x7F));
1129 } 1164 }
1130 1165
1166 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
1167 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK],
1168 0x02);
1169 }
1170
1131 } 1171 }
1132 1172
1133} 1173}
1134 1174
1135static u16 efuse_get_current_size(struct ieee80211_hw *hw) 1175static u16 efuse_get_current_size(struct ieee80211_hw *hw)
1136{ 1176{
1137 int bcontinual = true; 1177 int continual = true;
1138 u16 efuse_addr = 0; 1178 u16 efuse_addr = 0;
1139 u8 hoffset, hworden; 1179 u8 hoffset, hworden;
1140 u8 efuse_data, word_cnts; 1180 u8 efuse_data, word_cnts;
1141 1181
1142 while (bcontinual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) 1182 while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
1143 && (efuse_addr < EFUSE_MAX_SIZE)) { 1183 && (efuse_addr < EFUSE_MAX_SIZE)) {
1144 if (efuse_data != 0xFF) { 1184 if (efuse_data != 0xFF) {
1145 hoffset = (efuse_data >> 4) & 0x0F; 1185 hoffset = (efuse_data >> 4) & 0x0F;
@@ -1147,7 +1187,7 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
1147 word_cnts = efuse_calculate_word_cnts(hworden); 1187 word_cnts = efuse_calculate_word_cnts(hworden);
1148 efuse_addr = efuse_addr + (word_cnts * 2) + 1; 1188 efuse_addr = efuse_addr + (word_cnts * 2) + 1;
1149 } else { 1189 } else {
1150 bcontinual = false; 1190 continual = false;
1151 } 1191 }
1152 } 1192 }
1153 1193
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
index 47774dd4c2a6..164dabaa7615 100644
--- a/drivers/net/wireless/rtlwifi/efuse.h
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -30,9 +30,10 @@
30#ifndef __RTL_EFUSE_H_ 30#ifndef __RTL_EFUSE_H_
31#define __RTL_EFUSE_H_ 31#define __RTL_EFUSE_H_
32 32
33#define EFUSE_IC_ID_OFFSET 506
34
33#define EFUSE_REAL_CONTENT_LEN 512 35#define EFUSE_REAL_CONTENT_LEN 512
34#define EFUSE_MAP_LEN 128 36#define EFUSE_MAP_LEN 128
35#define EFUSE_MAX_SECTION 16
36#define EFUSE_MAX_WORD_UNIT 4 37#define EFUSE_MAX_WORD_UNIT 4
37 38
38#define EFUSE_INIT_MAP 0 39#define EFUSE_INIT_MAP 0
@@ -52,6 +53,7 @@
52#define _PRE_EXECUTE_READ_CMD_ 53#define _PRE_EXECUTE_READ_CMD_
53 54
54#define EFUSE_REPEAT_THRESHOLD_ 3 55#define EFUSE_REPEAT_THRESHOLD_ 3
56#define EFUSE_ERROE_HANDLE 1
55 57
56struct efuse_map { 58struct efuse_map {
57 u8 offset; 59 u8 offset;
@@ -103,6 +105,7 @@ struct efuse_priv {
103 u8 tx_power_g[14]; 105 u8 tx_power_g[14];
104}; 106};
105 107
108extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
106extern void efuse_initialize(struct ieee80211_hw *hw); 109extern void efuse_initialize(struct ieee80211_hw *hw);
107extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address); 110extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
108extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value); 111extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5938f6ee21e4..a40952845436 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -32,6 +32,7 @@
32#include "pci.h" 32#include "pci.h"
33#include "base.h" 33#include "base.h"
34#include "ps.h" 34#include "ps.h"
35#include "efuse.h"
35 36
36static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { 37static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
37 INTEL_VENDOR_ID, 38 INTEL_VENDOR_ID,
@@ -40,6 +41,31 @@ static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
40 SIS_VENDOR_ID 41 SIS_VENDOR_ID
41}; 42};
42 43
44static const u8 ac_to_hwq[] = {
45 VO_QUEUE,
46 VI_QUEUE,
47 BE_QUEUE,
48 BK_QUEUE
49};
50
51static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
52 struct sk_buff *skb)
53{
54 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
55 __le16 fc = rtl_get_fc(skb);
56 u8 queue_index = skb_get_queue_mapping(skb);
57
58 if (unlikely(ieee80211_is_beacon(fc)))
59 return BEACON_QUEUE;
60 if (ieee80211_is_mgmt(fc))
61 return MGNT_QUEUE;
62 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
63 if (ieee80211_is_nullfunc(fc))
64 return HIGH_QUEUE;
65
66 return ac_to_hwq[queue_index];
67}
68
43/* Update PCI dependent default settings*/ 69/* Update PCI dependent default settings*/
44static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) 70static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
45{ 71{
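
The new _rtl_mac_to_hwqueue() above replaces the per-call switch that this patch removes further down: mac80211's four access categories are looked up directly in ac_to_hwq, while beacons, management frames and (on the RTL8192SE only) null-data frames are steered to dedicated rings. A stand-alone model of that decision, with placeholder queue values and a simplified frame descriptor instead of a real struct sk_buff:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder ring ids; the driver takes the real values from its headers. */
    enum hw_queue { BK_QUEUE, BE_QUEUE, VI_QUEUE, VO_QUEUE,
                    BEACON_QUEUE, MGNT_QUEUE, HIGH_QUEUE };

    /* mac80211 queue-mapping order is VO, VI, BE, BK (index 0..3). */
    static const enum hw_queue ac_to_hwq[] = {
        VO_QUEUE, VI_QUEUE, BE_QUEUE, BK_QUEUE
    };

    struct frame {                 /* simplified stand-in for an skb */
        bool is_beacon, is_mgmt, is_nullfunc;
        unsigned int ac;           /* skb_get_queue_mapping() result, 0..3 */
    };

    static enum hw_queue mac_to_hwqueue(const struct frame *f, bool is_8192se)
    {
        if (f->is_beacon)
            return BEACON_QUEUE;
        if (f->is_mgmt)
            return MGNT_QUEUE;
        if (is_8192se && f->is_nullfunc)   /* 92SE sends null-data on HIGH */
            return HIGH_QUEUE;
        return ac_to_hwq[f->ac];
    }

    int main(void)
    {
        struct frame best_effort = { false, false, false, 2 };
        printf("BE data frame -> ring %d\n", mac_to_hwqueue(&best_effort, false));
        return 0;
    }
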
@@ -48,6 +74,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
48 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 74 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
49 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 75 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
50 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; 76 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
77 u8 init_aspm;
51 78
52 ppsc->reg_rfps_level = 0; 79 ppsc->reg_rfps_level = 0;
53 ppsc->support_aspm = 0; 80 ppsc->support_aspm = 0;
@@ -125,7 +152,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
125 bool support_backdoor = true; 152 bool support_backdoor = true;
126 ppsc->support_aspm = support_aspm; 153 ppsc->support_aspm = support_aspm;
127 154
128 /*if(priv->oem_id == RT_CID_TOSHIBA && 155 /*if (priv->oem_id == RT_CID_TOSHIBA &&
129 !priv->ndis_adapter.amd_l1_patch) 156 !priv->ndis_adapter.amd_l1_patch)
130 support_backdoor = false; */ 157 support_backdoor = false; */
131 158
@@ -145,6 +172,13 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
145 ("switch case not process\n")); 172 ("switch case not process\n"));
146 break; 173 break;
147 } 174 }
175
176 /* Toshiba ASPM issue: the Toshiba BIOS sets ASPM by itself,
177 * so we should not set ASPM in the driver */
178 pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
179 if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
180 init_aspm == 0x43)
181 ppsc->support_aspm = false;
148} 182}
149 183
150static bool _rtl_pci_platform_switch_device_pci_aspm( 184static bool _rtl_pci_platform_switch_device_pci_aspm(
@@ -152,28 +186,28 @@ static bool _rtl_pci_platform_switch_device_pci_aspm(
152 u8 value) 186 u8 value)
153{ 187{
154 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 188 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
155 bool bresult = false; 189 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
156 190
157 value |= 0x40; 191 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
192 value |= 0x40;
158 193
159 pci_write_config_byte(rtlpci->pdev, 0x80, value); 194 pci_write_config_byte(rtlpci->pdev, 0x80, value);
160 195
161 return bresult; 196 return false;
162} 197}
163 198
164/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/ 199/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
165static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value) 200static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
166{ 201{
167 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 202 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
168 u8 buffer; 203 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
169 bool bresult = false;
170
171 buffer = value;
172 204
173 pci_write_config_byte(rtlpci->pdev, 0x81, value); 205 pci_write_config_byte(rtlpci->pdev, 0x81, value);
174 bresult = true;
175 206
176 return bresult; 207 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
208 udelay(100);
209
210 return true;
177} 211}
178 212
179/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/ 213/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
@@ -191,6 +225,10 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
191 u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter. 225 u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
192 pcibridge_linkctrlreg; 226 pcibridge_linkctrlreg;
193 u16 aspmlevel = 0; 227 u16 aspmlevel = 0;
228 u8 tmp_u1b = 0;
229
230 if (!ppsc->support_aspm)
231 return;
194 232
195 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) { 233 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
196 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, 234 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
@@ -204,11 +242,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
204 _rtl_pci_switch_clk_req(hw, 0x0); 242 _rtl_pci_switch_clk_req(hw, 0x0);
205 } 243 }
206 244
207 if (1) { 245 /*for promising device will in L0 state after an I/O. */
208 /*for promising device will in L0 state after an I/O. */ 246 pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
209 u8 tmp_u1b;
210 pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
211 }
212 247
213 /*Set corresponding value. */ 248 /*Set corresponding value. */
214 aspmlevel |= BIT(0) | BIT(1); 249 aspmlevel |= BIT(0) | BIT(1);
@@ -224,7 +259,6 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
224 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg); 259 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
225 260
226 udelay(50); 261 udelay(50);
227
228} 262}
229 263
230/* 264/*
@@ -249,6 +283,9 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
249 u8 u_pcibridge_aspmsetting; 283 u8 u_pcibridge_aspmsetting;
250 u8 u_device_aspmsetting; 284 u8 u_device_aspmsetting;
251 285
286 if (!ppsc->support_aspm)
287 return;
288
252 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) { 289 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
253 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, 290 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
254 ("PCI(Bridge) UNKNOWN.\n")); 291 ("PCI(Bridge) UNKNOWN.\n"));
@@ -293,7 +330,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
293 RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0); 330 RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
294 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ); 331 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
295 } 332 }
296 udelay(200); 333 udelay(100);
297} 334}
298 335
299static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw) 336static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
@@ -330,13 +367,13 @@ static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
330 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset; 367 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
331 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport; 368 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
332 u8 linkctrl_reg; 369 u8 linkctrl_reg;
333 u8 num4bBytes; 370 u8 num4bbytes;
334 371
335 num4bBytes = (capabilityoffset + 0x10) / 4; 372 num4bbytes = (capabilityoffset + 0x10) / 4;
336 373
337 /*Read Link Control Register */ 374 /*Read Link Control Register */
338 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, 375 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
339 pcicfg_addrport + (num4bBytes << 2)); 376 pcicfg_addrport + (num4bbytes << 2));
340 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg); 377 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
341 378
342 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg; 379 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
@@ -369,7 +406,7 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
369 pci_write_config_byte(pdev, 0x70f, tmp); 406 pci_write_config_byte(pdev, 0x70f, tmp);
370} 407}
371 408
372static void _rtl_pci_initialize_adapter_common(struct ieee80211_hw *hw) 409static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
373{ 410{
374 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 411 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
375 412
@@ -383,52 +420,6 @@ static void _rtl_pci_initialize_adapter_common(struct ieee80211_hw *hw)
383 420
384} 421}
385 422
386static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
387{
388 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
389
390 /*close ASPM for AMD defaultly */
391 rtlpci->const_amdpci_aspm = 0;
392
393 /*
394 * ASPM PS mode.
395 * 0 - Disable ASPM,
396 * 1 - Enable ASPM without Clock Req,
397 * 2 - Enable ASPM with Clock Req,
398 * 3 - Always Enable ASPM with Clock Req,
399 * 4 - Always Enable ASPM without Clock Req.
400 * set defult to RTL8192CE:3 RTL8192E:2
401 * */
402 rtlpci->const_pci_aspm = 3;
403
404 /*Setting for PCI-E device */
405 rtlpci->const_devicepci_aspm_setting = 0x03;
406
407 /*Setting for PCI-E bridge */
408 rtlpci->const_hostpci_aspm_setting = 0x02;
409
410 /*
411 * In Hw/Sw Radio Off situation.
412 * 0 - Default,
413 * 1 - From ASPM setting without low Mac Pwr,
414 * 2 - From ASPM setting with low Mac Pwr,
415 * 3 - Bus D3
416 * set default to RTL8192CE:0 RTL8192SE:2
417 */
418 rtlpci->const_hwsw_rfoff_d3 = 0;
419
420 /*
421 * This setting works for those device with
422 * backdoor ASPM setting such as EPHY setting.
423 * 0 - Not support ASPM,
424 * 1 - Support ASPM,
425 * 2 - According to chipset.
426 */
427 rtlpci->const_support_pciaspm = 1;
428
429 _rtl_pci_initialize_adapter_common(hw);
430}
431
432static void _rtl_pci_io_handler_init(struct device *dev, 423static void _rtl_pci_io_handler_init(struct device *dev,
433 struct ieee80211_hw *hw) 424 struct ieee80211_hw *hw)
434{ 425{
@@ -450,6 +441,90 @@ static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
450{ 441{
451} 442}
452 443
444static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
445 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
446{
447 struct rtl_priv *rtlpriv = rtl_priv(hw);
448 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
449 u8 additionlen = FCS_LEN;
450 struct sk_buff *next_skb;
451
452 /* total extra length: open is 4, WEP/TKIP is 8, AES is 12 */
453 if (info->control.hw_key)
454 additionlen += info->control.hw_key->icv_len;
455
456 /* At most 6 skbs (this one plus 5 queued) are covered */
457 tcb_desc->empkt_num = 0;
458 spin_lock_bh(&rtlpriv->locks.waitq_lock);
459 skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
460 struct ieee80211_tx_info *next_info;
461
462 next_info = IEEE80211_SKB_CB(next_skb);
463 if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
464 tcb_desc->empkt_len[tcb_desc->empkt_num] =
465 next_skb->len + additionlen;
466 tcb_desc->empkt_num++;
467 } else {
468 break;
469 }
470
471 if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
472 next_skb))
473 break;
474
475 if (tcb_desc->empkt_num >= 5)
476 break;
477 }
478 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
479
480 return true;
481}
482
483/* just for early mode now */
484static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
485{
486 struct rtl_priv *rtlpriv = rtl_priv(hw);
487 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
488 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
489 struct sk_buff *skb = NULL;
490 struct ieee80211_tx_info *info = NULL;
491 int tid; /* should be int */
492
493 if (!rtlpriv->rtlhal.earlymode_enable)
494 return;
495
496 /* we just use early mode for BE/BK/VI/VO */
497 for (tid = 7; tid >= 0; tid--) {
498 u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
499 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
500 while (!mac->act_scanning &&
501 rtlpriv->psc.rfpwr_state == ERFON) {
502 struct rtl_tcb_desc tcb_desc;
503 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
504
505 spin_lock_bh(&rtlpriv->locks.waitq_lock);
506 if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
507 (ring->entries - skb_queue_len(&ring->queue) > 5)) {
508 skb = skb_dequeue(&mac->skb_waitq[tid]);
509 } else {
510 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
511 break;
512 }
513 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
514
515 /* Some destinations can't use early mode, e.g.
516 * multicast/broadcast/non-QoS data */
517 info = IEEE80211_SKB_CB(skb);
518 if (info->flags & IEEE80211_TX_CTL_AMPDU)
519 _rtl_update_earlymode_info(hw, skb,
520 &tcb_desc, tid);
521
522 rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
523 }
524 }
525}
526
527
453static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) 528static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
454{ 529{
455 struct rtl_priv *rtlpriv = rtl_priv(hw); 530 struct rtl_priv *rtlpriv = rtl_priv(hw);
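
The pair of functions added above implements the TX side of "early mode": outgoing A-MPDU frames are parked on a per-TID wait queue, and when one is finally pushed to the hardware its descriptor advertises the lengths of up to five packets still waiting behind it, each padded by the FCS and, for encrypted frames, the ICV. The queue walk boils down to this stand-alone model, where the wait queue is just an array of pending frames and the descriptor fields mirror the empkt_* members used by the patch:

    #include <stdbool.h>
    #include <stddef.h>

    #define FCS_LEN   4
    #define MAX_EMPKT 5              /* the patch records at most 5 trailing lengths */

    struct pending {                 /* simplified stand-in for a queued skb */
        unsigned int len;
        unsigned int icv_len;        /* info->control.hw_key->icv_len in the driver */
        bool ampdu;                  /* IEEE80211_TX_CTL_AMPDU set? */
    };

    struct em_desc {
        unsigned int empkt_num;
        unsigned int empkt_len[MAX_EMPKT];
    };

    /* Record the padded lengths of the frames that will follow this one. */
    static void update_earlymode_info(struct em_desc *d,
                                      const struct pending *waitq, size_t n)
    {
        d->empkt_num = 0;
        for (size_t i = 0; i < n && d->empkt_num < MAX_EMPKT; i++) {
            if (!waitq[i].ampdu)     /* stop at the first non-aggregatable frame */
                break;
            d->empkt_len[d->empkt_num++] =
                waitq[i].len + FCS_LEN + waitq[i].icv_len;
        }
    }
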
@@ -461,6 +536,8 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
461 struct rtl_tx_desc *entry = &ring->desc[ring->idx]; 536 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
462 struct sk_buff *skb; 537 struct sk_buff *skb;
463 struct ieee80211_tx_info *info; 538 struct ieee80211_tx_info *info;
539 __le16 fc;
540 u8 tid;
464 541
465 u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true, 542 u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
466 HW_DESC_OWN); 543 HW_DESC_OWN);
@@ -481,6 +558,10 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
481 HW_DESC_TXBUFF_ADDR), 558 HW_DESC_TXBUFF_ADDR),
482 skb->len, PCI_DMA_TODEVICE); 559 skb->len, PCI_DMA_TODEVICE);
483 560
561 /* remove early mode header */
562 if (rtlpriv->rtlhal.earlymode_enable)
563 skb_pull(skb, EM_HDR_LEN);
564
484 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE, 565 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
485 ("new ring->idx:%d, " 566 ("new ring->idx:%d, "
486 "free: skb_queue_len:%d, free: seq:%x\n", 567 "free: skb_queue_len:%d, free: seq:%x\n",
@@ -488,6 +569,30 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
488 skb_queue_len(&ring->queue), 569 skb_queue_len(&ring->queue),
489 *(u16 *) (skb->data + 22))); 570 *(u16 *) (skb->data + 22)));
490 571
572 if (prio == TXCMD_QUEUE) {
573 dev_kfree_skb(skb);
574 goto tx_status_ok;
575
576 }
577
578 /* for sw LPS, only after the NULL data frame is sent out can we
579 * be sure the AP knows we are asleep; otherwise we must not let
580 * the RF go to sleep */
581 fc = rtl_get_fc(skb);
582 if (ieee80211_is_nullfunc(fc)) {
583 if (ieee80211_has_pm(fc)) {
584 rtlpriv->mac80211.offchan_deley = true;
585 rtlpriv->psc.state_inap = 1;
586 } else {
587 rtlpriv->psc.state_inap = 0;
588 }
589 }
590
591 /* update tid tx pkt num */
592 tid = rtl_get_tid(skb);
593 if (tid <= 7)
594 rtlpriv->link_info.tidtx_inperiod[tid]++;
595
491 info = IEEE80211_SKB_CB(skb); 596 info = IEEE80211_SKB_CB(skb);
492 ieee80211_tx_info_clear_status(info); 597 ieee80211_tx_info_clear_status(info);
493 598
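
The TX-complete path now also looks at finished null-data frames: once a null frame carrying the PM bit has actually gone out, the AP can be assumed to know the station is asleep, so the patch sets state_inap (and flags an off-channel delay); a completed null frame without PM marks the station awake again. A tiny model of that state update, using a simplified structure rather than the real rtl_priv fields:

    #include <stdbool.h>

    struct sw_ps_state {
        bool state_inap;        /* the AP believes we are asleep */
        bool offchan_delay;     /* postpone off-channel work while PS settles */
    };

    /* Called when a transmitted frame is reclaimed from the TX ring. */
    static void tx_complete_update_ps(struct sw_ps_state *ps,
                                      bool is_nullfunc, bool pm_bit)
    {
        if (!is_nullfunc)
            return;
        if (pm_bit) {
            ps->offchan_delay = true;
            ps->state_inap = true;   /* null(PM=1) sent: safe to doze the RF */
        } else {
            ps->state_inap = false;  /* null(PM=0) sent: we announced wake-up */
        }
    }
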
@@ -510,7 +615,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
510 skb_get_queue_mapping 615 skb_get_queue_mapping
511 (skb)); 616 (skb));
512 } 617 }
513 618tx_status_ok:
514 skb = NULL; 619 skb = NULL;
515 } 620 }
516 621
@@ -582,23 +687,21 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
582 *skb_trim(skb, skb->len - 4); 687 *skb_trim(skb, skb->len - 4);
583 */ 688 */
584 689
585 hdr = (struct ieee80211_hdr *)(skb->data); 690 hdr = rtl_get_hdr(skb);
586 fc = hdr->frame_control; 691 fc = rtl_get_fc(skb);
587 692
588 if (!stats.crc) { 693 if (!stats.crc || !stats.hwerror) {
589 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, 694 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
590 sizeof(rx_status)); 695 sizeof(rx_status));
591 696
592 if (is_broadcast_ether_addr(hdr->addr1)) 697 if (is_broadcast_ether_addr(hdr->addr1)) {
593 ;/*TODO*/ 698 ;/*TODO*/
594 else { 699 } else if (is_multicast_ether_addr(hdr->addr1)) {
595 if (is_multicast_ether_addr(hdr->addr1)) 700 ;/*TODO*/
596 ;/*TODO*/ 701 } else {
597 else { 702 unicast = true;
598 unicast = true; 703 rtlpriv->stats.rxbytesunicast +=
599 rtlpriv->stats.rxbytesunicast += 704 skb->len;
600 skb->len;
601 }
602 } 705 }
603 706
604 rtl_is_special_data(hw, skb, false); 707 rtl_is_special_data(hw, skb, false);
@@ -612,28 +715,38 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
612 num_rx_inperiod++; 715 num_rx_inperiod++;
613 } 716 }
614 717
615 if (unlikely(!rtl_action_proc(hw, skb, 718 /* for sw lps */
616 false))) { 719 rtl_swlps_beacon(hw, (void *)skb->data,
720 skb->len);
721 rtl_recognize_peer(hw, (void *)skb->data,
722 skb->len);
723 if ((rtlpriv->mac80211.opmode ==
724 NL80211_IFTYPE_AP) &&
725 (rtlpriv->rtlhal.current_bandtype ==
726 BAND_ON_2_4G) &&
727 (ieee80211_is_beacon(fc) ||
728 ieee80211_is_probe_resp(fc))) {
617 dev_kfree_skb_any(skb); 729 dev_kfree_skb_any(skb);
618 } else { 730 } else {
619 struct sk_buff *uskb = NULL; 731 if (unlikely(!rtl_action_proc(hw, skb,
620 u8 *pdata; 732 false))) {
621 uskb = dev_alloc_skb(skb->len + 128); 733 dev_kfree_skb_any(skb);
622 if (!uskb) { 734 } else {
623 RT_TRACE(rtlpriv, 735 struct sk_buff *uskb = NULL;
624 (COMP_INTR | COMP_RECV), 736 u8 *pdata;
625 DBG_EMERG, 737 uskb = dev_alloc_skb(skb->len
626 ("can't alloc rx skb\n")); 738 + 128);
627 goto done; 739 memcpy(IEEE80211_SKB_RXCB(uskb),
740 &rx_status,
741 sizeof(rx_status));
742 pdata = (u8 *)skb_put(uskb,
743 skb->len);
744 memcpy(pdata, skb->data,
745 skb->len);
746 dev_kfree_skb_any(skb);
747
748 ieee80211_rx_irqsafe(hw, uskb);
628 } 749 }
629 memcpy(IEEE80211_SKB_RXCB(uskb),
630 &rx_status,
631 sizeof(rx_status));
632 pdata = (u8 *)skb_put(uskb, skb->len);
633 memcpy(pdata, skb->data, skb->len);
634 dev_kfree_skb_any(skb);
635
636 ieee80211_rx_irqsafe(hw, uskb);
637 } 750 }
638 } else { 751 } else {
639 dev_kfree_skb_any(skb); 752 dev_kfree_skb_any(skb);
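
The rewritten RX branch first classifies the destination address (only unicast bytes feed the traffic statistics; the broadcast and multicast counters are still TODO in the patch), then lets the software-LPS and peer-recognition helpers look at the frame, and finally drops beacons and probe responses outright when the interface is a 2.4 GHz AP; everything else is copied into a freshly allocated skb and handed to mac80211. The classification step in isolation, with plain byte arrays standing in for the 802.11 header:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct rx_stats { unsigned long rxbytesunicast; };

    static bool is_broadcast(const uint8_t a[6])
    {
        static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        return memcmp(a, bcast, 6) == 0;
    }

    static bool is_multicast(const uint8_t a[6])
    {
        return a[0] & 0x01;              /* group bit of the first address octet */
    }

    /* Returns true when the frame counted as unicast traffic. */
    static bool account_rx(struct rx_stats *st, const uint8_t da[6], unsigned int len)
    {
        if (is_broadcast(da) || is_multicast(da))
            return false;
        st->rxbytesunicast += len;
        return true;
    }
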
@@ -648,7 +761,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
648 new_skb = dev_alloc_skb(rtlpci->rxbuffersize); 761 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
649 if (unlikely(!new_skb)) { 762 if (unlikely(!new_skb)) {
650 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), 763 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
651 DBG_EMERG, 764 DBG_DMESG,
652 ("can't alloc skb for rx\n")); 765 ("can't alloc skb for rx\n"));
653 goto done; 766 goto done;
654 } 767 }
@@ -666,7 +779,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
666 779
667 } 780 }
668done: 781done:
669 bufferaddress = (u32)(*((dma_addr_t *) skb->cb)); 782 bufferaddress = (*((dma_addr_t *)skb->cb));
670 tmp_one = 1; 783 tmp_one = 1;
671 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false, 784 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
672 HW_DESC_RXBUFF_ADDR, 785 HW_DESC_RXBUFF_ADDR,
@@ -695,6 +808,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
695 struct ieee80211_hw *hw = dev_id; 808 struct ieee80211_hw *hw = dev_id;
696 struct rtl_priv *rtlpriv = rtl_priv(hw); 809 struct rtl_priv *rtlpriv = rtl_priv(hw);
697 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 810 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
811 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
698 unsigned long flags; 812 unsigned long flags;
699 u32 inta = 0; 813 u32 inta = 0;
700 u32 intb = 0; 814 u32 intb = 0;
@@ -781,23 +895,36 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
781 _rtl_pci_tx_isr(hw, VO_QUEUE); 895 _rtl_pci_tx_isr(hw, VO_QUEUE);
782 } 896 }
783 897
898 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
899 if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
900 rtlpriv->link_info.num_tx_inperiod++;
901
902 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
903 ("CMD TX OK interrupt!\n"));
904 _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
905 }
906 }
907
784 /*<2> Rx related */ 908 /*<2> Rx related */
785 if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) { 909 if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
786 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n")); 910 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
787 tasklet_schedule(&rtlpriv->works.irq_tasklet); 911 _rtl_pci_rx_interrupt(hw);
788 } 912 }
789 913
790 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) { 914 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
791 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 915 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
792 ("rx descriptor unavailable!\n")); 916 ("rx descriptor unavailable!\n"));
793 tasklet_schedule(&rtlpriv->works.irq_tasklet); 917 _rtl_pci_rx_interrupt(hw);
794 } 918 }
795 919
796 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) { 920 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
797 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n")); 921 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
798 tasklet_schedule(&rtlpriv->works.irq_tasklet); 922 _rtl_pci_rx_interrupt(hw);
799 } 923 }
800 924
925 if (rtlpriv->rtlhal.earlymode_enable)
926 tasklet_schedule(&rtlpriv->works.irq_tasklet);
927
801 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); 928 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
802 return IRQ_HANDLED; 929 return IRQ_HANDLED;
803 930
@@ -808,7 +935,7 @@ done:
808 935
809static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) 936static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
810{ 937{
811 _rtl_pci_rx_interrupt(hw); 938 _rtl_pci_tx_chk_waitq(hw);
812} 939}
813 940
814static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) 941static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
@@ -816,14 +943,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
816 struct rtl_priv *rtlpriv = rtl_priv(hw); 943 struct rtl_priv *rtlpriv = rtl_priv(hw);
817 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 944 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
818 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 945 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
819 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; 946 struct rtl8192_tx_ring *ring = NULL;
820 struct ieee80211_hdr *hdr = NULL; 947 struct ieee80211_hdr *hdr = NULL;
821 struct ieee80211_tx_info *info = NULL; 948 struct ieee80211_tx_info *info = NULL;
822 struct sk_buff *pskb = NULL; 949 struct sk_buff *pskb = NULL;
823 struct rtl_tx_desc *pdesc = NULL; 950 struct rtl_tx_desc *pdesc = NULL;
824 unsigned int queue_index; 951 struct rtl_tcb_desc tcb_desc;
825 u8 temp_one = 1; 952 u8 temp_one = 1;
826 953
954 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
827 ring = &rtlpci->tx_ring[BEACON_QUEUE]; 955 ring = &rtlpci->tx_ring[BEACON_QUEUE];
828 pskb = __skb_dequeue(&ring->queue); 956 pskb = __skb_dequeue(&ring->queue);
829 if (pskb) 957 if (pskb)
@@ -833,14 +961,11 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
833 pskb = ieee80211_beacon_get(hw, mac->vif); 961 pskb = ieee80211_beacon_get(hw, mac->vif);
834 if (pskb == NULL) 962 if (pskb == NULL)
835 return; 963 return;
836 hdr = (struct ieee80211_hdr *)(pskb->data); 964 hdr = rtl_get_hdr(pskb);
837 info = IEEE80211_SKB_CB(pskb); 965 info = IEEE80211_SKB_CB(pskb);
838
839 queue_index = BEACON_QUEUE;
840
841 pdesc = &ring->desc[0]; 966 pdesc = &ring->desc[0];
842 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 967 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
843 info, pskb, queue_index); 968 info, pskb, BEACON_QUEUE, &tcb_desc);
844 969
845 __skb_queue_tail(&ring->queue, pskb); 970 __skb_queue_tail(&ring->queue, pskb);
846 971
@@ -882,7 +1007,6 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
882 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1007 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
883 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1008 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
884 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1009 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
885 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
886 1010
887 rtlpci->up_first_time = true; 1011 rtlpci->up_first_time = true;
888 rtlpci->being_init_adapter = false; 1012 rtlpci->being_init_adapter = false;
@@ -890,31 +1014,20 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
890 rtlhal->hw = hw; 1014 rtlhal->hw = hw;
891 rtlpci->pdev = pdev; 1015 rtlpci->pdev = pdev;
892 1016
893 ppsc->inactiveps = false;
894 ppsc->leisure_ps = true;
895 ppsc->fwctrl_lps = true;
896 ppsc->reg_fwctrl_lps = 3;
897 ppsc->reg_max_lps_awakeintvl = 5;
898
899 if (ppsc->reg_fwctrl_lps == 1)
900 ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
901 else if (ppsc->reg_fwctrl_lps == 2)
902 ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
903 else if (ppsc->reg_fwctrl_lps == 3)
904 ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
905
906 /*Tx/Rx related var */ 1017 /*Tx/Rx related var */
907 _rtl_pci_init_trx_var(hw); 1018 _rtl_pci_init_trx_var(hw);
908 1019
909 /*IBSS*/ mac->beacon_interval = 100; 1020 /*IBSS*/ mac->beacon_interval = 100;
910 1021
911 /*AMPDU*/ mac->min_space_cfg = 0; 1022 /*AMPDU*/
1023 mac->min_space_cfg = 0;
912 mac->max_mss_density = 0; 1024 mac->max_mss_density = 0;
913 /*set sane AMPDU defaults */ 1025 /*set sane AMPDU defaults */
914 mac->current_ampdu_density = 7; 1026 mac->current_ampdu_density = 7;
915 mac->current_ampdu_factor = 3; 1027 mac->current_ampdu_factor = 3;
916 1028
917 /*QOS*/ rtlpci->acm_method = eAcmWay2_SW; 1029 /*QOS*/
1030 rtlpci->acm_method = eAcmWay2_SW;
918 1031
919 /*task */ 1032 /*task */
920 tasklet_init(&rtlpriv->works.irq_tasklet, 1033 tasklet_init(&rtlpriv->works.irq_tasklet,
@@ -955,7 +1068,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
955 ("queue:%d, ring_addr:%p\n", prio, ring)); 1068 ("queue:%d, ring_addr:%p\n", prio, ring));
956 1069
957 for (i = 0; i < entries; i++) { 1070 for (i = 0; i < entries; i++) {
958 nextdescaddress = (u32) dma + ((i + 1) % entries) * 1071 nextdescaddress = (u32) dma +
1072 ((i + 1) % entries) *
959 sizeof(*ring); 1073 sizeof(*ring);
960 1074
961 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]), 1075 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
@@ -1020,7 +1134,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1020 rtlpci->rxbuffersize, 1134 rtlpci->rxbuffersize,
1021 PCI_DMA_FROMDEVICE); 1135 PCI_DMA_FROMDEVICE);
1022 1136
1023 bufferaddress = (u32)(*((dma_addr_t *)skb->cb)); 1137 bufferaddress = (*((dma_addr_t *)skb->cb));
1024 rtlpriv->cfg->ops->set_desc((u8 *)entry, false, 1138 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1025 HW_DESC_RXBUFF_ADDR, 1139 HW_DESC_RXBUFF_ADDR,
1026 (u8 *)&bufferaddress); 1140 (u8 *)&bufferaddress);
@@ -1203,72 +1317,73 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1203 return 0; 1317 return 0;
1204} 1318}
1205 1319
1206static unsigned int _rtl_mac_to_hwqueue(__le16 fc, 1320static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1207 unsigned int mac80211_queue_index) 1321 struct sk_buff *skb)
1208{ 1322{
1209 unsigned int hw_queue_index; 1323 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 1324 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1211 if (unlikely(ieee80211_is_beacon(fc))) { 1325 struct ieee80211_sta *sta = info->control.sta;
1212 hw_queue_index = BEACON_QUEUE; 1326 struct rtl_sta_info *sta_entry = NULL;
1213 goto out; 1327 u8 tid = rtl_get_tid(skb);
1214 } 1328
1215 1329 if (!sta)
1216 if (ieee80211_is_mgmt(fc)) { 1330 return false;
1217 hw_queue_index = MGNT_QUEUE; 1331 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1218 goto out; 1332
1219 } 1333 if (!rtlpriv->rtlhal.earlymode_enable)
1220 1334 return false;
1221 switch (mac80211_queue_index) { 1335 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1222 case 0: 1336 return false;
1223 hw_queue_index = VO_QUEUE; 1337 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1224 break; 1338 return false;
1225 case 1: 1339 if (tid > 7)
1226 hw_queue_index = VI_QUEUE; 1340 return false;
1227 break; 1341
1228 case 2: 1342 /* maybe every tid should be checked */
1229 hw_queue_index = BE_QUEUE;; 1343 if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1230 break; 1344 return false;
1231 case 3: 1345
1232 hw_queue_index = BK_QUEUE; 1346 spin_lock_bh(&rtlpriv->locks.waitq_lock);
1233 break; 1347 skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1234 default: 1348 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1235 hw_queue_index = BE_QUEUE;
1236 RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
1237 mac80211_queue_index));
1238 break;
1239 }
1240 1349
1241out: 1350 return true;
1242 return hw_queue_index;
1243} 1351}
1244 1352
1245static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1353static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
1354 struct rtl_tcb_desc *ptcb_desc)
1246{ 1355{
1247 struct rtl_priv *rtlpriv = rtl_priv(hw); 1356 struct rtl_priv *rtlpriv = rtl_priv(hw);
1248 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1357 struct rtl_sta_info *sta_entry = NULL;
1249 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1358 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1359 struct ieee80211_sta *sta = info->control.sta;
1250 struct rtl8192_tx_ring *ring; 1360 struct rtl8192_tx_ring *ring;
1251 struct rtl_tx_desc *pdesc; 1361 struct rtl_tx_desc *pdesc;
1252 u8 idx; 1362 u8 idx;
1253 unsigned int queue_index, hw_queue; 1363 u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
1254 unsigned long flags; 1364 unsigned long flags;
1255 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 1365 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
1256 __le16 fc = hdr->frame_control; 1366 __le16 fc = rtl_get_fc(skb);
1257 u8 *pda_addr = hdr->addr1; 1367 u8 *pda_addr = hdr->addr1;
1258 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1368 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1259 /*ssn */ 1369 /*ssn */
1260 u8 *qc = NULL;
1261 u8 tid = 0; 1370 u8 tid = 0;
1262 u16 seq_number = 0; 1371 u16 seq_number = 0;
1263 u8 own; 1372 u8 own;
1264 u8 temp_one = 1; 1373 u8 temp_one = 1;
1265 1374
1266 if (ieee80211_is_mgmt(fc)) 1375 if (ieee80211_is_auth(fc)) {
1267 rtl_tx_mgmt_proc(hw, skb); 1376 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
1268 rtl_action_proc(hw, skb, true); 1377 rtl_ips_nic_on(hw);
1378 }
1379
1380 if (rtlpriv->psc.sw_ps_enabled) {
1381 if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
1382 !ieee80211_has_pm(fc))
1383 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
1384 }
1269 1385
1270 queue_index = skb_get_queue_mapping(skb); 1386 rtl_action_proc(hw, skb, true);
1271 hw_queue = _rtl_mac_to_hwqueue(fc, queue_index);
1272 1387
1273 if (is_multicast_ether_addr(pda_addr)) 1388 if (is_multicast_ether_addr(pda_addr))
1274 rtlpriv->stats.txbytesmulticast += skb->len; 1389 rtlpriv->stats.txbytesmulticast += skb->len;
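
rtl_pci_tx_chk_waitq_insert(), introduced in the hunk above, is the gatekeeper for the early-mode wait queue: a frame is parked only if early mode is enabled, the destination station's BA session for that TID is operational, the frame is bound for one of the four AC rings, the TID is in range, and that TID is currently carrying heavy TX traffic. The checks collapse to a predicate like the following (all inputs are simplified stand-ins for driver state):

    #include <stdbool.h>

    enum hw_queue { BK_QUEUE, BE_QUEUE, VI_QUEUE, VO_QUEUE, BEACON_QUEUE };

    struct waitq_ctx {              /* simplified stand-ins for driver state */
        bool earlymode_enable;
        bool agg_operational;       /* BA session state for this station/TID */
        bool tid_busy;              /* link_info.higher_busytxtraffic[tid] */
    };

    static bool should_park_on_waitq(const struct waitq_ctx *c,
                                     bool has_sta, enum hw_queue q, int tid)
    {
        if (!has_sta || !c->earlymode_enable)
            return false;
        if (!c->agg_operational)
            return false;
        if (q > VO_QUEUE)           /* only the four AC rings qualify */
            return false;
        if (tid > 7)
            return false;
        return c->tid_busy;         /* only park frames on a busy TID */
    }
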
@@ -1278,7 +1393,6 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1278 rtlpriv->stats.txbytesunicast += skb->len; 1393 rtlpriv->stats.txbytesunicast += skb->len;
1279 1394
1280 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); 1395 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1281
1282 ring = &rtlpci->tx_ring[hw_queue]; 1396 ring = &rtlpci->tx_ring[hw_queue];
1283 if (hw_queue != BEACON_QUEUE) 1397 if (hw_queue != BEACON_QUEUE)
1284 idx = (ring->idx + skb_queue_len(&ring->queue)) % 1398 idx = (ring->idx + skb_queue_len(&ring->queue)) %
@@ -1301,43 +1415,30 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1301 return skb->len; 1415 return skb->len;
1302 } 1416 }
1303 1417
1304 /*
1305 *if(ieee80211_is_nullfunc(fc)) {
1306 * spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1307 * return 1;
1308 *}
1309 */
1310
1311 if (ieee80211_is_data_qos(fc)) { 1418 if (ieee80211_is_data_qos(fc)) {
1312 qc = ieee80211_get_qos_ctl(hdr); 1419 tid = rtl_get_tid(skb);
1313 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 1420 if (sta) {
1314 1421 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1315 seq_number = mac->tids[tid].seq_number; 1422 seq_number = (le16_to_cpu(hdr->seq_ctrl) &
1316 seq_number &= IEEE80211_SCTL_SEQ; 1423 IEEE80211_SCTL_SEQ) >> 4;
1317 /* 1424 seq_number += 1;
1318 *hdr->seq_ctrl = hdr->seq_ctrl & 1425
1319 *cpu_to_le16(IEEE80211_SCTL_FRAG); 1426 if (!ieee80211_has_morefrags(hdr->frame_control))
1320 *hdr->seq_ctrl |= cpu_to_le16(seq_number); 1427 sta_entry->tids[tid].seq_number = seq_number;
1321 */ 1428 }
1322
1323 seq_number += 1;
1324 } 1429 }
1325 1430
1326 if (ieee80211_is_data(fc)) 1431 if (ieee80211_is_data(fc))
1327 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 1432 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1328 1433
1329 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, 1434 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1330 info, skb, hw_queue); 1435 info, skb, hw_queue, ptcb_desc);
1331 1436
1332 __skb_queue_tail(&ring->queue, skb); 1437 __skb_queue_tail(&ring->queue, skb);
1333 1438
1334 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, 1439 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
1335 HW_DESC_OWN, (u8 *)&temp_one); 1440 HW_DESC_OWN, (u8 *)&temp_one);
1336 1441
1337 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1338 if (qc)
1339 mac->tids[tid].seq_number = seq_number;
1340 }
1341 1442
1342 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && 1443 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1343 hw_queue != BEACON_QUEUE) { 1444 hw_queue != BEACON_QUEUE) {
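
With per-station state (rtl_sta_info) available, rtl_pci_tx() no longer keeps its own TID sequence counters: it derives the next sequence number from the frame's own Sequence Control field and stores it in the station entry unless more fragments follow. The arithmetic in isolation (the 12-bit wrap is added here for completeness; the patch itself just increments):

    #include <stdint.h>

    #define IEEE80211_SCTL_SEQ 0xFFF0   /* sequence-number part of seq_ctrl */

    /* seq_ctrl is the host-order value of the 16-bit Sequence Control field. */
    static uint16_t next_seq_number(uint16_t seq_ctrl)
    {
        uint16_t seq = (seq_ctrl & IEEE80211_SCTL_SEQ) >> 4;
        return (uint16_t)((seq + 1) & 0x0FFF);
    }
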
@@ -1359,6 +1460,35 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1359 return 0; 1460 return 0;
1360} 1461}
1361 1462
1463static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
1464{
1465 struct rtl_priv *rtlpriv = rtl_priv(hw);
1466 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1467 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1468 u16 i = 0;
1469 int queue_id;
1470 struct rtl8192_tx_ring *ring;
1471
1472 for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
1473 u32 queue_len;
1474 ring = &pcipriv->dev.tx_ring[queue_id];
1475 queue_len = skb_queue_len(&ring->queue);
1476 if (queue_len == 0 || queue_id == BEACON_QUEUE ||
1477 queue_id == TXCMD_QUEUE) {
1478 queue_id--;
1479 continue;
1480 } else {
1481 msleep(20);
1482 i++;
1483 }
1484
1485 /* we just wait 1s for all queues */
1486 if (rtlpriv->psc.rfpwr_state == ERFOFF ||
1487 is_hal_stop(rtlhal) || i >= 200)
1488 return;
1489 }
1490}
1491
1362static void rtl_pci_deinit(struct ieee80211_hw *hw) 1492static void rtl_pci_deinit(struct ieee80211_hw *hw)
1363{ 1493{
1364 struct rtl_priv *rtlpriv = rtl_priv(hw); 1494 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1477,11 +1607,14 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1477 struct pci_dev *bridge_pdev = pdev->bus->self; 1607 struct pci_dev *bridge_pdev = pdev->bus->self;
1478 u16 venderid; 1608 u16 venderid;
1479 u16 deviceid; 1609 u16 deviceid;
1610 u8 revisionid;
1480 u16 irqline; 1611 u16 irqline;
1481 u8 tmp; 1612 u8 tmp;
1482 1613
1614 pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
1483 venderid = pdev->vendor; 1615 venderid = pdev->vendor;
1484 deviceid = pdev->device; 1616 deviceid = pdev->device;
1617 pci_read_config_byte(pdev, 0x8, &revisionid);
1485 pci_read_config_word(pdev, 0x3C, &irqline); 1618 pci_read_config_word(pdev, 0x3C, &irqline);
1486 1619
1487 if (deviceid == RTL_PCI_8192_DID || 1620 if (deviceid == RTL_PCI_8192_DID ||
@@ -1492,7 +1625,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1492 deviceid == RTL_PCI_8173_DID || 1625 deviceid == RTL_PCI_8173_DID ||
1493 deviceid == RTL_PCI_8172_DID || 1626 deviceid == RTL_PCI_8172_DID ||
1494 deviceid == RTL_PCI_8171_DID) { 1627 deviceid == RTL_PCI_8171_DID) {
1495 switch (pdev->revision) { 1628 switch (revisionid) {
1496 case RTL_PCI_REVISION_ID_8192PCIE: 1629 case RTL_PCI_REVISION_ID_8192PCIE:
1497 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1630 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1498 ("8192 PCI-E is found - " 1631 ("8192 PCI-E is found - "
@@ -1521,6 +1654,12 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1521 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1654 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1522 ("8192C PCI-E is found - " 1655 ("8192C PCI-E is found - "
1523 "vid/did=%x/%x\n", venderid, deviceid)); 1656 "vid/did=%x/%x\n", venderid, deviceid));
1657 } else if (deviceid == RTL_PCI_8192DE_DID ||
1658 deviceid == RTL_PCI_8192DE_DID2) {
1659 rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
1660 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1661 ("8192D PCI-E is found - "
1662 "vid/did=%x/%x\n", venderid, deviceid));
1524 } else { 1663 } else {
1525 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 1664 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1526 ("Err: Unknown device -" 1665 ("Err: Unknown device -"
@@ -1529,6 +1668,25 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1529 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE; 1668 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
1530 } 1669 }
1531 1670
1671 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
1672 if (revisionid == 0 || revisionid == 1) {
1673 if (revisionid == 0) {
1674 RT_TRACE(rtlpriv, COMP_INIT,
1675 DBG_LOUD, ("Find 92DE MAC0.\n"));
1676 rtlhal->interfaceindex = 0;
1677 } else if (revisionid == 1) {
1678 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1679 ("Find 92DE MAC1.\n"));
1680 rtlhal->interfaceindex = 1;
1681 }
1682 } else {
1683 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1684 ("Unknown device - "
1685 "VendorID/DeviceID=%x/%x, Revision=%x\n",
1686 venderid, deviceid, revisionid));
1687 rtlhal->interfaceindex = 0;
1688 }
1689 }
1532 /*find bus info */ 1690 /*find bus info */
1533 pcipriv->ndis_adapter.busnumber = pdev->bus->number; 1691 pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1534 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); 1692 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
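
The RTL8192DE is a dual-MAC chip whose two MACs appear as PCI functions distinguished only by their revision ID, which is why the probe path now reads config offset 0x08 itself: revision 0 selects MAC0, revision 1 selects MAC1, and anything unexpected falls back to MAC0. Reduced to a helper:

    #include <stdint.h>

    /* Map the PCI revision id of an 8192DE function onto its MAC index. */
    static unsigned int rtl8192de_interface_index(uint8_t revision_id)
    {
        return (revision_id == 1) ? 1 : 0;   /* 0 -> MAC0, 1 -> MAC1, else MAC0 */
    }
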
@@ -1554,12 +1712,12 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1554 PCI_SLOT(bridge_pdev->devfn); 1712 PCI_SLOT(bridge_pdev->devfn);
1555 pcipriv->ndis_adapter.pcibridge_funcnum = 1713 pcipriv->ndis_adapter.pcibridge_funcnum =
1556 PCI_FUNC(bridge_pdev->devfn); 1714 PCI_FUNC(bridge_pdev->devfn);
1557 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
1558 pci_pcie_cap(bridge_pdev);
1559 pcipriv->ndis_adapter.pcicfg_addrport = 1715 pcipriv->ndis_adapter.pcicfg_addrport =
1560 (pcipriv->ndis_adapter.pcibridge_busnum << 16) | 1716 (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
1561 (pcipriv->ndis_adapter.pcibridge_devnum << 11) | 1717 (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
1562 (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31); 1718 (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
1719 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
1720 pci_pcie_cap(bridge_pdev);
1563 pcipriv->ndis_adapter.num4bytes = 1721 pcipriv->ndis_adapter.num4bytes =
1564 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4; 1722 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
1565 1723
@@ -1642,6 +1800,11 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
1642 pcipriv = (void *)rtlpriv->priv; 1800 pcipriv = (void *)rtlpriv->priv;
1643 pcipriv->dev.pdev = pdev; 1801 pcipriv->dev.pdev = pdev;
1644 1802
1803 /* init cfg & intf_ops */
1804 rtlpriv->rtlhal.interface = INTF_PCI;
1805 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1806 rtlpriv->intf_ops = &rtl_pci_ops;
1807
1645 /* 1808 /*
1646 *init dbgp flags before all 1809 *init dbgp flags before all
1647 *other functions, because we will 1810 *other functions, because we will
@@ -1659,13 +1822,14 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
1659 return err; 1822 return err;
1660 } 1823 }
1661 1824
1662 pmem_start = pci_resource_start(pdev, 2); 1825 pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
1663 pmem_len = pci_resource_len(pdev, 2); 1826 pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
1664 pmem_flags = pci_resource_flags(pdev, 2); 1827 pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
1665 1828
1666 /*shared mem start */ 1829 /*shared mem start */
1667 rtlpriv->io.pci_mem_start = 1830 rtlpriv->io.pci_mem_start =
1668 (unsigned long)pci_iomap(pdev, 2, pmem_len); 1831 (unsigned long)pci_iomap(pdev,
1832 rtlpriv->cfg->bar_id, pmem_len);
1669 if (rtlpriv->io.pci_mem_start == 0) { 1833 if (rtlpriv->io.pci_mem_start == 0) {
1670 RT_ASSERT(false, ("Can't map PCI mem\n")); 1834 RT_ASSERT(false, ("Can't map PCI mem\n"));
1671 goto fail2; 1835 goto fail2;
@@ -1684,11 +1848,6 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
1684 pci_write_config_byte(pdev, 0x04, 0x06); 1848 pci_write_config_byte(pdev, 0x04, 0x06);
1685 pci_write_config_byte(pdev, 0x04, 0x07); 1849 pci_write_config_byte(pdev, 0x04, 0x07);
1686 1850
1687 /* init cfg & intf_ops */
1688 rtlpriv->rtlhal.interface = INTF_PCI;
1689 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1690 rtlpriv->intf_ops = &rtl_pci_ops;
1691
1692 /* find adapter */ 1851 /* find adapter */
1693 _rtl_pci_find_adapter(pdev, hw); 1852 _rtl_pci_find_adapter(pdev, hw);
1694 1853
@@ -1806,7 +1965,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
1806 1965
1807 rtl_pci_deinit(hw); 1966 rtl_pci_deinit(hw);
1808 rtl_deinit_core(hw); 1967 rtl_deinit_core(hw);
1809 rtlpriv->cfg->ops->deinit_sw_leds(hw);
1810 _rtl_pci_io_handler_release(hw); 1968 _rtl_pci_io_handler_release(hw);
1811 rtlpriv->cfg->ops->deinit_sw_vars(hw); 1969 rtlpriv->cfg->ops->deinit_sw_vars(hw);
1812 1970
@@ -1821,6 +1979,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
1821 } 1979 }
1822 1980
1823 pci_disable_device(pdev); 1981 pci_disable_device(pdev);
1982
1983 rtl_pci_disable_aspm(hw);
1984
1824 pci_set_drvdata(pdev, NULL); 1985 pci_set_drvdata(pdev, NULL);
1825 1986
1826 ieee80211_free_hw(hw); 1987 ieee80211_free_hw(hw);
@@ -1844,10 +2005,15 @@ no need to call hw_disable here.
1844****************************************/ 2005****************************************/
1845int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state) 2006int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1846{ 2007{
2008 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2009 struct rtl_priv *rtlpriv = rtl_priv(hw);
2010
2011 rtlpriv->cfg->ops->hw_suspend(hw);
2012 rtl_deinit_rfkill(hw);
2013
1847 pci_save_state(pdev); 2014 pci_save_state(pdev);
1848 pci_disable_device(pdev); 2015 pci_disable_device(pdev);
1849 pci_set_power_state(pdev, PCI_D3hot); 2016 pci_set_power_state(pdev, PCI_D3hot);
1850
1851 return 0; 2017 return 0;
1852} 2018}
1853EXPORT_SYMBOL(rtl_pci_suspend); 2019EXPORT_SYMBOL(rtl_pci_suspend);
@@ -1855,6 +2021,8 @@ EXPORT_SYMBOL(rtl_pci_suspend);
1855int rtl_pci_resume(struct pci_dev *pdev) 2021int rtl_pci_resume(struct pci_dev *pdev)
1856{ 2022{
1857 int ret; 2023 int ret;
2024 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2025 struct rtl_priv *rtlpriv = rtl_priv(hw);
1858 2026
1859 pci_set_power_state(pdev, PCI_D0); 2027 pci_set_power_state(pdev, PCI_D0);
1860 ret = pci_enable_device(pdev); 2028 ret = pci_enable_device(pdev);
@@ -1865,15 +2033,20 @@ int rtl_pci_resume(struct pci_dev *pdev)
1865 2033
1866 pci_restore_state(pdev); 2034 pci_restore_state(pdev);
1867 2035
2036 rtlpriv->cfg->ops->hw_resume(hw);
2037 rtl_init_rfkill(hw);
1868 return 0; 2038 return 0;
1869} 2039}
1870EXPORT_SYMBOL(rtl_pci_resume); 2040EXPORT_SYMBOL(rtl_pci_resume);
1871 2041
1872struct rtl_intf_ops rtl_pci_ops = { 2042struct rtl_intf_ops rtl_pci_ops = {
2043 .read_efuse_byte = read_efuse_byte,
1873 .adapter_start = rtl_pci_start, 2044 .adapter_start = rtl_pci_start,
1874 .adapter_stop = rtl_pci_stop, 2045 .adapter_stop = rtl_pci_stop,
1875 .adapter_tx = rtl_pci_tx, 2046 .adapter_tx = rtl_pci_tx,
2047 .flush = rtl_pci_flush,
1876 .reset_trx_ring = rtl_pci_reset_trx_ring, 2048 .reset_trx_ring = rtl_pci_reset_trx_ring,
2049 .waitq_insert = rtl_pci_tx_chk_waitq_insert,
1877 2050
1878 .disable_aspm = rtl_pci_disable_aspm, 2051 .disable_aspm = rtl_pci_disable_aspm,
1879 .enable_aspm = rtl_pci_enable_aspm, 2052 .enable_aspm = rtl_pci_enable_aspm,
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index 0caa81429726..671b1f5aa0cf 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -102,8 +102,8 @@
102#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */ 102#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */
103#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */ 103#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */
104#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */ 104#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */
105#define RTL_PCI_8192DE_DID 0x092D /*8192ce */ 105#define RTL_PCI_8192DE_DID 0x8193 /*8192de */
106#define RTL_PCI_8192DU_DID 0x092D /*8192ce */ 106#define RTL_PCI_8192DE_DID2 0x002B /*92DE*/
107 107
108/*8192 support 16 pages of IO registers*/ 108/*8192 support 16 pages of IO registers*/
109#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000 109#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
@@ -129,6 +129,11 @@ enum pci_bridge_vendor {
129 PCI_BRIDGE_VENDOR_MAX, 129 PCI_BRIDGE_VENDOR_MAX,
130}; 130};
131 131
132struct rtl_pci_capabilities_header {
133 u8 capability_id;
134 u8 next;
135};
136
132struct rtl_rx_desc { 137struct rtl_rx_desc {
133 u32 dword[8]; 138 u32 dword[8];
134} __packed; 139} __packed;
@@ -161,7 +166,9 @@ struct rtl_pci {
161 166
162 bool driver_is_goingto_unload; 167 bool driver_is_goingto_unload;
163 bool up_first_time; 168 bool up_first_time;
169 bool first_init;
164 bool being_init_adapter; 170 bool being_init_adapter;
171 bool init_ready;
165 bool irq_enabled; 172 bool irq_enabled;
166 173
167 /*Tx */ 174 /*Tx */
@@ -192,11 +199,14 @@ struct rtl_pci {
192 u8 const_devicepci_aspm_setting; 199 u8 const_devicepci_aspm_setting;
193 /*If it supports ASPM, Offset[560h] = 0x40, 200 /*If it supports ASPM, Offset[560h] = 0x40,
194 otherwise Offset[560h] = 0x00. */ 201 otherwise Offset[560h] = 0x00. */
195 bool b_support_aspm; 202 bool support_aspm;
196 bool b_support_backdoor; 203 bool support_backdoor;
197 204
198 /*QOS & EDCA */ 205 /*QOS & EDCA */
199 enum acm_method acm_method; 206 enum acm_method acm_method;
207
208 u16 shortretry_limit;
209 u16 longretry_limit;
200}; 210};
201 211
202struct mp_adapter { 212struct mp_adapter {
@@ -227,6 +237,7 @@ struct rtl_pci_priv {
227 struct rtl_pci dev; 237 struct rtl_pci dev;
228 struct mp_adapter ndis_adapter; 238 struct mp_adapter ndis_adapter;
229 struct rtl_led_ctl ledctl; 239 struct rtl_led_ctl ledctl;
240 struct bt_coexist_info bt_coexist;
230}; 241};
231 242
232#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv)) 243#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 6b7e217b6b89..2bb71195e976 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -36,7 +36,6 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
36 struct rtl_priv *rtlpriv = rtl_priv(hw); 36 struct rtl_priv *rtlpriv = rtl_priv(hw);
37 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 37 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
38 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 38 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
39 bool init_status = true;
40 39
41 /*<1> reset trx ring */ 40 /*<1> reset trx ring */
42 if (rtlhal->interface == INTF_PCI) 41 if (rtlhal->interface == INTF_PCI)
@@ -49,7 +48,6 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
49 /*<2> Enable Adapter */ 48 /*<2> Enable Adapter */
50 rtlpriv->cfg->ops->hw_init(hw); 49 rtlpriv->cfg->ops->hw_init(hw);
51 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); 50 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
52 /*init_status = false; */
53 51
54 /*<3> Enable Interrupt */ 52 /*<3> Enable Interrupt */
55 rtlpriv->cfg->ops->enable_interrupt(hw); 53 rtlpriv->cfg->ops->enable_interrupt(hw);
@@ -57,13 +55,12 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
57 /*<enable timer> */ 55 /*<enable timer> */
58 rtl_watch_dog_timer_callback((unsigned long)hw); 56 rtl_watch_dog_timer_callback((unsigned long)hw);
59 57
60 return init_status; 58 return true;
61} 59}
62EXPORT_SYMBOL(rtl_ps_enable_nic); 60EXPORT_SYMBOL(rtl_ps_enable_nic);
63 61
64bool rtl_ps_disable_nic(struct ieee80211_hw *hw) 62bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
65{ 63{
66 bool status = true;
67 struct rtl_priv *rtlpriv = rtl_priv(hw); 64 struct rtl_priv *rtlpriv = rtl_priv(hw);
68 65
69 /*<1> Stop all timer */ 66 /*<1> Stop all timer */
@@ -75,7 +72,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
75 /*<3> Disable Adapter */ 72 /*<3> Disable Adapter */
76 rtlpriv->cfg->ops->hw_disable(hw); 73 rtlpriv->cfg->ops->hw_disable(hw);
77 74
78 return status; 75 return true;
79} 76}
80EXPORT_SYMBOL(rtl_ps_disable_nic); 77EXPORT_SYMBOL(rtl_ps_disable_nic);
81 78
@@ -193,12 +190,13 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
193 190
194 ppsc->swrf_processing = true; 191 ppsc->swrf_processing = true;
195 192
196 if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) { 193 if (ppsc->inactive_pwrstate == ERFOFF &&
194 rtlhal->interface == INTF_PCI) {
197 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && 195 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
198 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM) && 196 RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
199 rtlhal->interface == INTF_PCI) { 197 rtlhal->interface == INTF_PCI) {
200 rtlpriv->intf_ops->disable_aspm(hw); 198 rtlpriv->intf_ops->disable_aspm(hw);
201 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM); 199 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
202 } 200 }
203 } 201 }
204 202
@@ -207,9 +205,10 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
207 205
208 if (ppsc->inactive_pwrstate == ERFOFF && 206 if (ppsc->inactive_pwrstate == ERFOFF &&
209 rtlhal->interface == INTF_PCI) { 207 rtlhal->interface == INTF_PCI) {
210 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) { 208 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
209 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
211 rtlpriv->intf_ops->enable_aspm(hw); 210 rtlpriv->intf_ops->enable_aspm(hw);
212 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM); 211 RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
213 } 212 }
214 } 213 }
215 214
@@ -233,6 +232,9 @@ void rtl_ips_nic_off_wq_callback(void *data)
233 return; 232 return;
234 } 233 }
235 234
235 if (mac->link_state > MAC80211_NOLINK)
236 return;
237
236 if (is_hal_stop(rtlhal)) 238 if (is_hal_stop(rtlhal))
237 return; 239 return;
238 240
@@ -284,10 +286,14 @@ void rtl_ips_nic_off(struct ieee80211_hw *hw)
284void rtl_ips_nic_on(struct ieee80211_hw *hw) 286void rtl_ips_nic_on(struct ieee80211_hw *hw)
285{ 287{
286 struct rtl_priv *rtlpriv = rtl_priv(hw); 288 struct rtl_priv *rtlpriv = rtl_priv(hw);
289 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
287 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 290 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
288 enum rf_pwrstate rtstate; 291 enum rf_pwrstate rtstate;
289 unsigned long flags; 292 unsigned long flags;
290 293
294 if (mac->opmode != NL80211_IFTYPE_STATION)
295 return;
296
291 spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); 297 spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
292 298
293 if (ppsc->inactiveps) { 299 if (ppsc->inactiveps) {
@@ -370,8 +376,7 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
370 * mode and set RPWM to turn RF on. 376 * mode and set RPWM to turn RF on.
371 */ 377 */
372 378
373 if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) && 379 if ((ppsc->fwctrl_lps) && ppsc->report_linked) {
374 ppsc->report_linked) {
375 bool fw_current_inps; 380 bool fw_current_inps;
376 if (ppsc->dot11_psmode == EACTIVE) { 381 if (ppsc->dot11_psmode == EACTIVE) {
377 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 382 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
@@ -425,7 +430,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
425 struct rtl_priv *rtlpriv = rtl_priv(hw); 430 struct rtl_priv *rtlpriv = rtl_priv(hw);
426 unsigned long flag; 431 unsigned long flag;
427 432
428 if (!(ppsc->fwctrl_lps && ppsc->leisure_ps)) 433 if (!ppsc->fwctrl_lps)
429 return; 434 return;
430 435
431 if (rtlpriv->sec.being_setkey) 436 if (rtlpriv->sec.being_setkey)
@@ -446,17 +451,16 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
446 451
447 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); 452 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
448 453
449 if (ppsc->leisure_ps) { 454 /* Idle for a while if we connect to AP a while ago. */
450 /* Idle for a while if we connect to AP a while ago. */ 455 if (mac->cnt_after_linked >= 2) {
451 if (mac->cnt_after_linked >= 2) { 456 if (ppsc->dot11_psmode == EACTIVE) {
452 if (ppsc->dot11_psmode == EACTIVE) { 457 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
453 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
454 ("Enter 802.11 power save mode...\n")); 458 ("Enter 802.11 power save mode...\n"));
455 459
456 rtl_lps_set_psmode(hw, EAUTOPS); 460 rtl_lps_set_psmode(hw, EAUTOPS);
457 }
458 } 461 }
459 } 462 }
463
460 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); 464 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
461} 465}
462 466
@@ -470,17 +474,17 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
470 474
471 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); 475 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
472 476
473 if (ppsc->fwctrl_lps && ppsc->leisure_ps) { 477 if (ppsc->fwctrl_lps) {
474 if (ppsc->dot11_psmode != EACTIVE) { 478 if (ppsc->dot11_psmode != EACTIVE) {
475 479
476 /*FIX ME */ 480 /*FIX ME */
477 rtlpriv->cfg->ops->enable_interrupt(hw); 481 rtlpriv->cfg->ops->enable_interrupt(hw);
478 482
479 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM && 483 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
480 RT_IN_PS_LEVEL(ppsc, RT_RF_LPS_LEVEL_ASPM) && 484 RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
481 rtlhal->interface == INTF_PCI) { 485 rtlhal->interface == INTF_PCI) {
482 rtlpriv->intf_ops->disable_aspm(hw); 486 rtlpriv->intf_ops->disable_aspm(hw);
483 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_LPS_LEVEL_ASPM); 487 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
484 } 488 }
485 489
486 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 490 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
@@ -491,3 +495,214 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
491 } 495 }
492 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); 496 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
493} 497}
498
499/* For sw LPS*/
500void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
501{
502 struct rtl_priv *rtlpriv = rtl_priv(hw);
503 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
504 struct ieee80211_hdr *hdr = (void *) data;
505 struct ieee80211_tim_ie *tim_ie;
506 u8 *tim;
507 u8 tim_len;
508 bool u_buffed;
509 bool m_buffed;
510
511 if (mac->opmode != NL80211_IFTYPE_STATION)
512 return;
513
514 if (!rtlpriv->psc.swctrl_lps)
515 return;
516
517 if (rtlpriv->mac80211.link_state != MAC80211_LINKED)
518 return;
519
520 if (!rtlpriv->psc.sw_ps_enabled)
521 return;
522
523 if (rtlpriv->psc.fwctrl_lps)
524 return;
525
526 if (likely(!(hw->conf.flags & IEEE80211_CONF_PS)))
527 return;
528
529 /* check if this really is a beacon */
530 if (!ieee80211_is_beacon(hdr->frame_control))
531 return;
532
533 /* min. beacon length + FCS_LEN */
534 if (len <= 40 + FCS_LEN)
535 return;
536
537 /* and only beacons from the associated BSSID, please */
538 if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
539 return;
540
541 rtlpriv->psc.last_beacon = jiffies;
542
543 tim = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
544 if (!tim)
545 return;
546
547 if (tim[1] < sizeof(*tim_ie))
548 return;
549
550 tim_len = tim[1];
551 tim_ie = (struct ieee80211_tim_ie *) &tim[2];
552
553 if (!WARN_ON_ONCE(!hw->conf.ps_dtim_period))
554 rtlpriv->psc.dtim_counter = tim_ie->dtim_count;
555
556 /* Check whether the PHY can be turned off again. */
557
558 /* 1. What about buffered unicast traffic for our AID? */
559 u_buffed = ieee80211_check_tim(tim_ie, tim_len,
560 rtlpriv->mac80211.assoc_id);
561
562 /* 2. Maybe the AP wants to send multicast/broadcast data? */
563 m_buffed = tim_ie->bitmap_ctrl & 0x01;
564 rtlpriv->psc.multi_buffered = m_buffed;
565
566 /* Unicast is handled by mac80211, which clears
567 * IEEE80211_CONF_PS itself, so only buffered
568 * multicast frames need to be checked here */
569 if (!m_buffed) {
570 /* back to low-power land; the delay keeps the
571 * null-data power save frame from failing to tx */
572 queue_delayed_work(rtlpriv->works.rtl_wq,
573 &rtlpriv->works.ps_work, MSECS(5));
574 } else {
575 RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, ("u_buffered: %x, "
576 "m_buffered: %x\n", u_buffed, m_buffed));
577 }
578}
579
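Editor's aside (not part of the patch): rtl_swlps_beacon() above leans on mac80211's ieee80211_check_tim() to decide whether the AP still holds unicast frames for our AID. A minimal standalone sketch of that check, using simplified stand-in types rather than the kernel's struct ieee80211_tim_ie:

#include <stdbool.h>
#include <stdint.h>

struct tim_ie {                 /* stand-in for struct ieee80211_tim_ie */
	uint8_t dtim_count;     /* beacons until the next DTIM (0 = this one) */
	uint8_t dtim_period;
	uint8_t bitmap_ctrl;    /* bit 0: mcast/bcast buffered, bits 1-7: offset */
	uint8_t virtual_map[251];
};

/* Does the partial virtual bitmap mark traffic buffered for our AID? */
static bool tim_has_unicast_for_aid(const struct tim_ie *tim, uint8_t tim_len,
				    uint16_t aid)
{
	uint8_t n1 = tim->bitmap_ctrl & 0xfe;   /* first octet carried in the map */
	uint8_t map_len = tim_len - 3;          /* octets after count/period/ctrl */
	uint16_t octet = (aid & 0x3fff) / 8;

	if (octet < n1 || octet >= (uint16_t)(n1 + map_len))
		return false;
	return tim->virtual_map[octet - n1] & (1 << (aid % 8));
}

The multicast half of the decision is simply bit 0 of bitmap_ctrl, which is what the m_buffed test above reads.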
580void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
581{
582 struct rtl_priv *rtlpriv = rtl_priv(hw);
583 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
584 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
585 unsigned long flag;
586
587 if (!rtlpriv->psc.swctrl_lps)
588 return;
589 if (mac->link_state != MAC80211_LINKED)
590 return;
591
592 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
593 RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
594 rtlpriv->intf_ops->disable_aspm(hw);
595 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
596 }
597
598 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
599 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false);
600 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
601}
602
603void rtl_swlps_rfon_wq_callback(void *data)
604{
605 struct rtl_works *rtlworks =
606 container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
607 struct ieee80211_hw *hw = rtlworks->hw;
608
609 rtl_swlps_rf_awake(hw);
610}
611
612void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
613{
614 struct rtl_priv *rtlpriv = rtl_priv(hw);
615 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
616 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
617 unsigned long flag;
618 u8 sleep_intv;
619
620 if (!rtlpriv->psc.sw_ps_enabled)
621 return;
622
623 if ((rtlpriv->sec.being_setkey) ||
624 (mac->opmode == NL80211_IFTYPE_ADHOC))
625 return;
626
627 /* Only sleep once we have been linked for ~10 s, so DHCP and the 4-way handshake have time to finish */
628 if ((mac->link_state != MAC80211_LINKED) || (mac->cnt_after_linked < 5))
629 return;
630
631 if (rtlpriv->link_info.busytraffic)
632 return;
633
634 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
635 if (rtlpriv->psc.rfchange_inprogress) {
636 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
637 return;
638 }
639 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
640
641 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
642 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS, false);
643 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
644
645 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
646 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
647 rtlpriv->intf_ops->enable_aspm(hw);
648 RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
649 }
650
651 /* Power save algorithm: when this beacon is a DTIM,
652 * sleep for dtim_period * n beacon intervals;
653 * when it is not a DTIM, sleep for sleep_intv =
654 * rtlpriv->psc.dtim_counter, capped at
655 * MAX_SW_LPS_SLEEP_INTV (default 5) */
656
657 if (rtlpriv->psc.dtim_counter == 0) {
658 if (hw->conf.ps_dtim_period == 1)
659 sleep_intv = hw->conf.ps_dtim_period * 2;
660 else
661 sleep_intv = hw->conf.ps_dtim_period;
662 } else {
663 sleep_intv = rtlpriv->psc.dtim_counter;
664 }
665
666 if (sleep_intv > MAX_SW_LPS_SLEEP_INTV)
667 sleep_intv = MAX_SW_LPS_SLEEP_INTV;
668
669 /* this print should normally show dtim_counter = 0 and
670 * sleep = dtim_period, which means we wake up
671 * before every DTIM */
672 RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
673 ("dtim_counter:%x will sleep :%d"
674 " beacon_intv\n", rtlpriv->psc.dtim_counter, sleep_intv));
675
676 /* testing showed 40 ms is enough margin for the sw & hw switch delay */
677 queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
678 MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
679}
680
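Editor's aside (not part of the patch): the sleep length chosen by rtl_swlps_rf_sleep() above, pulled out as plain arithmetic. MAX_SW_LPS_SLEEP_INTV is the new constant from ps.h (5), and the 40 ms margin is the empirically chosen software/hardware switch delay mentioned in the comment.

/* How long to keep the RF asleep, in milliseconds. */
static unsigned int swlps_sleep_ms(unsigned int dtim_counter,
				   unsigned int dtim_period,
				   unsigned int beacon_int_ms)
{
	unsigned int intv;

	if (dtim_counter == 0)                  /* this beacon was a DTIM     */
		intv = (dtim_period == 1) ? 2 : dtim_period;
	else                                    /* wake just before next DTIM */
		intv = dtim_counter;

	if (intv > 5)                           /* MAX_SW_LPS_SLEEP_INTV      */
		intv = 5;

	return intv * beacon_int_ms - 40;       /* leave margin to power up   */
}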
681
682void rtl_swlps_wq_callback(void *data)
683{
684 struct rtl_works *rtlworks = container_of_dwork_rtl(data,
685 struct rtl_works,
686 ps_work);
687 struct ieee80211_hw *hw = rtlworks->hw;
688 struct rtl_priv *rtlpriv = rtl_priv(hw);
689 bool ps = false;
690
691 ps = (hw->conf.flags & IEEE80211_CONF_PS);
692
693 /* we can sleep once the PS null-data frame has been sent successfully */
694 if (rtlpriv->psc.state_inap) {
695 rtl_swlps_rf_sleep(hw);
696
697 if (rtlpriv->psc.state && !ps) {
698 rtlpriv->psc.sleep_ms = jiffies_to_msecs(jiffies -
699 rtlpriv->psc.last_action);
700 }
701
702 if (ps)
703 rtlpriv->psc.last_slept = jiffies;
704
705 rtlpriv->psc.last_action = jiffies;
706 rtlpriv->psc.state = ps;
707 }
708}
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index ae56da801a23..e3bf89840370 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -30,6 +30,8 @@
30#ifndef __REALTEK_RTL_PCI_PS_H__ 30#ifndef __REALTEK_RTL_PCI_PS_H__
31#define __REALTEK_RTL_PCI_PS_H__ 31#define __REALTEK_RTL_PCI_PS_H__
32 32
33#define MAX_SW_LPS_SLEEP_INTV 5
34
33bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, 35bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
34 enum rf_pwrstate state_toset, u32 changesource, 36 enum rf_pwrstate state_toset, u32 changesource,
35 bool protect_or_not); 37 bool protect_or_not);
@@ -40,4 +42,11 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw);
40void rtl_ips_nic_off_wq_callback(void *data); 42void rtl_ips_nic_off_wq_callback(void *data);
41void rtl_lps_enter(struct ieee80211_hw *hw); 43void rtl_lps_enter(struct ieee80211_hw *hw);
42void rtl_lps_leave(struct ieee80211_hw *hw); 44void rtl_lps_leave(struct ieee80211_hw *hw);
45
46void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
47void rtl_swlps_wq_callback(void *data);
48void rtl_swlps_rfon_wq_callback(void *data);
49void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
50void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
51
43#endif 52#endif
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index 91634107434a..30da68a77786 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -38,17 +38,14 @@
38 *CCK11M or OFDM_54M based on wireless mode. 38 *CCK11M or OFDM_54M based on wireless mode.
39 */ 39 */
40static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, 40static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
41 struct ieee80211_sta *sta,
41 struct sk_buff *skb, bool not_data) 42 struct sk_buff *skb, bool not_data)
42{ 43{
43 struct rtl_mac *rtlmac = rtl_mac(rtlpriv); 44 struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
44
45 /*
46 *mgt use 1M, although we have check it
47 *before this function use rate_control_send_low,
48 *we still check it here
49 */
50 if (not_data)
51 return rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
45 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
46 struct rtl_phy *rtlphy = &(rtlpriv->phy);
47 struct rtl_sta_info *sta_entry = NULL;
48 u8 wireless_mode = 0;
52 49
53 /* 50 /*
54 *this rate is no use for true rate, firmware 51 *this rate is no use for true rate, firmware
@@ -57,35 +54,78 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
57 *2.in rtl_get_tcb_desc when we check rate is 54 *2.in rtl_get_tcb_desc when we check rate is
58 * 1M we will not use FW rate but user rate. 55 * 1M we will not use FW rate but user rate.
59 */ 56 */
60 if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true)) {
61 return rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
57 if (rtlmac->opmode == NL80211_IFTYPE_AP ||
58 rtlmac->opmode == NL80211_IFTYPE_ADHOC) {
59 if (sta) {
60 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
61 wireless_mode = sta_entry->wireless_mode;
62 } else {
63 return 0;
64 }
65 } else {
66 wireless_mode = rtlmac->mode;
67 }
68
69 if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) ||
70 not_data) {
71 return 0;
62 } else { 72 } else {
63 if (rtlmac->mode == WIRELESS_MODE_B)
64 return rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
65 else
66 return rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
73 if (rtlhal->current_bandtype == BAND_ON_2_4G) {
74 if (wireless_mode == WIRELESS_MODE_B) {
75 return B_MODE_MAX_RIX;
76 } else if (wireless_mode == WIRELESS_MODE_G) {
77 return G_MODE_MAX_RIX;
78 } else {
79 if (get_rf_type(rtlphy) != RF_2T2R)
80 return N_MODE_MCS7_RIX;
81 else
82 return N_MODE_MCS15_RIX;
83 }
84 } else {
85 if (wireless_mode == WIRELESS_MODE_A) {
86 return A_MODE_MAX_RIX;
87 } else {
88 if (get_rf_type(rtlphy) != RF_2T2R)
89 return N_MODE_MCS7_RIX;
90 else
91 return N_MODE_MCS15_RIX;
92 }
93 }
67 } 94 }
68} 95}
69 96
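Editor's aside (not part of the patch): the ceiling that the rewritten _rtl_rc_get_highest_rix() hands to mac80211, reduced to a pure mapping. The *_RIX values come from the rc.h hunk later in this patch; "is_2t2r" stands in for get_rf_type(rtlphy) == RF_2T2R and the other names are illustrative.

enum example_mode { EX_MODE_A, EX_MODE_B, EX_MODE_G, EX_MODE_N };

static int example_highest_rix(int band_is_2ghz, enum example_mode mode,
			       int is_2t2r)
{
	if (band_is_2ghz) {
		if (mode == EX_MODE_B)
			return 3;               /* B_MODE_MAX_RIX:  CCK 11M  */
		if (mode == EX_MODE_G)
			return 11;              /* G_MODE_MAX_RIX:  OFDM 54M */
		return is_2t2r ? 15 : 7;        /* MCS15 vs. MCS7            */
	}
	if (mode == EX_MODE_A)
		return 7;                       /* A_MODE_MAX_RIX:  OFDM 54M */
	return is_2t2r ? 15 : 7;                /* 5 GHz HT                  */
}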
70static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, 97static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
98 struct ieee80211_sta *sta,
71 struct ieee80211_tx_rate *rate, 99 struct ieee80211_tx_rate *rate,
72 struct ieee80211_tx_rate_control *txrc, 100 struct ieee80211_tx_rate_control *txrc,
73 u8 tries, u8 rix, int rtsctsenable,
101 u8 tries, char rix, int rtsctsenable,
74 bool not_data) 102 bool not_data)
75{ 103{
76 struct rtl_mac *mac = rtl_mac(rtlpriv); 104 struct rtl_mac *mac = rtl_mac(rtlpriv);
105 u8 sgi_20 = 0, sgi_40 = 0;
77 106
107 if (sta) {
108 sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
109 sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
110 }
78 rate->count = tries; 111 rate->count = tries;
79 rate->idx = (rix > 0x2) ? rix : 0x2;
112 rate->idx = rix >= 0x00 ? rix : 0x00;
80 113
81 if (!not_data) { 114 if (!not_data) {
82 if (txrc->short_preamble) 115 if (txrc->short_preamble)
83 rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; 116 rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
84 if (mac->bw_40)
85 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
86 if (mac->sgi_20 || mac->sgi_40)
117 if (mac->opmode == NL80211_IFTYPE_AP ||
118 mac->opmode == NL80211_IFTYPE_ADHOC) {
119 if (sta && (sta->ht_cap.cap &
120 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
121 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
122 } else {
123 if (mac->bw_40)
124 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
125 }
126 if (sgi_20 || sgi_40)
87 rate->flags |= IEEE80211_TX_RC_SHORT_GI; 127 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
88 if (mac->ht_enable)
128 if (sta && sta->ht_cap.ht_supported)
89 rate->flags |= IEEE80211_TX_RC_MCS; 129 rate->flags |= IEEE80211_TX_RC_MCS;
90 } 130 }
91} 131}
@@ -97,39 +137,39 @@ static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
97 struct sk_buff *skb = txrc->skb; 137 struct sk_buff *skb = txrc->skb;
98 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 138 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
99 struct ieee80211_tx_rate *rates = tx_info->control.rates; 139 struct ieee80211_tx_rate *rates = tx_info->control.rates;
100 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
101 __le16 fc = hdr->frame_control;
140 __le16 fc = rtl_get_fc(skb);
102 u8 try_per_rate, i, rix; 141 u8 try_per_rate, i, rix;
103 bool not_data = !ieee80211_is_data(fc); 142 bool not_data = !ieee80211_is_data(fc);
104 143
105 if (rate_control_send_low(sta, priv_sta, txrc)) 144 if (rate_control_send_low(sta, priv_sta, txrc))
106 return; 145 return;
107 146
108 rix = _rtl_rc_get_highest_rix(rtlpriv, skb, not_data);
109
147 rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data);
110 try_per_rate = 1; 148 try_per_rate = 1;
111 _rtl_rc_rate_set_series(rtlpriv, &rates[0], txrc,
149 _rtl_rc_rate_set_series(rtlpriv, sta, &rates[0], txrc,
112 try_per_rate, rix, 1, not_data); 150 try_per_rate, rix, 1, not_data);
113 151
114 if (!not_data) { 152 if (!not_data) {
115 for (i = 1; i < 4; i++) 153 for (i = 1; i < 4; i++)
116 _rtl_rc_rate_set_series(rtlpriv, &rates[i],
154 _rtl_rc_rate_set_series(rtlpriv, sta, &rates[i],
117 txrc, i, (rix - i), 1, 155 txrc, i, (rix - i), 1,
118 not_data); 156 not_data);
119 } 157 }
120} 158}
121 159
122static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv, u16 tid) 160static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv,
161 struct rtl_sta_info *sta_entry, u16 tid)
123{ 162{
124 struct rtl_mac *mac = rtl_mac(rtlpriv); 163 struct rtl_mac *mac = rtl_mac(rtlpriv);
125 164
126 if (mac->act_scanning) 165 if (mac->act_scanning)
127 return false; 166 return false;
128 167
129 if (mac->cnt_after_linked < 3)
168 if (mac->opmode == NL80211_IFTYPE_STATION &&
169 mac->cnt_after_linked < 3)
130 return false; 170 return false;
131 171
132 if (mac->tids[tid].agg.agg_state == RTL_AGG_OFF)
172 if (sta_entry->tids[tid].agg.agg_state == RTL_AGG_STOP)
133 return true; 173 return true;
134 174
135 return false; 175 return false;
@@ -143,11 +183,9 @@ static void rtl_tx_status(void *ppriv,
143{ 183{
144 struct rtl_priv *rtlpriv = ppriv; 184 struct rtl_priv *rtlpriv = ppriv;
145 struct rtl_mac *mac = rtl_mac(rtlpriv); 185 struct rtl_mac *mac = rtl_mac(rtlpriv);
146 struct ieee80211_hdr *hdr; 186 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
147 __le16 fc; 187 __le16 fc = rtl_get_fc(skb);
148 188 struct rtl_sta_info *sta_entry;
149 hdr = (struct ieee80211_hdr *)skb->data;
150 fc = hdr->frame_control;
151 189
152 if (!priv_sta || !ieee80211_is_data(fc)) 190 if (!priv_sta || !ieee80211_is_data(fc))
153 return; 191 return;
@@ -159,17 +197,21 @@ static void rtl_tx_status(void *ppriv,
159 || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) 197 || is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
160 return; 198 return;
161 199
162 /* Check if aggregation has to be enabled for this tid */
163 if (conf_is_ht(&mac->hw->conf) &&
164 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
165 if (ieee80211_is_data_qos(fc)) {
166 u8 *qc, tid;
167
168 qc = ieee80211_get_qos_ctl(hdr);
169 tid = qc[0] & 0xf;
170
171 if (_rtl_tx_aggr_check(rtlpriv, tid))
172 ieee80211_start_tx_ba_session(sta, tid, 5000);
200 if (sta) {
201 /* Check if aggregation has to be enabled for this tid */
202 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
203 if ((sta->ht_cap.ht_supported == true) &&
204 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
205 if (ieee80211_is_data_qos(fc)) {
206 u8 tid = rtl_get_tid(skb);
207 if (_rtl_tx_aggr_check(rtlpriv, sta_entry,
208 tid)) {
209 sta_entry->tids[tid].agg.agg_state =
210 RTL_AGG_PROGRESS;
211 ieee80211_start_tx_ba_session(sta,
212 tid, 5000);
213 }
214 }
173 } 215 }
174 } 216 }
175} 217}
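Editor's aside (not part of the patch): the conditions under which the reworked tx-status path above kicks off a BlockAck session, collected into one predicate. The fields below are simplified stand-ins for the driver/mac80211 state that rtl_tx_status() and _rtl_tx_aggr_check() consult.

#include <stdbool.h>

struct agg_ctx {
	bool ht_supported;      /* sta->ht_cap.ht_supported          */
	bool is_qos_data;       /* ieee80211_is_data_qos(fc)         */
	bool is_eapol;          /* skb->protocol == ETH_P_PAE        */
	bool scanning;          /* mac->act_scanning                 */
	int  cnt_after_linked;  /* watchdog ticks since association  */
	int  agg_state;         /* 0 stands for RTL_AGG_STOP         */
};

static bool should_start_tx_ba(const struct agg_ctx *c, bool is_station)
{
	if (!c->ht_supported || !c->is_qos_data || c->is_eapol)
		return false;
	if (c->scanning)
		return false;
	if (is_station && c->cnt_after_linked < 3)
		return false;
	return c->agg_state == 0;       /* only start from the STOP state */
}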
@@ -178,43 +220,6 @@ static void rtl_rate_init(void *ppriv,
178 struct ieee80211_supported_band *sband, 220 struct ieee80211_supported_band *sband,
179 struct ieee80211_sta *sta, void *priv_sta) 221 struct ieee80211_sta *sta, void *priv_sta)
180{ 222{
181 struct rtl_priv *rtlpriv = ppriv;
182 struct rtl_mac *mac = rtl_mac(rtlpriv);
183 u8 is_ht = conf_is_ht(&mac->hw->conf);
184
185 if ((mac->opmode == NL80211_IFTYPE_STATION) ||
186 (mac->opmode == NL80211_IFTYPE_MESH_POINT) ||
187 (mac->opmode == NL80211_IFTYPE_ADHOC)) {
188
189 switch (sband->band) {
190 case IEEE80211_BAND_2GHZ:
191 rtlpriv->rate_priv->cur_ratetab_idx =
192 RATR_INX_WIRELESS_G;
193 if (is_ht)
194 rtlpriv->rate_priv->cur_ratetab_idx =
195 RATR_INX_WIRELESS_NGB;
196 break;
197 case IEEE80211_BAND_5GHZ:
198 rtlpriv->rate_priv->cur_ratetab_idx =
199 RATR_INX_WIRELESS_A;
200 if (is_ht)
201 rtlpriv->rate_priv->cur_ratetab_idx =
202 RATR_INX_WIRELESS_NGB;
203 break;
204 default:
205 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
206 ("Invalid band\n"));
207 rtlpriv->rate_priv->cur_ratetab_idx =
208 RATR_INX_WIRELESS_NGB;
209 break;
210 }
211
212 RT_TRACE(rtlpriv, COMP_RATE, DBG_DMESG,
213 ("Choosing rate table index: %d\n",
214 rtlpriv->rate_priv->cur_ratetab_idx));
215
216 }
217
218} 223}
219 224
220static void rtl_rate_update(void *ppriv, 225static void rtl_rate_update(void *ppriv,
@@ -223,49 +228,6 @@ static void rtl_rate_update(void *ppriv,
223 u32 changed, 228 u32 changed,
224 enum nl80211_channel_type oper_chan_type) 229 enum nl80211_channel_type oper_chan_type)
225{ 230{
226 struct rtl_priv *rtlpriv = ppriv;
227 struct rtl_mac *mac = rtl_mac(rtlpriv);
228 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
229 bool oper_cw40 = false, oper_sgi40;
230 bool local_cw40 = mac->bw_40;
231 bool local_sgi40 = mac->sgi_40;
232 u8 is_ht = conf_is_ht(&mac->hw->conf);
233
234 if (changed & IEEE80211_RC_HT_CHANGED) {
235 if (mac->opmode != NL80211_IFTYPE_STATION)
236 return;
237
238 if (rtlhal->hw->conf.channel_type == NL80211_CHAN_HT40MINUS ||
239 rtlhal->hw->conf.channel_type == NL80211_CHAN_HT40PLUS)
240 oper_cw40 = true;
241
242 oper_sgi40 = mac->sgi_40;
243
244 if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) {
245 switch (sband->band) {
246 case IEEE80211_BAND_2GHZ:
247 rtlpriv->rate_priv->cur_ratetab_idx =
248 RATR_INX_WIRELESS_G;
249 if (is_ht)
250 rtlpriv->rate_priv->cur_ratetab_idx =
251 RATR_INX_WIRELESS_NGB;
252 break;
253 case IEEE80211_BAND_5GHZ:
254 rtlpriv->rate_priv->cur_ratetab_idx =
255 RATR_INX_WIRELESS_A;
256 if (is_ht)
257 rtlpriv->rate_priv->cur_ratetab_idx =
258 RATR_INX_WIRELESS_NGB;
259 break;
260 default:
261 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
262 ("Invalid band\n"));
263 rtlpriv->rate_priv->cur_ratetab_idx =
264 RATR_INX_WIRELESS_NGB;
265 break;
266 }
267 }
268 }
269} 231}
270 232
271static void *rtl_rate_alloc(struct ieee80211_hw *hw, 233static void *rtl_rate_alloc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/rtlwifi/rc.h
index b4667c035f0b..4afa2c20adcf 100644
--- a/drivers/net/wireless/rtlwifi/rc.h
+++ b/drivers/net/wireless/rtlwifi/rc.h
@@ -30,8 +30,15 @@
30#ifndef __RTL_RC_H__ 30#ifndef __RTL_RC_H__
31#define __RTL_RC_H__ 31#define __RTL_RC_H__
32 32
33#define B_MODE_MAX_RIX 3
34#define G_MODE_MAX_RIX 11
35#define A_MODE_MAX_RIX 7
36
37/* in mac80211 mcs0-mcs15 is idx0-idx15*/
38#define N_MODE_MCS7_RIX 7
39#define N_MODE_MCS15_RIX 15
40
33struct rtl_rate_priv { 41struct rtl_rate_priv {
34 u8 cur_ratetab_idx;
35 u8 ht_cap; 42 u8 ht_cap;
36}; 43};
37 44
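Editor's aside (not part of the patch): how a rate index from the table above is expected to land in a mac80211 tx rate entry. Legacy rates use the band's bitrate index directly, while HT rates reuse the index as MCS0..MCS15 with the MCS flag set, which is what _rtl_rc_rate_set_series() does via IEEE80211_TX_RC_MCS. The names below are illustrative stand-ins.

struct example_tx_rate {
	signed char idx;                /* bitrate index or MCS number       */
	unsigned char flags;
};
#define EXAMPLE_FLAG_MCS 0x08           /* stand-in for IEEE80211_TX_RC_MCS  */

static struct example_tx_rate example_rix_to_rate(signed char rix, int ht)
{
	struct example_tx_rate r = { .idx = rix >= 0 ? rix : 0, .flags = 0 };

	if (ht)
		r.flags |= EXAMPLE_FLAG_MCS;    /* idx now means MCS0..MCS15 */
	return r;
}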
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index d26f957981ad..8f6718f163e5 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -66,31 +66,83 @@ static struct country_code_to_enum_rd allCountries[] = {
66 NL80211_RRF_PASSIVE_SCAN | \ 66 NL80211_RRF_PASSIVE_SCAN | \
67 NL80211_RRF_NO_OFDM) 67 NL80211_RRF_NO_OFDM)
68 68
69/* 5G chan 36 - chan 64*/
70#define RTL819x_5GHZ_5150_5350 \
71 REG_RULE(5150-10, 5350+10, 40, 0, 30, \
72 NL80211_RRF_PASSIVE_SCAN | \
73 NL80211_RRF_NO_IBSS)
74
75/* 5G chan 100 - chan 165*/
76#define RTL819x_5GHZ_5470_5850 \
77 REG_RULE(5470-10, 5850+10, 40, 0, 30, \
78 NL80211_RRF_PASSIVE_SCAN | \
79 NL80211_RRF_NO_IBSS)
80
81/* 5G chan 149 - chan 165*/
82#define RTL819x_5GHZ_5725_5850 \
83 REG_RULE(5725-10, 5850+10, 40, 0, 30, \
84 NL80211_RRF_PASSIVE_SCAN | \
85 NL80211_RRF_NO_IBSS)
86
87#define RTL819x_5GHZ_ALL \
88 (RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850)
89
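Editor's aside (not part of the patch): as used here, REG_RULE(start, end, bw, gain, eirp, flags) takes frequencies and bandwidth in MHz, antenna gain in dBi and EIRP in dBm; the +/-10 MHz padding keeps a 20 MHz channel centred on the first or last listed channel inside the rule. A decoded view of the 5150-5350 rule, with stand-in types rather than cfg80211's struct ieee80211_reg_rule:

struct decoded_rule {
	unsigned int start_khz, end_khz, max_bw_khz;
	int max_eirp_dbm;
	unsigned int passive_scan:1, no_ibss:1;
};

static const struct decoded_rule rtl819x_unii_low_example = {
	.start_khz    = (5150 - 10) * 1000,
	.end_khz      = (5350 + 10) * 1000,
	.max_bw_khz   = 40 * 1000,
	.max_eirp_dbm = 30,
	.passive_scan = 1,
	.no_ibss      = 1,
};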
69static const struct ieee80211_regdomain rtl_regdom_11 = { 90static const struct ieee80211_regdomain rtl_regdom_11 = {
70 .n_reg_rules = 1, 91 .n_reg_rules = 1,
71 .alpha2 = "99", 92 .alpha2 = "99",
72 .reg_rules = { 93 .reg_rules = {
73 RTL819x_2GHZ_CH01_11, 94 RTL819x_2GHZ_CH01_11,
74 } 95 }
96};
97
98static const struct ieee80211_regdomain rtl_regdom_12_13 = {
99 .n_reg_rules = 2,
100 .alpha2 = "99",
101 .reg_rules = {
102 RTL819x_2GHZ_CH01_11,
103 RTL819x_2GHZ_CH12_13,
104 }
75}; 105};
76 106
77static const struct ieee80211_regdomain rtl_regdom_global = {
107static const struct ieee80211_regdomain rtl_regdom_no_midband = {
78 .n_reg_rules = 3, 108 .n_reg_rules = 3,
79 .alpha2 = "99", 109 .alpha2 = "99",
80 .reg_rules = { 110 .reg_rules = {
81 RTL819x_2GHZ_CH01_11, 111 RTL819x_2GHZ_CH01_11,
82 RTL819x_2GHZ_CH12_13,
83 RTL819x_2GHZ_CH14,
112 RTL819x_5GHZ_5150_5350,
113 RTL819x_5GHZ_5725_5850,
84 } 114 }
85}; 115};
86 116
87static const struct ieee80211_regdomain rtl_regdom_world = {
88 .n_reg_rules = 2,
117static const struct ieee80211_regdomain rtl_regdom_60_64 = {
118 .n_reg_rules = 3,
89 .alpha2 = "99", 119 .alpha2 = "99",
90 .reg_rules = { 120 .reg_rules = {
91 RTL819x_2GHZ_CH01_11, 121 RTL819x_2GHZ_CH01_11,
92 RTL819x_2GHZ_CH12_13, 122 RTL819x_2GHZ_CH12_13,
93 }
123 RTL819x_5GHZ_5725_5850,
124 }
125};
126
127static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
128 .n_reg_rules = 4,
129 .alpha2 = "99",
130 .reg_rules = {
131 RTL819x_2GHZ_CH01_11,
132 RTL819x_2GHZ_CH12_13,
133 RTL819x_2GHZ_CH14,
134 RTL819x_5GHZ_5725_5850,
135 }
136};
137
138static const struct ieee80211_regdomain rtl_regdom_14 = {
139 .n_reg_rules = 3,
140 .alpha2 = "99",
141 .reg_rules = {
142 RTL819x_2GHZ_CH01_11,
143 RTL819x_2GHZ_CH12_13,
144 RTL819x_2GHZ_CH14,
145 }
94}; 146};
95 147
96static bool _rtl_is_radar_freq(u16 center_freq) 148static bool _rtl_is_radar_freq(u16 center_freq)
@@ -162,6 +214,8 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
162 u32 bandwidth = 0; 214 u32 bandwidth = 0;
163 int r; 215 int r;
164 216
217 if (!wiphy->bands[IEEE80211_BAND_2GHZ])
218 return;
165 sband = wiphy->bands[IEEE80211_BAND_2GHZ]; 219 sband = wiphy->bands[IEEE80211_BAND_2GHZ];
166 220
167 /* 221 /*
@@ -292,25 +346,26 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
292{ 346{
293 switch (reg->country_code) { 347 switch (reg->country_code) {
294 case COUNTRY_CODE_FCC: 348 case COUNTRY_CODE_FCC:
349 return &rtl_regdom_no_midband;
295 case COUNTRY_CODE_IC: 350 case COUNTRY_CODE_IC:
296 return &rtl_regdom_11; 351 return &rtl_regdom_11;
297 case COUNTRY_CODE_ETSI: 352 case COUNTRY_CODE_ETSI:
353 case COUNTRY_CODE_TELEC_NETGEAR:
354 return &rtl_regdom_60_64;
298 case COUNTRY_CODE_SPAIN: 355 case COUNTRY_CODE_SPAIN:
299 case COUNTRY_CODE_FRANCE: 356 case COUNTRY_CODE_FRANCE:
300 case COUNTRY_CODE_ISRAEL: 357 case COUNTRY_CODE_ISRAEL:
301 case COUNTRY_CODE_TELEC_NETGEAR:
302 return &rtl_regdom_world;
358 case COUNTRY_CODE_WORLD_WIDE_13:
359 return &rtl_regdom_12_13;
303 case COUNTRY_CODE_MKK: 360 case COUNTRY_CODE_MKK:
304 case COUNTRY_CODE_MKK1: 361 case COUNTRY_CODE_MKK1:
305 case COUNTRY_CODE_TELEC: 362 case COUNTRY_CODE_TELEC:
306 case COUNTRY_CODE_MIC: 363 case COUNTRY_CODE_MIC:
307 return &rtl_regdom_global;
364 return &rtl_regdom_14_60_64;
308 case COUNTRY_CODE_GLOBAL_DOMAIN: 365 case COUNTRY_CODE_GLOBAL_DOMAIN:
309 return &rtl_regdom_global;
366 return &rtl_regdom_14;
310 case COUNTRY_CODE_WORLD_WIDE_13:
311 return &rtl_regdom_world;
312 default: 367 default:
313 return &rtl_regdom_world;
368 return &rtl_regdom_no_midband;
314 } 369 }
315} 370}
316 371
@@ -323,9 +378,11 @@ static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
323 const struct ieee80211_regdomain *regd; 378 const struct ieee80211_regdomain *regd;
324 379
325 wiphy->reg_notifier = reg_notifier; 380 wiphy->reg_notifier = reg_notifier;
381
326 wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; 382 wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
327 wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY; 383 wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
328 wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS; 384 wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
385
329 regd = _rtl_regdomain_select(reg); 386 regd = _rtl_regdomain_select(reg);
330 wiphy_apply_custom_regulatory(wiphy, regd); 387 wiphy_apply_custom_regulatory(wiphy, regd);
331 _rtl_reg_apply_radar_flags(wiphy); 388 _rtl_reg_apply_radar_flags(wiphy);
@@ -355,8 +412,8 @@ int rtl_regd_init(struct ieee80211_hw *hw,
355 if (wiphy == NULL || &rtlpriv->regd == NULL) 412 if (wiphy == NULL || &rtlpriv->regd == NULL)
356 return -EINVAL; 413 return -EINVAL;
357 414
358 /* force the channel plan to world wide 13 */
359 rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
415 /* init country_code from efuse channel plan */
416 rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
360 417
361 RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE, 418 RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
362 (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n", 419 (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
@@ -373,8 +430,8 @@ int rtl_regd_init(struct ieee80211_hw *hw,
373 country = _rtl_regd_find_country(rtlpriv->regd.country_code); 430 country = _rtl_regd_find_country(rtlpriv->regd.country_code);
374 431
375 if (country) { 432 if (country) {
376 rtlpriv->regd.alpha2[0] = country->isoName[0];
377 rtlpriv->regd.alpha2[1] = country->isoName[1];
433 rtlpriv->regd.alpha2[0] = country->iso_name[0];
434 rtlpriv->regd.alpha2[1] = country->iso_name[1];
378 } else { 435 } else {
379 rtlpriv->regd.alpha2[0] = '0'; 436 rtlpriv->regd.alpha2[0] = '0';
380 rtlpriv->regd.alpha2[1] = '0'; 437 rtlpriv->regd.alpha2[1] = '0';
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
index 4cdbc4ae76d4..d23118938fac 100644
--- a/drivers/net/wireless/rtlwifi/regd.h
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -32,7 +32,7 @@
32 32
33struct country_code_to_enum_rd { 33struct country_code_to_enum_rd {
34 u16 countrycode; 34 u16 countrycode;
35 const char *isoName;
35 const char *iso_name;
36}; 36};
37 37
38enum country_code_type_t { 38enum country_code_type_t {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index bb023274414c..97183829b9be 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -28,10 +28,26 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include "dm_common.h" 30#include "dm_common.h"
31#include "phy_common.h"
32#include "../pci.h"
33#include "../base.h"
31 34
32struct dig_t dm_digtable; 35struct dig_t dm_digtable;
33static struct ps_t dm_pstable; 36static struct ps_t dm_pstable;
34 37
38#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
39#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
40#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
41#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1)
42#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1)
43
44#define RTLPRIV (struct rtl_priv *)
45#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
46 ((RTLPRIV(_priv))->mac80211.opmode == \
47 NL80211_IFTYPE_ADHOC) ? \
48 ((RTLPRIV(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \
49 ((RTLPRIV(_priv))->dm.undecorated_smoothed_pwdb)
50
35static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 51static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
36 0x7f8001fe, 52 0x7f8001fe,
37 0x788001e2, 53 0x788001e2,
@@ -304,7 +320,7 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
304 320
305static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) 321static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
306{ 322{
307 static u8 binitialized; /* initialized to false */
323 static u8 initialized; /* initialized to false */
308 struct rtl_priv *rtlpriv = rtl_priv(hw); 324 struct rtl_priv *rtlpriv = rtl_priv(hw);
309 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 325 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
310 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 326 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
@@ -315,11 +331,11 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
315 331
316 if ((multi_sta == false) || (dm_digtable.cursta_connectctate != 332 if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
317 DIG_STA_DISCONNECT)) { 333 DIG_STA_DISCONNECT)) {
318 binitialized = false;
334 initialized = false;
319 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 335 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
320 return; 336 return;
321 } else if (binitialized == false) {
322 binitialized = true;
337 } else if (initialized == false) {
338 initialized = true;
323 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; 339 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
324 dm_digtable.cur_igvalue = 0x20; 340 dm_digtable.cur_igvalue = 0x20;
325 rtl92c_dm_write_dig(hw); 341 rtl92c_dm_write_dig(hw);
@@ -461,10 +477,7 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
461 if (mac->act_scanning == true) 477 if (mac->act_scanning == true)
462 return; 478 return;
463 479
464 if ((mac->link_state > MAC80211_NOLINK) &&
465 (mac->link_state < MAC80211_LINKED))
466 dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
467 else if (mac->link_state >= MAC80211_LINKED)
480 if (mac->link_state >= MAC80211_LINKED)
468 dm_digtable.cursta_connectctate = DIG_STA_CONNECT; 481 dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
469 else 482 else
470 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 483 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
@@ -562,23 +575,42 @@ EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
562static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) 575static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
563{ 576{
564 struct rtl_priv *rtlpriv = rtl_priv(hw); 577 struct rtl_priv *rtlpriv = rtl_priv(hw);
578 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
565 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 579 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
580
566 static u64 last_txok_cnt; 581 static u64 last_txok_cnt;
567 static u64 last_rxok_cnt; 582 static u64 last_rxok_cnt;
568 u64 cur_txok_cnt;
569 u64 cur_rxok_cnt;
583 static u32 last_bt_edca_ul;
584 static u32 last_bt_edca_dl;
585 u64 cur_txok_cnt = 0;
586 u64 cur_rxok_cnt = 0;
570 u32 edca_be_ul = 0x5ea42b; 587 u32 edca_be_ul = 0x5ea42b;
571 u32 edca_be_dl = 0x5ea42b; 588 u32 edca_be_dl = 0x5ea42b;
589 bool bt_change_edca = false;
572 590
573 if (mac->opmode == NL80211_IFTYPE_ADHOC)
574 goto dm_checkedcaturbo_exit;
591 if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
592 (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
593 rtlpriv->dm.current_turbo_edca = false;
594 last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
595 last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
596 }
597
598 if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
599 edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
600 bt_change_edca = true;
601 }
602
603 if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
604 edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl;
605 bt_change_edca = true;
606 }
575 607
576 if (mac->link_state != MAC80211_LINKED) { 608 if (mac->link_state != MAC80211_LINKED) {
577 rtlpriv->dm.current_turbo_edca = false; 609 rtlpriv->dm.current_turbo_edca = false;
578 return; 610 return;
579 } 611 }
580 612
581 if (!mac->ht_enable) { /*FIX MERGE */
613 if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
582 if (!(edca_be_ul & 0xffff0000)) 614 if (!(edca_be_ul & 0xffff0000))
583 edca_be_ul |= 0x005e0000; 615 edca_be_ul |= 0x005e0000;
584 616
@@ -586,10 +618,12 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
586 edca_be_dl |= 0x005e0000; 618 edca_be_dl |= 0x005e0000;
587 } 619 }
588 620
589 if ((!rtlpriv->dm.is_any_nonbepkts) &&
590 (!rtlpriv->dm.disable_framebursting)) {
621 if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
622 (!rtlpriv->dm.disable_framebursting))) {
623
591 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; 624 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
592 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; 625 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
626
593 if (cur_rxok_cnt > 4 * cur_txok_cnt) { 627 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
594 if (!rtlpriv->dm.is_cur_rdlstate || 628 if (!rtlpriv->dm.is_cur_rdlstate ||
595 !rtlpriv->dm.current_turbo_edca) { 629 !rtlpriv->dm.current_turbo_edca) {
@@ -618,7 +652,6 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
618 } 652 }
619 } 653 }
620 654
621dm_checkedcaturbo_exit:
622 rtlpriv->dm.is_any_nonbepkts = false; 655 rtlpriv->dm.is_any_nonbepkts = false;
623 last_txok_cnt = rtlpriv->stats.txbytesunicast; 656 last_txok_cnt = rtlpriv->stats.txbytesunicast;
624 last_rxok_cnt = rtlpriv->stats.rxbytesunicast; 657 last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
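Editor's aside (not part of the patch): the up/downlink decision the EDCA-turbo code above makes from the unicast byte counters, as plain arithmetic. A measurement period counts as downlink-dominated when the received bytes exceed four times the transmitted bytes, which selects edca_be_dl instead of edca_be_ul.

#include <stdbool.h>
#include <stdint.h>

static bool edca_period_is_downlink(uint64_t tx_now, uint64_t tx_last,
				    uint64_t rx_now, uint64_t rx_last)
{
	uint64_t cur_tx = tx_now - tx_last;     /* unicast TX bytes this period */
	uint64_t cur_rx = rx_now - rx_last;     /* unicast RX bytes this period */

	return cur_rx > 4 * cur_tx;
}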
@@ -633,14 +666,14 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
633 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 666 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
634 u8 thermalvalue, delta, delta_lck, delta_iqk; 667 u8 thermalvalue, delta, delta_lck, delta_iqk;
635 long ele_a, ele_d, temp_cck, val_x, value32; 668 long ele_a, ele_d, temp_cck, val_x, value32;
636 long val_y, ele_c;
637 u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
669 long val_y, ele_c = 0;
670 u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
638 int i; 671 int i;
639 bool is2t = IS_92C_SERIAL(rtlhal->version); 672 bool is2t = IS_92C_SERIAL(rtlhal->version);
640 u8 txpwr_level[2] = {0, 0}; 673 u8 txpwr_level[2] = {0, 0};
641 u8 ofdm_min_index = 6, rf; 674 u8 ofdm_min_index = 6, rf;
642 675
643 rtlpriv->dm.txpower_trackingInit = true;
676 rtlpriv->dm.txpower_trackinginit = true;
644 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 677 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
645 ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n")); 678 ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
646 679
@@ -683,7 +716,6 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
683 for (i = 0; i < OFDM_TABLE_LENGTH; i++) { 716 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
684 if (ele_d == (ofdmswing_table[i] & 717 if (ele_d == (ofdmswing_table[i] &
685 MASKOFDM_D)) { 718 MASKOFDM_D)) {
686 ofdm_index_old[1] = (u8) i;
687 719
688 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, 720 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
689 DBG_LOUD, 721 DBG_LOUD,
@@ -1062,7 +1094,7 @@ static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
1062 struct rtl_priv *rtlpriv = rtl_priv(hw); 1094 struct rtl_priv *rtlpriv = rtl_priv(hw);
1063 1095
1064 rtlpriv->dm.txpower_tracking = true; 1096 rtlpriv->dm.txpower_tracking = true;
1065 rtlpriv->dm.txpower_trackingInit = false;
1097 rtlpriv->dm.txpower_trackinginit = false;
1066 1098
1067 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 1099 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1068 ("pMgntInfo->txpower_tracking = %d\n", 1100 ("pMgntInfo->txpower_tracking = %d\n",
@@ -1132,6 +1164,7 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1132 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1164 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1133 struct rate_adaptive *p_ra = &(rtlpriv->ra); 1165 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1134 u32 low_rssithresh_for_ra, high_rssithresh_for_ra; 1166 u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
1167 struct ieee80211_sta *sta = NULL;
1135 1168
1136 if (is_hal_stop(rtlhal)) { 1169 if (is_hal_stop(rtlhal)) {
1137 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, 1170 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
@@ -1145,8 +1178,8 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1145 return; 1178 return;
1146 } 1179 }
1147 1180
1148 if (mac->link_state == MAC80211_LINKED) {
1149
1181 if (mac->link_state == MAC80211_LINKED &&
1182 mac->opmode == NL80211_IFTYPE_STATION) {
1150 switch (p_ra->pre_ratr_state) { 1183 switch (p_ra->pre_ratr_state) {
1151 case DM_RATR_STA_HIGH: 1184 case DM_RATR_STA_HIGH:
1152 high_rssithresh_for_ra = 50; 1185 high_rssithresh_for_ra = 50;
@@ -1185,10 +1218,13 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1185 ("PreState = %d, CurState = %d\n", 1218 ("PreState = %d, CurState = %d\n",
1186 p_ra->pre_ratr_state, p_ra->ratr_state)); 1219 p_ra->pre_ratr_state, p_ra->ratr_state));
1187 1220
1188 rtlpriv->cfg->ops->update_rate_mask(hw,
1221 rcu_read_lock();
1222 sta = ieee80211_find_sta(mac->vif, mac->bssid);
1223 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
1189 p_ra->ratr_state); 1224 p_ra->ratr_state);
1190 1225
1191 p_ra->pre_ratr_state = p_ra->ratr_state; 1226 p_ra->pre_ratr_state = p_ra->ratr_state;
1227 rcu_read_unlock();
1192 } 1228 }
1193 } 1229 }
1194} 1230}
@@ -1202,51 +1238,6 @@ static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1202 dm_pstable.rssi_val_min = 0; 1238 dm_pstable.rssi_val_min = 0;
1203} 1239}
1204 1240
1205static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
1206{
1207 struct rtl_priv *rtlpriv = rtl_priv(hw);
1208 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1209
1210 if (dm_pstable.rssi_val_min != 0) {
1211 if (dm_pstable.pre_ccastate == CCA_2R) {
1212 if (dm_pstable.rssi_val_min >= 35)
1213 dm_pstable.cur_ccasate = CCA_1R;
1214 else
1215 dm_pstable.cur_ccasate = CCA_2R;
1216 } else {
1217 if (dm_pstable.rssi_val_min <= 30)
1218 dm_pstable.cur_ccasate = CCA_2R;
1219 else
1220 dm_pstable.cur_ccasate = CCA_1R;
1221 }
1222 } else {
1223 dm_pstable.cur_ccasate = CCA_MAX;
1224 }
1225
1226 if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
1227 if (dm_pstable.cur_ccasate == CCA_1R) {
1228 if (get_rf_type(rtlphy) == RF_2T2R) {
1229 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1230 MASKBYTE0, 0x13);
1231 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
1232 } else {
1233 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1234 MASKBYTE0, 0x23);
1235 rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
1236 }
1237 } else {
1238 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
1239 0x33);
1240 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
1241 }
1242 dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
1243 }
1244
1245 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
1246 (dm_pstable.cur_ccasate ==
1247 0) ? "1RCCA" : "2RCCA"));
1248}
1249
1250void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) 1241void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1251{ 1242{
1252 static u8 initialize; 1243 static u8 initialize;
@@ -1352,7 +1343,9 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1352 } 1343 }
1353 1344
1354 if (IS_92C_SERIAL(rtlhal->version)) 1345 if (IS_92C_SERIAL(rtlhal->version))
1355 rtl92c_dm_1r_cca(hw);
1346 ;/* rtl92c_dm_1r_cca(hw); */
1347 else
1348 rtl92c_dm_rf_saving(hw, false);
1356} 1349}
1357 1350
1358void rtl92c_dm_init(struct ieee80211_hw *hw) 1351void rtl92c_dm_init(struct ieee80211_hw *hw)
@@ -1369,6 +1362,84 @@ void rtl92c_dm_init(struct ieee80211_hw *hw)
1369} 1362}
1370EXPORT_SYMBOL(rtl92c_dm_init); 1363EXPORT_SYMBOL(rtl92c_dm_init);
1371 1364
1365void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
1366{
1367 struct rtl_priv *rtlpriv = rtl_priv(hw);
1368 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1369 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1370 long undecorated_smoothed_pwdb;
1371
1372 if (!rtlpriv->dm.dynamic_txpower_enable)
1373 return;
1374
1375 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
1376 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
1377 return;
1378 }
1379
1380 if ((mac->link_state < MAC80211_LINKED) &&
1381 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
1382 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
1383 ("Not connected to any\n"));
1384
1385 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
1386
1387 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
1388 return;
1389 }
1390
1391 if (mac->link_state >= MAC80211_LINKED) {
1392 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1393 undecorated_smoothed_pwdb =
1394 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1395 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1396 ("AP Client PWDB = 0x%lx\n",
1397 undecorated_smoothed_pwdb));
1398 } else {
1399 undecorated_smoothed_pwdb =
1400 rtlpriv->dm.undecorated_smoothed_pwdb;
1401 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1402 ("STA Default Port PWDB = 0x%lx\n",
1403 undecorated_smoothed_pwdb));
1404 }
1405 } else {
1406 undecorated_smoothed_pwdb =
1407 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1408
1409 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1410 ("AP Ext Port PWDB = 0x%lx\n",
1411 undecorated_smoothed_pwdb));
1412 }
1413
1414 if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
1415 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
1416 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1417 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
1418 } else if ((undecorated_smoothed_pwdb <
1419 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
1420 (undecorated_smoothed_pwdb >=
1421 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
1422
1423 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
1424 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1425 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
1426 } else if (undecorated_smoothed_pwdb <
1427 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
1428 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
1429 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1430 ("TXHIGHPWRLEVEL_NORMAL\n"));
1431 }
1432
1433 if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
1434 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1435 ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
1436 rtlphy->current_channel));
1437 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
1438 }
1439
1440 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
1441}
1442
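Editor's aside (not part of the patch): the PWDB thresholds in rtl92c_dm_dynamic_txpower() above, as a pure function. lvl1/lvl2 stand for TX_POWER_NEAR_FIELD_THRESH_LVL1/2, the return values mirror TXHIGHPWRLEVEL_NORMAL (0) and TXHIGHPWRLEVEL_LEVEL1 (1), and the gaps between the set and clear thresholds keep the previous level, which gives the code its hysteresis.

static int example_dynamic_tx_level(long pwdb, int previous_level,
				    long lvl1, long lvl2)
{
	if (pwdb >= lvl2)
		return 1;                       /* very near the AP: back off */
	if (pwdb >= lvl1 && pwdb < lvl2 - 3)
		return 1;
	if (pwdb < lvl1 - 5)
		return 0;                       /* far away: normal tx power  */
	return previous_level;                  /* inside a hysteresis band   */
}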
1372void rtl92c_dm_watchdog(struct ieee80211_hw *hw) 1443void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1373{ 1444{
1374 struct rtl_priv *rtlpriv = rtl_priv(hw); 1445 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1388,11 +1459,321 @@ void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1388 rtl92c_dm_dig(hw); 1459 rtl92c_dm_dig(hw);
1389 rtl92c_dm_false_alarm_counter_statistics(hw); 1460 rtl92c_dm_false_alarm_counter_statistics(hw);
1390 rtl92c_dm_dynamic_bb_powersaving(hw); 1461 rtl92c_dm_dynamic_bb_powersaving(hw);
1391 rtlpriv->cfg->ops->dm_dynamic_txpower(hw);
1462 rtl92c_dm_dynamic_txpower(hw);
1392 rtl92c_dm_check_txpower_tracking(hw); 1463 rtl92c_dm_check_txpower_tracking(hw);
1393 rtl92c_dm_refresh_rate_adaptive_mask(hw); 1464 rtl92c_dm_refresh_rate_adaptive_mask(hw);
1465 rtl92c_dm_bt_coexist(hw);
1394 rtl92c_dm_check_edca_turbo(hw); 1466 rtl92c_dm_check_edca_turbo(hw);
1395
1396 } 1467 }
1397} 1468}
1398EXPORT_SYMBOL(rtl92c_dm_watchdog); 1469EXPORT_SYMBOL(rtl92c_dm_watchdog);
1470
1471u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
1472{
1473 struct rtl_priv *rtlpriv = rtl_priv(hw);
1474 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1475 long undecorated_smoothed_pwdb;
1476 u8 curr_bt_rssi_state = 0x00;
1477
1478 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1479 undecorated_smoothed_pwdb =
1480 GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
1481 } else {
1482 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)
1483 undecorated_smoothed_pwdb = 100;
1484 else
1485 undecorated_smoothed_pwdb =
1486 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1487 }
1488
1489 /* Check RSSI to determine HighPower/NormalPower state for
1490 * BT coexistence. */
1491 if (undecorated_smoothed_pwdb >= 67)
1492 curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER);
1493 else if (undecorated_smoothed_pwdb < 62)
1494 curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER;
1495
1496 /* Check RSSI to determine AMPDU setting for BT coexistence. */
1497 if (undecorated_smoothed_pwdb >= 40)
1498 curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF);
1499 else if (undecorated_smoothed_pwdb <= 32)
1500 curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF;
1501
1502 /* Mark the RSSI state; it will be used to determine the BT
1503 * coexistence setting later. */
1504 if (undecorated_smoothed_pwdb < 35)
1505 curr_bt_rssi_state |= BT_RSSI_STATE_SPECIAL_LOW;
1506 else
1507 curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
1508
1509 /* Set Tx Power according to BT status. */
1510 if (undecorated_smoothed_pwdb >= 30)
1511 curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW;
1512 else if (undecorated_smoothed_pwdb < 25)
1513 curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
1514
1515 /* Check BT state related to BT_Idle in B/G mode. */
1516 if (undecorated_smoothed_pwdb < 15)
1517 curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
1518 else
1519 curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
1520
1521 if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) {
1522 rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state;
1523 return true;
1524 } else {
1525 return false;
1526 }
1527}
1528EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);
1529
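Editor's aside (not part of the patch): most of the flags in rtl92c_bt_rssi_state_change() above follow the same set-below / clear-above hysteresis pattern (TXPOWER_LOW uses the opposite polarity, and SPECIAL_LOW / BG_EDCA_LOW use a single threshold). The pattern as one helper, with stand-in names:

static unsigned int example_flag_hysteresis(unsigned int state, unsigned int bit,
					    long value, long set_below,
					    long clear_at_or_above)
{
	if (value < set_below)
		state |= bit;                   /* weak signal: set the flag   */
	else if (value >= clear_at_or_above)
		state &= ~bit;                  /* strong signal: clear it     */
	/* values in between keep the previous state (hysteresis) */
	return state;
}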
1530static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
1531{
1532 struct rtl_priv *rtlpriv = rtl_priv(hw);
1533 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1534
1535 u32 polling, ratio_tx, ratio_pri;
1536 u32 bt_tx, bt_pri;
1537 u8 bt_state;
1538 u8 cur_service_type;
1539
1540 if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
1541 return false;
1542
1543 bt_state = rtl_read_byte(rtlpriv, 0x4fd);
1544 bt_tx = rtl_read_dword(rtlpriv, 0x488);
1545 bt_tx = bt_tx & 0x00ffffff;
1546 bt_pri = rtl_read_dword(rtlpriv, 0x48c);
1547 bt_pri = bt_pri & 0x00ffffff;
1548 polling = rtl_read_dword(rtlpriv, 0x490);
1549
1550 if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
1551 polling == 0xffffffff && bt_state == 0xff)
1552 return false;
1553
1554 bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
1555 if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
1556 rtlpcipriv->bt_coexist.bt_cur_state = bt_state;
1557
1558 if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
1559 rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
1560
1561 bt_state = bt_state |
1562 ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
1563 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
1564 BIT_OFFSET_LEN_MASK_32(2, 1);
1565 rtl_write_byte(rtlpriv, 0x4fd, bt_state);
1566 }
1567 return true;
1568 }
1569
1570 ratio_tx = bt_tx * 1000 / polling;
1571 ratio_pri = bt_pri * 1000 / polling;
1572 rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
1573 rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;
1574
1575 if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
1576
1577 if ((ratio_tx < 30) && (ratio_pri < 30))
1578 cur_service_type = BT_IDLE;
1579 else if ((ratio_pri > 110) && (ratio_pri < 250))
1580 cur_service_type = BT_SCO;
1581 else if ((ratio_tx >= 200) && (ratio_pri >= 200))
1582 cur_service_type = BT_BUSY;
1583 else if ((ratio_tx >= 350) && (ratio_tx < 500))
1584 cur_service_type = BT_OTHERBUSY;
1585 else if (ratio_tx >= 500)
1586 cur_service_type = BT_PAN;
1587 else
1588 cur_service_type = BT_OTHER_ACTION;
1589
1590 if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
1591 rtlpcipriv->bt_coexist.bt_service = cur_service_type;
1592 bt_state = bt_state |
1593 ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
1594 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
1595 ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
1596 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
1597
1598 /* Add interrupt migration when BT is not in the
1599 * idle (no traffic) state. */
1600 if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
1601 rtl_write_word(rtlpriv, 0x504, 0x0ccc);
1602 rtl_write_byte(rtlpriv, 0x506, 0x54);
1603 rtl_write_byte(rtlpriv, 0x507, 0x54);
1604 } else {
1605 rtl_write_byte(rtlpriv, 0x506, 0x00);
1606 rtl_write_byte(rtlpriv, 0x507, 0x00);
1607 }
1608
1609 rtl_write_byte(rtlpriv, 0x4fd, bt_state);
1610 return true;
1611 }
1612 }
1613
1614 return false;
1615
1616}
1617
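Editor's aside (not part of the patch): the ratio-based service classification in rtl92c_bt_state_change() above, with the ratios computed exactly as in the function (counter * 1000 / polling) and the checks kept in the same order.

enum example_bt_service {
	EX_BT_IDLE, EX_BT_SCO, EX_BT_BUSY,
	EX_BT_OTHERBUSY, EX_BT_PAN, EX_BT_OTHER_ACTION
};

static enum example_bt_service example_classify_bt(unsigned int ratio_tx,
						   unsigned int ratio_pri)
{
	if (ratio_tx < 30 && ratio_pri < 30)
		return EX_BT_IDLE;
	if (ratio_pri > 110 && ratio_pri < 250)
		return EX_BT_SCO;
	if (ratio_tx >= 200 && ratio_pri >= 200)
		return EX_BT_BUSY;
	if (ratio_tx >= 350 && ratio_tx < 500)
		return EX_BT_OTHERBUSY;
	if (ratio_tx >= 500)
		return EX_BT_PAN;
	return EX_BT_OTHER_ACTION;
}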
1618static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw)
1619{
1620 struct rtl_priv *rtlpriv = rtl_priv(hw);
1621 static bool media_connect;
1622
1623 if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
1624 media_connect = false;
1625 } else {
1626 if (!media_connect) {
1627 media_connect = true;
1628 return true;
1629 }
1630 media_connect = true;
1631 }
1632
1633 return false;
1634}
1635
1636static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
1637{
1638 struct rtl_priv *rtlpriv = rtl_priv(hw);
1639 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1640
1641
1642 if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) {
1643 rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b;
1644 rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b;
1645 } else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) {
1646 rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f;
1647 rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f;
1648 } else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) {
1649 if (rtlpcipriv->bt_coexist.ratio_tx > 160) {
1650 rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f;
1651 rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f;
1652 } else {
1653 rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b;
1654 rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b;
1655 }
1656 } else {
1657 rtlpcipriv->bt_coexist.bt_edca_ul = 0;
1658 rtlpcipriv->bt_coexist.bt_edca_dl = 0;
1659 }
1660
1661 if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) &&
1662 (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
1663 (rtlpriv->mac80211.mode == (WIRELESS_MODE_G | WIRELESS_MODE_B))) &&
1664 (rtlpcipriv->bt_coexist.bt_rssi_state &
1665 BT_RSSI_STATE_BG_EDCA_LOW)) {
1666 rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b;
1667 rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b;
1668 }
1669}
1670
1671static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw)
1672{
1673 struct rtl_priv *rtlpriv = rtl_priv(hw);
1674 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1675
1676
1677 /* Only enable HW BT coexistence when BT is in the "Busy" state. */
1678 if (rtlpriv->mac80211.vendor == PEER_CISCO &&
1679 rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) {
1680 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
1681 } else {
1682 if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) &&
1683 (rtlpcipriv->bt_coexist.bt_rssi_state &
1684 BT_RSSI_STATE_NORMAL_POWER)) {
1685 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
1686 } else if ((rtlpcipriv->bt_coexist.bt_service ==
1687 BT_OTHER_ACTION) && (rtlpriv->mac80211.mode <
1688 WIRELESS_MODE_N_24G) &&
1689 (rtlpcipriv->bt_coexist.bt_rssi_state &
1690 BT_RSSI_STATE_SPECIAL_LOW)) {
1691 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
1692 } else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
1693 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
1694 } else {
1695 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
1696 }
1697 }
1698
1699 if (rtlpcipriv->bt_coexist.bt_service == BT_PAN)
1700 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100);
1701 else
1702 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0);
1703
1704 if (rtlpcipriv->bt_coexist.bt_rssi_state &
1705 BT_RSSI_STATE_NORMAL_POWER) {
1706 rtl92c_bt_set_normal(hw);
1707 } else {
1708 rtlpcipriv->bt_coexist.bt_edca_ul = 0;
1709 rtlpcipriv->bt_coexist.bt_edca_dl = 0;
1710 }
1711
1712 if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
1713 rtlpriv->cfg->ops->set_rfreg(hw,
1714 RF90_PATH_A,
1715 0x1e,
1716 0xf0, 0xf);
1717 } else {
1718 rtlpriv->cfg->ops->set_rfreg(hw,
1719 RF90_PATH_A, 0x1e, 0xf0,
1720 rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
1721 }
1722
1723 if (!rtlpriv->dm.dynamic_txpower_enable) {
1724 if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
1725 if (rtlpcipriv->bt_coexist.bt_rssi_state &
1726 BT_RSSI_STATE_TXPOWER_LOW) {
1727 rtlpriv->dm.dynamic_txhighpower_lvl =
1728 TXHIGHPWRLEVEL_BT2;
1729 } else {
1730 rtlpriv->dm.dynamic_txhighpower_lvl =
1731 TXHIGHPWRLEVEL_BT1;
1732 }
1733 } else {
1734 rtlpriv->dm.dynamic_txhighpower_lvl =
1735 TXHIGHPWRLEVEL_NORMAL;
1736 }
1737 rtl92c_phy_set_txpower_level(hw,
1738 rtlpriv->phy.current_channel);
1739 }
1740}
1741
1742static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
1743{
1744 struct rtl_priv *rtlpriv = rtl_priv(hw);
1745 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1746
1747 if (rtlpcipriv->bt_coexist.bt_cur_state) {
1748 if (rtlpcipriv->bt_coexist.bt_ant_isolation)
1749 rtl92c_bt_ant_isolation(hw);
1750 } else {
1751 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
1752 rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
1753 rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
1754
1755 rtlpcipriv->bt_coexist.bt_edca_ul = 0;
1756 rtlpcipriv->bt_coexist.bt_edca_dl = 0;
1757 }
1758}
1759
1760void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw)
1761{
1762 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1763
1764 bool wifi_connect_change;
1765 bool bt_state_change;
1766 bool rssi_state_change;
1767
1768 if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
1769 (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
1770
1771 wifi_connect_change = rtl92c_bt_wifi_connect_change(hw);
1772 bt_state_change = rtl92c_bt_state_change(hw);
1773 rssi_state_change = rtl92c_bt_rssi_state_change(hw);
1774
1775 if (wifi_connect_change || bt_state_change || rssi_state_change)
1776 rtl92c_check_bt_change(hw);
1777 }
1778}
1779EXPORT_SYMBOL(rtl92c_dm_bt_coexist);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index b9cbb0a3c03f..b9736d3e9a39 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -200,5 +200,7 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
200void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); 200void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
201void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); 201void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
202void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery); 202void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
203void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
204void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw);
203 205
204#endif 206#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 28a6ce3bc239..50303e1adff1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -171,7 +171,6 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
171static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) 171static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
172{ 172{
173 struct rtl_priv *rtlpriv = rtl_priv(hw); 173 struct rtl_priv *rtlpriv = rtl_priv(hw);
174 int err = -EIO;
175 u32 counter = 0; 174 u32 counter = 0;
176 u32 value32; 175 u32 value32;
177 176
@@ -184,7 +183,7 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
184 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 183 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
185 ("chksum report faill ! REG_MCUFWDL:0x%08x .\n", 184 ("chksum report faill ! REG_MCUFWDL:0x%08x .\n",
186 value32)); 185 value32));
187 goto exit;
186 return -EIO;
188 } 187 }
189 188
190 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, 189 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
@@ -204,8 +203,7 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
204 ("Polling FW ready success!!" 203 ("Polling FW ready success!!"
205 " REG_MCUFWDL:0x%08x .\n", 204 " REG_MCUFWDL:0x%08x .\n",
206 value32)); 205 value32));
207 err = 0;
206 return 0;
208 goto exit;
209 } 207 }
210 208
211 mdelay(FW_8192C_POLLING_DELAY); 209 mdelay(FW_8192C_POLLING_DELAY);
@@ -214,9 +212,7 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
214 212
215 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 213 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
216 ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32)); 214 ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
217
218exit:
219 return err;
215 return -EIO;
220} 216}
221 217
222int rtl92c_download_fw(struct ieee80211_hw *hw) 218int rtl92c_download_fw(struct ieee80211_hw *hw)
@@ -226,32 +222,16 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
226 struct rtl92c_firmware_header *pfwheader; 222 struct rtl92c_firmware_header *pfwheader;
227 u8 *pfwdata; 223 u8 *pfwdata;
228 u32 fwsize; 224 u32 fwsize;
229 int err;
230 enum version_8192c version = rtlhal->version; 225 enum version_8192c version = rtlhal->version;
231 const struct firmware *firmware;
232 226
233 printk(KERN_INFO "rtl8192cu: Loading firmware file %s\n", 227 printk(KERN_INFO "rtl8192c: Loading firmware file %s\n",
234 rtlpriv->cfg->fw_name); 228 rtlpriv->cfg->fw_name);
235 err = request_firmware(&firmware, rtlpriv->cfg->fw_name, 229 if (!rtlhal->pfirmware)
236 rtlpriv->io.dev);
237 if (err) {
238 printk(KERN_ERR "rtl8192cu: Firmware loading failed\n");
239 return 1;
240 }
241
242 if (firmware->size > 0x4000) {
243 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
244 ("Firmware is too big!\n"));
245 release_firmware(firmware);
246 return 1; 230 return 1;
247 }
248
249 memcpy(rtlhal->pfirmware, firmware->data, firmware->size);
250 fwsize = firmware->size;
251 release_firmware(firmware);
252 231
253 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; 232 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
254 pfwdata = (u8 *) rtlhal->pfirmware; 233 pfwdata = (u8 *) rtlhal->pfirmware;
234 fwsize = rtlhal->fwsize;
255 235
256 if (IS_FW_HEADER_EXIST(pfwheader)) { 236 if (IS_FW_HEADER_EXIST(pfwheader)) {
257 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, 237 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
@@ -267,8 +247,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
267 _rtl92c_write_fw(hw, version, pfwdata, fwsize); 247 _rtl92c_write_fw(hw, version, pfwdata, fwsize);
268 _rtl92c_enable_fw_download(hw, false); 248 _rtl92c_enable_fw_download(hw, false);
269 249
270 err = _rtl92c_fw_free_to_go(hw); 250 if (_rtl92c_fw_free_to_go(hw)) {
271 if (err) {
272 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 251 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
273 ("Firmware is not ready to run!\n")); 252 ("Firmware is not ready to run!\n"));
274 } else { 253 } else {
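rtl92c_download_fw() no longer calls request_firmware() itself; it now assumes the image has already been copied into rtlhal->pfirmware with its length stored in rtlhal->fwsize, and simply bails out if the buffer is missing. The loading step presumably moves to the bus-specific probe path, which is not part of this diff. A hedged sketch of what such a probe-time loader could look like, reusing the real request_firmware()/release_firmware() API and the 0x4000 size cap from the code removed above; the function name rtl_load_fw_to_hal() is illustrative, not taken from the driver:

#include <linux/firmware.h>
#include <linux/kernel.h>
/* struct rtl_priv / struct rtl_hal come from the driver's wifi.h. */

static int rtl_load_fw_to_hal(struct rtl_priv *rtlpriv, struct rtl_hal *rtlhal)
{
        const struct firmware *firmware;
        int err;

        err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
                               rtlpriv->io.dev);
        if (err) {
                printk(KERN_ERR "rtl8192c: Firmware loading failed\n");
                return err;
        }

        if (firmware->size > 0x4000) {          /* same cap as the old code */
                release_firmware(firmware);
                return -EINVAL;
        }

        /* Assumes rtlhal->pfirmware was allocated elsewhere and is large
         * enough; download_fw() then consumes pfirmware and fwsize. */
        memcpy(rtlhal->pfirmware, firmware->data, firmware->size);
        rtlhal->fwsize = firmware->size;
        release_firmware(firmware);
        return 0;
}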
@@ -300,10 +279,9 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
300 struct rtl_priv *rtlpriv = rtl_priv(hw); 279 struct rtl_priv *rtlpriv = rtl_priv(hw);
301 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 280 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
302 u8 boxnum; 281 u8 boxnum;
303 u16 box_reg, box_extreg; 282 u16 box_reg = 0, box_extreg = 0;
304 u8 u1b_tmp; 283 u8 u1b_tmp;
305 bool isfw_read = false; 284 bool isfw_read = false;
306 u8 buf_index = 0;
307 bool bwrite_sucess = false; 285 bool bwrite_sucess = false;
308 u8 wait_h2c_limmit = 100; 286 u8 wait_h2c_limmit = 100;
309 u8 wait_writeh2c_limmit = 100; 287 u8 wait_writeh2c_limmit = 100;
@@ -414,7 +392,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
414 case 1: 392 case 1:
415 boxcontent[0] &= ~(BIT(7)); 393 boxcontent[0] &= ~(BIT(7));
416 memcpy((u8 *) (boxcontent) + 1, 394 memcpy((u8 *) (boxcontent) + 1,
417 p_cmdbuffer + buf_index, 1); 395 p_cmdbuffer, 1);
418 396
419 for (idx = 0; idx < 4; idx++) { 397 for (idx = 0; idx < 4; idx++) {
420 rtl_write_byte(rtlpriv, box_reg + idx, 398 rtl_write_byte(rtlpriv, box_reg + idx,
@@ -424,7 +402,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
424 case 2: 402 case 2:
425 boxcontent[0] &= ~(BIT(7)); 403 boxcontent[0] &= ~(BIT(7));
426 memcpy((u8 *) (boxcontent) + 1, 404 memcpy((u8 *) (boxcontent) + 1,
427 p_cmdbuffer + buf_index, 2); 405 p_cmdbuffer, 2);
428 406
429 for (idx = 0; idx < 4; idx++) { 407 for (idx = 0; idx < 4; idx++) {
430 rtl_write_byte(rtlpriv, box_reg + idx, 408 rtl_write_byte(rtlpriv, box_reg + idx,
@@ -434,7 +412,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
434 case 3: 412 case 3:
435 boxcontent[0] &= ~(BIT(7)); 413 boxcontent[0] &= ~(BIT(7));
436 memcpy((u8 *) (boxcontent) + 1, 414 memcpy((u8 *) (boxcontent) + 1,
437 p_cmdbuffer + buf_index, 3); 415 p_cmdbuffer, 3);
438 416
439 for (idx = 0; idx < 4; idx++) { 417 for (idx = 0; idx < 4; idx++) {
440 rtl_write_byte(rtlpriv, box_reg + idx, 418 rtl_write_byte(rtlpriv, box_reg + idx,
@@ -444,9 +422,9 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
444 case 4: 422 case 4:
445 boxcontent[0] |= (BIT(7)); 423 boxcontent[0] |= (BIT(7));
446 memcpy((u8 *) (boxextcontent), 424 memcpy((u8 *) (boxextcontent),
447 p_cmdbuffer + buf_index, 2); 425 p_cmdbuffer, 2);
448 memcpy((u8 *) (boxcontent) + 1, 426 memcpy((u8 *) (boxcontent) + 1,
449 p_cmdbuffer + buf_index + 2, 2); 427 p_cmdbuffer + 2, 2);
450 428
451 for (idx = 0; idx < 2; idx++) { 429 for (idx = 0; idx < 2; idx++) {
452 rtl_write_byte(rtlpriv, box_extreg + idx, 430 rtl_write_byte(rtlpriv, box_extreg + idx,
@@ -461,9 +439,9 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
461 case 5: 439 case 5:
462 boxcontent[0] |= (BIT(7)); 440 boxcontent[0] |= (BIT(7));
463 memcpy((u8 *) (boxextcontent), 441 memcpy((u8 *) (boxextcontent),
464 p_cmdbuffer + buf_index, 2); 442 p_cmdbuffer, 2);
465 memcpy((u8 *) (boxcontent) + 1, 443 memcpy((u8 *) (boxcontent) + 1,
466 p_cmdbuffer + buf_index + 2, 3); 444 p_cmdbuffer + 2, 3);
467 445
468 for (idx = 0; idx < 2; idx++) { 446 for (idx = 0; idx < 2; idx++) {
469 rtl_write_byte(rtlpriv, box_extreg + idx, 447 rtl_write_byte(rtlpriv, box_extreg + idx,
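In the H2C mailbox hunks above, buf_index was initialised to 0 and never changed, so "p_cmdbuffer + buf_index" is simply "p_cmdbuffer" and the variable is dropped. The switch also shows how a command longer than the 3 usable bytes of a mailbox is split: for a 4- or 5-byte command the first 2 payload bytes go to the extension box and the remaining 2 or 3 bytes follow byte 0 of the normal box. A self-contained sketch of the 5-byte split; whether byte 0 also carries the command element ID is not visible in this hunk, so that placement is an assumption here:

#include <stdint.h>
#include <string.h>

static void fill_h2c_5byte(uint8_t cmd_id, const uint8_t *payload,
                           uint8_t boxcontent[4], uint8_t boxextcontent[2])
{
        /* Byte 0 of the normal box: BIT(7) flags that the extension box is
         * used; the command ID placement is assumed, not shown above. */
        boxcontent[0] = cmd_id | 0x80;

        /* First 2 payload bytes -> extension box. */
        memcpy(boxextcontent, payload, 2);

        /* Remaining 3 payload bytes -> bytes 1..3 of the normal box. */
        memcpy(boxcontent + 1, payload + 2, 3);
}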
@@ -561,6 +539,39 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
561} 539}
562EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd); 540EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd);
563 541
542static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
543 struct sk_buff *skb)
544{
545 struct rtl_priv *rtlpriv = rtl_priv(hw);
546 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
547 struct rtl8192_tx_ring *ring;
548 struct rtl_tx_desc *pdesc;
549 u8 own;
550 unsigned long flags;
551 struct sk_buff *pskb = NULL;
552
553 ring = &rtlpci->tx_ring[BEACON_QUEUE];
554
555 pskb = __skb_dequeue(&ring->queue);
556 if (pskb)
557 kfree_skb(pskb);
558
559 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
560
561 pdesc = &ring->desc[0];
562 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
563
564 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
565
566 __skb_queue_tail(&ring->queue, skb);
567
568 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
569
570 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
571
572 return true;
573}
574
564#define BEACON_PG 0 /*->1*/ 575#define BEACON_PG 0 /*->1*/
565#define PSPOLL_PG 2 576#define PSPOLL_PG 2
566#define NULL_PG 3 577#define NULL_PG 3
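The newly added _rtl92c_cmd_send_packet() reuses the beacon TX ring to push a command frame to the hardware: it frees whatever skb is still queued on BEACON_QUEUE, fills a command descriptor for ring slot 0 under the IRQ lock, queues the new skb and kicks TX polling on that queue (and currently always returns true). Callers hand it a fully built skb, as rtl92c_set_fw_rsvdpagepkt() does further down. A hedged sketch of such a caller; send_cmd_buf() is illustrative and not part of the driver:

/* Sketch only: assumes the usual kernel skb helpers and the helper added
 * above; cmd_buf/len describe a frame prepared by the caller. */
static bool send_cmd_buf(struct ieee80211_hw *hw, const u8 *cmd_buf, u32 len)
{
        struct sk_buff *skb = dev_alloc_skb(len);

        if (!skb)
                return false;

        /* Copy the prepared frame into the skb payload. */
        memcpy(skb_put(skb, len), cmd_buf, len);

        /* Hand it to the beacon-queue command path. */
        return _rtl92c_cmd_send_packet(hw, skb);
}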
@@ -678,7 +689,7 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
678 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 689 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
679}; 690};
680 691
681void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) 692void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
682{ 693{
683 struct rtl_priv *rtlpriv = rtl_priv(hw); 694 struct rtl_priv *rtlpriv = rtl_priv(hw);
684 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 695 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -687,12 +698,12 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
687 u32 totalpacketlen; 698 u32 totalpacketlen;
688 bool rtstatus; 699 bool rtstatus;
689 u8 u1RsvdPageLoc[3] = {0}; 700 u8 u1RsvdPageLoc[3] = {0};
690 bool b_dlok = false; 701 bool dlok = false;
691 702
692 u8 *beacon; 703 u8 *beacon;
693 u8 *p_pspoll; 704 u8 *pspoll;
694 u8 *nullfunc; 705 u8 *nullfunc;
695 u8 *p_probersp; 706 u8 *probersp;
696 /*--------------------------------------------------------- 707 /*---------------------------------------------------------
697 (1) beacon 708 (1) beacon
698 ---------------------------------------------------------*/ 709 ---------------------------------------------------------*/
@@ -703,10 +714,10 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
703 /*------------------------------------------------------- 714 /*-------------------------------------------------------
704 (2) ps-poll 715 (2) ps-poll
705 --------------------------------------------------------*/ 716 --------------------------------------------------------*/
706 p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; 717 pspoll = &reserved_page_packet[PSPOLL_PG * 128];
707 SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); 718 SET_80211_PS_POLL_AID(pspoll, (mac->assoc_id | 0xc000));
708 SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); 719 SET_80211_PS_POLL_BSSID(pspoll, mac->bssid);
709 SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); 720 SET_80211_PS_POLL_TA(pspoll, mac->mac_addr);
710 721
711 SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); 722 SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
712 723
@@ -723,10 +734,10 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
723 /*--------------------------------------------------------- 734 /*---------------------------------------------------------
724 (4) probe response 735 (4) probe response
725 ----------------------------------------------------------*/ 736 ----------------------------------------------------------*/
726 p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; 737 probersp = &reserved_page_packet[PROBERSP_PG * 128];
727 SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); 738 SET_80211_HDR_ADDRESS1(probersp, mac->bssid);
728 SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); 739 SET_80211_HDR_ADDRESS2(probersp, mac->mac_addr);
729 SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); 740 SET_80211_HDR_ADDRESS3(probersp, mac->bssid);
730 741
731 SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); 742 SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
732 743
@@ -744,12 +755,12 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
744 memcpy((u8 *) skb_put(skb, totalpacketlen), 755 memcpy((u8 *) skb_put(skb, totalpacketlen),
745 &reserved_page_packet, totalpacketlen); 756 &reserved_page_packet, totalpacketlen);
746 757
747 rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb); 758 rtstatus = _rtl92c_cmd_send_packet(hw, skb);
748 759
749 if (rtstatus) 760 if (rtstatus)
750 b_dlok = true; 761 dlok = true;
751 762
752 if (b_dlok) { 763 if (dlok) {
753 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 764 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
754 ("Set RSVD page location to Fw.\n")); 765 ("Set RSVD page location to Fw.\n"));
755 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, 766 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
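rtl92c_set_fw_rsvdpagepkt() builds all of the firmware's reserved-page templates (beacon, PS-Poll, null-function and probe response) in one buffer laid out in 128-byte pages, sends that buffer through the beacon queue via the new _rtl92c_cmd_send_packet(), and then reports each template's page location to the firmware with the RSVDPAGE H2C command. The offsets follow directly from the PG indices used above (BEACON_PG = 0, PSPOLL_PG = 2, NULL_PG = 3; PROBERSP_PG is used the same way but its value is not visible in this hunk). A small self-contained check of the offset arithmetic:

#include <stdio.h>

#define RSVD_PAGE_SIZE  128     /* reserved-page granularity used above */

#define BEACON_PG       0
#define PSPOLL_PG       2
#define NULL_PG         3

int main(void)
{
        /* Matches &reserved_page_packet[PG * 128] in the driver. */
        printf("beacon  at offset %d\n", BEACON_PG * RSVD_PAGE_SIZE);
        printf("ps-poll at offset %d\n", PSPOLL_PG * RSVD_PAGE_SIZE);
        printf("null    at offset %d\n", NULL_PG * RSVD_PAGE_SIZE);
        return 0;
}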
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3db33bd14666..3d5823c12621 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -27,8 +27,8 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#ifndef __RTL92C__FW__H__ 30#ifndef __RTL92C__FW__COMMON__H__
31#define __RTL92C__FW__H__ 31#define __RTL92C__FW__COMMON__H__
32 32
33#define FW_8192C_SIZE 0x3000 33#define FW_8192C_SIZE 0x3000
34#define FW_8192C_START_ADDRESS 0x1000 34#define FW_8192C_START_ADDRESS 0x1000
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index a70228278398..c5424cad43cb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -78,27 +78,29 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
78 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," 78 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
79 " data(%#x)\n", regaddr, bitmask, 79 " data(%#x)\n", regaddr, bitmask,
80 data)); 80 data));
81
81} 82}
82EXPORT_SYMBOL(rtl92c_phy_set_bb_reg); 83EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
83 84
84u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, 85u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
85 enum radio_path rfpath, u32 offset) 86 enum radio_path rfpath, u32 offset)
86{ 87{
87 RT_ASSERT(false, ("deprecated!\n")); 88 RT_ASSERT(false, ("deprecated!\n"));
88 return 0; 89 return 0;
90
89} 91}
90EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read); 92EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
91 93
92void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, 94void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
93 enum radio_path rfpath, u32 offset, 95 enum radio_path rfpath, u32 offset,
94 u32 data) 96 u32 data)
95{ 97{
96 RT_ASSERT(false, ("deprecated!\n")); 98 RT_ASSERT(false, ("deprecated!\n"));
97} 99}
98EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write); 100EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
99 101
100u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, 102u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
101 enum radio_path rfpath, u32 offset) 103 enum radio_path rfpath, u32 offset)
102{ 104{
103 struct rtl_priv *rtlpriv = rtl_priv(hw); 105 struct rtl_priv *rtlpriv = rtl_priv(hw);
104 struct rtl_phy *rtlphy = &(rtlpriv->phy); 106 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -149,8 +151,8 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
149EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read); 151EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
150 152
151void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, 153void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
152 enum radio_path rfpath, u32 offset, 154 enum radio_path rfpath, u32 offset,
153 u32 data) 155 u32 data)
154{ 156{
155 u32 data_and_addr; 157 u32 data_and_addr;
156 u32 newoffset; 158 u32 newoffset;
@@ -197,6 +199,7 @@ static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
197 rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2); 199 rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
198 rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2); 200 rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
199} 201}
202
200bool rtl92c_phy_rf_config(struct ieee80211_hw *hw) 203bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
201{ 204{
202 struct rtl_priv *rtlpriv = rtl_priv(hw); 205 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -241,13 +244,14 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
241 rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, 244 rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
242 RFPGA0_XA_HSSIPARAMETER2, 245 RFPGA0_XA_HSSIPARAMETER2,
243 0x200)); 246 0x200));
247
244 return true; 248 return true;
245} 249}
246EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile); 250EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
247 251
248void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, 252void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
249 u32 regaddr, u32 bitmask, 253 u32 regaddr, u32 bitmask,
250 u32 data) 254 u32 data)
251{ 255{
252 struct rtl_priv *rtlpriv = rtl_priv(hw); 256 struct rtl_priv *rtlpriv = rtl_priv(hw);
253 struct rtl_phy *rtlphy = &(rtlpriv->phy); 257 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -317,61 +321,48 @@ void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
317 } 321 }
318 if (regaddr == RTXAGC_B_RATE54_24) { 322 if (regaddr == RTXAGC_B_RATE54_24) {
319 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data; 323 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;
320
321 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 324 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
322 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n", 325 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
323 rtlphy->pwrgroup_cnt, 326 rtlphy->pwrgroup_cnt,
324 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9])); 327 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
325 } 328 }
326
327 if (regaddr == RTXAGC_B_CCK1_55_MCS32) { 329 if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
328 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data; 330 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;
329
330 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 331 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
331 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n", 332 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
332 rtlphy->pwrgroup_cnt, 333 rtlphy->pwrgroup_cnt,
333 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14])); 334 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
334 } 335 }
335
336 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) { 336 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
337 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data; 337 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;
338
339 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 338 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
340 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n", 339 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
341 rtlphy->pwrgroup_cnt, 340 rtlphy->pwrgroup_cnt,
342 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15])); 341 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
343 } 342 }
344
345 if (regaddr == RTXAGC_B_MCS03_MCS00) { 343 if (regaddr == RTXAGC_B_MCS03_MCS00) {
346 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data; 344 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;
347
348 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 345 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
349 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n", 346 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
350 rtlphy->pwrgroup_cnt, 347 rtlphy->pwrgroup_cnt,
351 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10])); 348 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
352 } 349 }
353
354 if (regaddr == RTXAGC_B_MCS07_MCS04) { 350 if (regaddr == RTXAGC_B_MCS07_MCS04) {
355 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data; 351 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;
356
357 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 352 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
358 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n", 353 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
359 rtlphy->pwrgroup_cnt, 354 rtlphy->pwrgroup_cnt,
360 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11])); 355 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
361 } 356 }
362
363 if (regaddr == RTXAGC_B_MCS11_MCS08) { 357 if (regaddr == RTXAGC_B_MCS11_MCS08) {
364 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data; 358 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;
365
366 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 359 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
367 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n", 360 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
368 rtlphy->pwrgroup_cnt, 361 rtlphy->pwrgroup_cnt,
369 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12])); 362 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
370 } 363 }
371
372 if (regaddr == RTXAGC_B_MCS15_MCS12) { 364 if (regaddr == RTXAGC_B_MCS15_MCS12) {
373 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data; 365 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;
374
375 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 366 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
376 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n", 367 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
377 rtlphy->pwrgroup_cnt, 368 rtlphy->pwrgroup_cnt,
@@ -583,6 +574,7 @@ static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
583 574
584 rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; 575 rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
585 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; 576 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
577
586} 578}
587 579
588void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) 580void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
@@ -611,7 +603,6 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
611 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 603 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
612 u8 idx; 604 u8 idx;
613 u8 rf_path; 605 u8 rf_path;
614
615 u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw, 606 u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
616 WIRELESS_MODE_B, 607 WIRELESS_MODE_B,
617 power_indbm); 608 power_indbm);
@@ -639,11 +630,6 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
639} 630}
640EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm); 631EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm);
641 632
642void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
643{
644}
645EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg);
646
647u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, 633u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
648 enum wireless_mode wirelessmode, 634 enum wireless_mode wirelessmode,
649 long power_indbm) 635 long power_indbm)
@@ -741,9 +727,9 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
741 if (rtlphy->set_bwmode_inprogress) 727 if (rtlphy->set_bwmode_inprogress)
742 return; 728 return;
743 rtlphy->set_bwmode_inprogress = true; 729 rtlphy->set_bwmode_inprogress = true;
744 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) 730 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
745 rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw); 731 rtlphy->set_bwmode_inprogress = false;
746 else { 732 } else {
747 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 733 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
748 ("FALSE driver sleep or unload\n")); 734 ("FALSE driver sleep or unload\n"));
749 rtlphy->set_bwmode_inprogress = false; 735 rtlphy->set_bwmode_inprogress = false;
@@ -773,8 +759,9 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
773 mdelay(delay); 759 mdelay(delay);
774 else 760 else
775 continue; 761 continue;
776 } else 762 } else {
777 rtlphy->sw_chnl_inprogress = false; 763 rtlphy->sw_chnl_inprogress = false;
764 }
778 break; 765 break;
779 } while (true); 766 } while (true);
780 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); 767 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
@@ -811,9 +798,32 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
811} 798}
812EXPORT_SYMBOL(rtl92c_phy_sw_chnl); 799EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
813 800
814static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, 801static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
815 u8 channel, u8 *stage, u8 *step, 802 u32 cmdtableidx, u32 cmdtablesz,
816 u32 *delay) 803 enum swchnlcmd_id cmdid,
804 u32 para1, u32 para2, u32 msdelay)
805{
806 struct swchnlcmd *pcmd;
807
808 if (cmdtable == NULL) {
809 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
810 return false;
811 }
812
813 if (cmdtableidx >= cmdtablesz)
814 return false;
815
816 pcmd = cmdtable + cmdtableidx;
817 pcmd->cmdid = cmdid;
818 pcmd->para1 = para1;
819 pcmd->para2 = para2;
820 pcmd->msdelay = msdelay;
821 return true;
822}
823
824bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
825 u8 channel, u8 *stage, u8 *step,
826 u32 *delay)
817{ 827{
818 struct rtl_priv *rtlpriv = rtl_priv(hw); 828 struct rtl_priv *rtlpriv = rtl_priv(hw);
819 struct rtl_phy *rtlphy = &(rtlpriv->phy); 829 struct rtl_phy *rtlphy = &(rtlpriv->phy);
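_rtl92c_phy_set_sw_chnl_cmdarray(), moved above its caller here so the static forward declaration can be dropped from the header, just fills one slot of a swchnlcmd table after bounds-checking the index. A self-contained sketch of how such a table is built, with simplified local types standing in for the driver's swchnlcmd and swchnlcmd_id definitions:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's types. */
enum cmd_id { CMD_END, CMD_SET_TXPOWER, CMD_SET_CHANNEL, CMD_DELAY };

struct chnl_cmd {
        enum cmd_id cmdid;
        unsigned int para1, para2, msdelay;
};

static bool set_cmd(struct chnl_cmd *table, unsigned int idx,
                    unsigned int size, enum cmd_id id,
                    unsigned int p1, unsigned int p2, unsigned int delay)
{
        if (!table || idx >= size)      /* same guards as the driver */
                return false;

        table[idx].cmdid = id;
        table[idx].para1 = p1;
        table[idx].para2 = p2;
        table[idx].msdelay = delay;
        return true;
}

int main(void)
{
        struct chnl_cmd precmd[4];
        unsigned int n = 0;

        /* Stage a tiny channel-switch sequence, then terminate it. */
        set_cmd(precmd, n++, 4, CMD_SET_TXPOWER, 0, 0, 0);
        set_cmd(precmd, n++, 4, CMD_SET_CHANNEL, 6, 0, 0);
        set_cmd(precmd, n++, 4, CMD_END, 0, 0, 0);

        printf("%u commands staged\n", n);
        return 0;
}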
@@ -917,29 +927,6 @@ static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
917 return false; 927 return false;
918} 928}
919 929
920static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
921 u32 cmdtableidx, u32 cmdtablesz,
922 enum swchnlcmd_id cmdid,
923 u32 para1, u32 para2, u32 msdelay)
924{
925 struct swchnlcmd *pcmd;
926
927 if (cmdtable == NULL) {
928 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
929 return false;
930 }
931
932 if (cmdtableidx >= cmdtablesz)
933 return false;
934
935 pcmd = cmdtable + cmdtableidx;
936 pcmd->cmdid = cmdid;
937 pcmd->para1 = para1;
938 pcmd->para2 = para2;
939 pcmd->msdelay = msdelay;
940 return true;
941}
942
943bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath) 930bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
944{ 931{
945 return true; 932 return true;
@@ -1002,13 +989,13 @@ static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
1002 reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD); 989 reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
1003 reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD); 990 reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
1004 reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD); 991 reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
992
1005 if (!(reg_eac & BIT(31)) && 993 if (!(reg_eac & BIT(31)) &&
1006 (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) && 994 (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
1007 (((reg_ebc & 0x03FF0000) >> 16) != 0x42)) 995 (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
1008 result |= 0x01; 996 result |= 0x01;
1009 else 997 else
1010 return result; 998 return result;
1011
1012 if (!(reg_eac & BIT(30)) && 999 if (!(reg_eac & BIT(30)) &&
1013 (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) && 1000 (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
1014 (((reg_ecc & 0x03FF0000) >> 16) != 0x36)) 1001 (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
@@ -1023,9 +1010,9 @@ static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
1023 u32 oldval_0, x, tx0_a, reg; 1010 u32 oldval_0, x, tx0_a, reg;
1024 long y, tx0_c; 1011 long y, tx0_c;
1025 1012
1026 if (final_candidate == 0xFF) 1013 if (final_candidate == 0xFF) {
1027 return; 1014 return;
1028 else if (iqk_ok) { 1015 } else if (iqk_ok) {
1029 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 1016 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1030 MASKDWORD) >> 22) & 0x3FF; 1017 MASKDWORD) >> 22) & 0x3FF;
1031 x = result[final_candidate][0]; 1018 x = result[final_candidate][0];
@@ -1063,9 +1050,9 @@ static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
1063 u32 oldval_1, x, tx1_a, reg; 1050 u32 oldval_1, x, tx1_a, reg;
1064 long y, tx1_c; 1051 long y, tx1_c;
1065 1052
1066 if (final_candidate == 0xFF) 1053 if (final_candidate == 0xFF) {
1067 return; 1054 return;
1068 else if (iqk_ok) { 1055 } else if (iqk_ok) {
1069 oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 1056 oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
1070 MASKDWORD) >> 22) & 0x3FF; 1057 MASKDWORD) >> 22) & 0x3FF;
1071 x = result[final_candidate][4]; 1058 x = result[final_candidate][4];
@@ -1282,6 +1269,7 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1282 RFPGA0_XA_HSSIPARAMETER1, 1269 RFPGA0_XA_HSSIPARAMETER1,
1283 BIT(8)); 1270 BIT(8));
1284 } 1271 }
1272
1285 if (!rtlphy->rfpi_enable) 1273 if (!rtlphy->rfpi_enable)
1286 _rtl92c_phy_pi_mode_switch(hw, true); 1274 _rtl92c_phy_pi_mode_switch(hw, true);
1287 if (t == 0) { 1275 if (t == 0) {
@@ -1317,9 +1305,10 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1317 0x3FF0000) >> 16; 1305 0x3FF0000) >> 16;
1318 break; 1306 break;
1319 } else if (i == (retrycount - 1) && patha_ok == 0x01) 1307 } else if (i == (retrycount - 1) && patha_ok == 0x01)
1308
1320 result[t][0] = (rtl_get_bbreg(hw, 0xe94, 1309 result[t][0] = (rtl_get_bbreg(hw, 0xe94,
1321 MASKDWORD) & 0x3FF0000) >> 1310 MASKDWORD) & 0x3FF0000) >>
1322 16; 1311 16;
1323 result[t][1] = 1312 result[t][1] =
1324 (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16; 1313 (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
1325 1314
@@ -1375,8 +1364,7 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1375static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, 1364static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1376 char delta, bool is2t) 1365 char delta, bool is2t)
1377{ 1366{
1378 /* This routine is deliberately dummied out for later fixes */ 1367#if 0 /* This routine is deliberately dummied out for later fixes */
1379#if 0
1380 struct rtl_priv *rtlpriv = rtl_priv(hw); 1368 struct rtl_priv *rtlpriv = rtl_priv(hw);
1381 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1369 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1382 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1370 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
@@ -1434,7 +1422,7 @@ static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1434 0x04db25a4, 0x0b1b25a4 1422 0x04db25a4, 0x0b1b25a4
1435 }; 1423 };
1436 1424
1437 u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c }; 1425 const u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
1438 1426
1439 u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 }; 1427 u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
1440 1428
@@ -1463,13 +1451,15 @@ static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1463 0x00050006 1451 0x00050006
1464 }; 1452 };
1465 1453
1466 const u32 apk_result[PATH_NUM][APK_BB_REG_NUM]; 1454 u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
1467 1455
1468 long bb_offset, delta_v, delta_offset; 1456 long bb_offset, delta_v, delta_offset;
1469 1457
1470 if (!is2t) 1458 if (!is2t)
1471 pathbound = 1; 1459 pathbound = 1;
1472 1460
1461 return;
1462
1473 for (index = 0; index < PATH_NUM; index++) { 1463 for (index = 0; index < PATH_NUM; index++) {
1474 apk_offset[index] = apk_normal_offset[index]; 1464 apk_offset[index] = apk_normal_offset[index];
1475 apk_value[index] = apk_normal_value[index]; 1465 apk_value[index] = apk_normal_value[index];
@@ -1730,8 +1720,7 @@ static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1730 0x08)); 1720 0x08));
1731 1721
1732 } 1722 }
1733 1723 rtlphy->b_apk_done = true;
1734 rtlphy->apk_done = true;
1735#endif 1724#endif
1736} 1725}
1737 1726
@@ -1758,6 +1747,7 @@ static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
1758 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1); 1747 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
1759 1748
1760 } 1749 }
1750
1761} 1751}
1762 1752
1763#undef IQK_ADDA_REG_NUM 1753#undef IQK_ADDA_REG_NUM
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
index 53ffb0981586..9a264c0d6127 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -27,8 +27,8 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#ifndef __RTL92C_PHY_H__ 30#ifndef __RTL92C_PHY_COMMON_H__
31#define __RTL92C_PHY_H__ 31#define __RTL92C_PHY_COMMON_H__
32 32
33#define MAX_PRECMD_CNT 16 33#define MAX_PRECMD_CNT 16
34#define MAX_RFDEPENDCMD_CNT 16 34#define MAX_RFDEPENDCMD_CNT 16
@@ -39,6 +39,7 @@
39#define RT_CANNOT_IO(hw) false 39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22 40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41 41
42#define IQK_ADDA_REG_NUM 16
42#define MAX_TOLERANCE 5 43#define MAX_TOLERANCE 5
43#define IQK_DELAY_TIME 1 44#define IQK_DELAY_TIME 1
44 45
@@ -56,6 +57,7 @@
56#define IQK_ADDA_REG_NUM 16 57#define IQK_ADDA_REG_NUM 16
57#define IQK_MAC_REG_NUM 4 58#define IQK_MAC_REG_NUM 4
58 59
60#define IQK_DELAY_TIME 1
59#define RF90_PATH_MAX 2 61#define RF90_PATH_MAX 2
60 62
61#define CT_OFFSET_MAC_ADDR 0X16 63#define CT_OFFSET_MAC_ADDR 0X16
@@ -77,6 +79,7 @@
77 79
78#define RTL92C_MAX_PATH_NUM 2 80#define RTL92C_MAX_PATH_NUM 2
79#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 81#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
82
80enum swchnlcmd_id { 83enum swchnlcmd_id {
81 CMDID_END, 84 CMDID_END,
82 CMDID_SET_TXPOWEROWER_LEVEL, 85 CMDID_SET_TXPOWEROWER_LEVEL,
@@ -184,45 +187,41 @@ struct tx_power_struct {
184 u32 mcs_original_offset[4][16]; 187 u32 mcs_original_offset[4][16];
185}; 188};
186 189
187extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, 190u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
188 u32 regaddr, u32 bitmask); 191 u32 regaddr, u32 bitmask);
189extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, 192void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
190 u32 regaddr, u32 bitmask, u32 data); 193 u32 regaddr, u32 bitmask, u32 data);
191extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, 194u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
192 enum radio_path rfpath, u32 regaddr, 195 enum radio_path rfpath, u32 regaddr,
193 u32 bitmask); 196 u32 bitmask);
194extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, 197bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
195 enum radio_path rfpath, u32 regaddr, 198bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
196 u32 bitmask, u32 data); 199bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
197extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw); 200bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
198extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
199extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
200extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
201 enum radio_path rfpath); 201 enum radio_path rfpath);
202extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); 202void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
203extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, 203void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
204 long *powerlevel); 204 long *powerlevel);
205extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); 205void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
206extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, 206bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
207 long power_indbm); 207 long power_indbm);
208extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, 208void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
209 u8 operation); 209 u8 operation);
210extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 210void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
211extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
212 enum nl80211_channel_type ch_type); 211 enum nl80211_channel_type ch_type);
213extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw); 212void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
214extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw); 213u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
215extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); 214void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
216extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, 215void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
217 u16 beaconinterval); 216 u16 beaconinterval);
218void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); 217void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
219void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); 218void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
220void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); 219void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
221bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 220bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
222 enum radio_path rfpath); 221 enum radio_path rfpath);
223extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, 222bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
224 u32 rfpath); 223 u32 rfpath);
225extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, 224bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
226 enum rf_pwrstate rfpwr_state); 225 enum rf_pwrstate rfpwr_state);
227void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw); 226void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
228void rtl92c_phy_set_io(struct ieee80211_hw *hw); 227void rtl92c_phy_set_io(struct ieee80211_hw *hw);
@@ -235,12 +234,25 @@ u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
235 enum wireless_mode wirelessmode, 234 enum wireless_mode wirelessmode,
236 long power_indbm); 235 long power_indbm);
237void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); 236void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
238static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, 237void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
239 u32 cmdtableidx, u32 cmdtablesz, 238bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
240 enum swchnlcmd_id cmdid, u32 para1, 239 u8 channel, u8 *stage, u8 *step,
241 u32 para2, u32 msdelay); 240 u32 *delay);
242static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, 241u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw);
243 u8 channel, u8 *stage, u8 *step, 242u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
244 u32 *delay); 243 enum radio_path rfpath, u32 offset);
244void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
245 enum radio_path rfpath, u32 offset,
246 u32 data);
247u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
248 enum radio_path rfpath, u32 offset);
249void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
250 enum radio_path rfpath, u32 offset,
251 u32 data);
252bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
253void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
254 u32 regaddr, u32 bitmask,
255 u32 data);
256bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
245 257
246#endif 258#endif
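The header hunk above mostly drops the redundant extern storage-class specifier from the prototypes (and exposes several helpers that used to be declared static). For function declarations extern is implied by the language, so the two forms below declare exactly the same thing; removing it is purely a style cleanup:

/* These two declarations are equivalent: "extern" is implicit for
 * function declarations, so kernel style omits it in headers. */
extern int probe_one(int idx);
int probe_one(int idx);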
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 2f577c8828fc..35ff7df41a1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -121,19 +121,6 @@
121#define CHIP_92C 0x01 121#define CHIP_92C 0x01
122#define CHIP_88C 0x00 122#define CHIP_88C 0x00
123 123
124/* Add vendor information into chip version definition.
125 * Add UMC B-Cut and RTL8723 chip info definition.
126 *
127 * BIT 7 Reserved
128 * BIT 6 UMC BCut
129 * BIT 5 Manufacturer(TSMC/UMC)
130 * BIT 4 TEST/NORMAL
131 * BIT 3 8723 Version
132 * BIT 2 8723?
133 * BIT 1 1T2R?
134 * BIT 0 88C/92C
135*/
136
137enum version_8192c { 124enum version_8192c {
138 VERSION_A_CHIP_92C = 0x01, 125 VERSION_A_CHIP_92C = 0x01,
139 VERSION_A_CHIP_88C = 0x00, 126 VERSION_A_CHIP_88C = 0x00,
@@ -280,20 +267,6 @@ struct h2c_cmd_8192c {
280 u8 *p_cmdbuffer; 267 u8 *p_cmdbuffer;
281}; 268};
282 269
283static inline u8 _rtl92c_get_chnl_group(u8 chnl)
284{
285 u8 group = 0;
286
287 if (chnl < 3)
288 group = 0;
289 else if (chnl < 9)
290 group = 1;
291 else
292 group = 2;
293
294 return group;
295}
296
297/* NOTE: reference to rtl8192c_rates struct */ 270/* NOTE: reference to rtl8192c_rates struct */
298static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT, 271static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
299 u8 desc_rate, bool first_ampdu) 272 u8 desc_rate, bool first_ampdu)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 7d76504df4d1..2df33e53e15a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -29,10 +29,12 @@
29 29
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../base.h" 31#include "../base.h"
32#include "../pci.h"
32#include "reg.h" 33#include "reg.h"
33#include "def.h" 34#include "def.h"
34#include "phy.h" 35#include "phy.h"
35#include "dm.h" 36#include "dm.h"
37#include "../rtl8192c/fw_common.h"
36 38
37void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw) 39void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
38{ 40{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 36302ebae4a3..07dd9552e82f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -192,6 +192,7 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw); 192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); 193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); 194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
195void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw);
195void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw); 196void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw);
196 197
197#endif 198#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 05477f465a75..4a56138eb33c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -30,12 +30,14 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../efuse.h" 31#include "../efuse.h"
32#include "../base.h" 32#include "../base.h"
33#include "../regd.h"
33#include "../cam.h" 34#include "../cam.h"
34#include "../ps.h" 35#include "../ps.h"
35#include "../pci.h" 36#include "../pci.h"
36#include "reg.h" 37#include "reg.h"
37#include "def.h" 38#include "def.h"
38#include "phy.h" 39#include "phy.h"
40#include "../rtl8192c/fw_common.h"
39#include "dm.h" 41#include "dm.h"
40#include "led.h" 42#include "led.h"
41#include "hw.h" 43#include "hw.h"
@@ -137,15 +139,6 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
137 139
138 break; 140 break;
139 } 141 }
140 case HW_VAR_MGT_FILTER:
141 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
142 break;
143 case HW_VAR_CTRL_FILTER:
144 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
145 break;
146 case HW_VAR_DATA_FILTER:
147 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
148 break;
149 default: 142 default:
150 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 143 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
151 ("switch case not process\n")); 144 ("switch case not process\n"));
@@ -156,6 +149,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
156void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) 149void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
157{ 150{
158 struct rtl_priv *rtlpriv = rtl_priv(hw); 151 struct rtl_priv *rtlpriv = rtl_priv(hw);
152 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
159 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 153 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
160 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 154 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
161 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 155 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -178,7 +172,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
178 rate_cfg |= 0x01; 172 rate_cfg |= 0x01;
179 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); 173 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
180 rtl_write_byte(rtlpriv, REG_RRSR + 1, 174 rtl_write_byte(rtlpriv, REG_RRSR + 1,
181 (rate_cfg >> 8)&0xff); 175 (rate_cfg >> 8) & 0xff);
182 while (rate_cfg > 0x1) { 176 while (rate_cfg > 0x1) {
183 rate_cfg = (rate_cfg >> 1); 177 rate_cfg = (rate_cfg >> 1);
184 rate_index++; 178 rate_index++;
@@ -276,13 +270,19 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
276 break; 270 break;
277 } 271 }
278 case HW_VAR_AMPDU_FACTOR:{ 272 case HW_VAR_AMPDU_FACTOR:{
279 u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 }; 273 u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
274 u8 regtoset_bt[4] = {0x31, 0x74, 0x42, 0x97};
280 275
281 u8 factor_toset; 276 u8 factor_toset;
282 u8 *p_regtoset = NULL; 277 u8 *p_regtoset = NULL;
283 u8 index = 0; 278 u8 index = 0;
284 279
285 p_regtoset = regtoset_normal; 280 if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
281 (rtlpcipriv->bt_coexist.bt_coexist_type ==
282 BT_CSR_BC4))
283 p_regtoset = regtoset_bt;
284 else
285 p_regtoset = regtoset_normal;
286 286
287 factor_toset = *((u8 *) val); 287 factor_toset = *((u8 *) val);
288 if (factor_toset <= 3) { 288 if (factor_toset <= 3) {
@@ -317,45 +317,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
317 } 317 }
318 case HW_VAR_AC_PARAM:{ 318 case HW_VAR_AC_PARAM:{
319 u8 e_aci = *((u8 *) val); 319 u8 e_aci = *((u8 *) val);
320 u32 u4b_ac_param; 320 rtl92c_dm_init_edca_turbo(hw);
321 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
322 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
323 u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
324
325 u4b_ac_param = (u32) mac->ac[e_aci].aifs;
326 u4b_ac_param |= ((u32)cw_min
327 & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
328 u4b_ac_param |= ((u32)cw_max &
329 0xF) << AC_PARAM_ECW_MAX_OFFSET;
330 u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
331
332 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
333 ("queue:%x, ac_param:%x\n", e_aci,
334 u4b_ac_param));
335
336 switch (e_aci) {
337 case AC1_BK:
338 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
339 u4b_ac_param);
340 break;
341 case AC0_BE:
342 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
343 u4b_ac_param);
344 break;
345 case AC2_VI:
346 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
347 u4b_ac_param);
348 break;
349 case AC3_VO:
350 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
351 u4b_ac_param);
352 break;
353 default:
354 RT_ASSERT(false,
355 ("SetHwReg8185(): invalid aci: %d !\n",
356 e_aci));
357 break;
358 }
359 321
360 if (rtlpci->acm_method != eAcmWay2_SW) 322 if (rtlpci->acm_method != eAcmWay2_SW)
361 rtlpriv->cfg->ops->set_hw_reg(hw, 323 rtlpriv->cfg->ops->set_hw_reg(hw,
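The removed HW_VAR_AC_PARAM branch shows how the 32-bit per-AC EDCA register word is assembled: AIFS in the low bits, the ECWmin and ECWmax exponents as 4-bit fields, and TXOP in the upper half, using AC_PARAM_ECW_MIN_OFFSET / AC_PARAM_ECW_MAX_OFFSET / AC_PARAM_TXOP_OFFSET. The handler now defers to rtl92c_dm_init_edca_turbo() and to rtl92ce_set_qos() further down. A self-contained sketch of the packing; the 8/12/16 offsets are the values these macros conventionally carry in this driver family and are an assumption here, not read from the diff:

#include <stdint.h>
#include <stdio.h>

/* Assumed field offsets; check reg.h for the real macro values. */
#define ECW_MIN_OFFSET  8
#define ECW_MAX_OFFSET  12
#define TXOP_OFFSET     16

static uint32_t pack_ac_param(uint8_t aifs, uint8_t ecw_min,
                              uint8_t ecw_max, uint16_t txop)
{
        uint32_t v = aifs;                              /* low byte */

        v |= (uint32_t)(ecw_min & 0xF) << ECW_MIN_OFFSET;
        v |= (uint32_t)(ecw_max & 0xF) << ECW_MAX_OFFSET;
        v |= (uint32_t)txop << TXOP_OFFSET;
        return v;
}

int main(void)
{
        /* Example: AIFS 0x22, ECWmin 3, ECWmax 4, TXOP 0x5e -> 0x5e4322 */
        printf("0x%x\n", (unsigned)pack_ac_param(0x22, 3, 4, 0x5e));
        return 0;
}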
@@ -526,9 +488,6 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
526 case HW_VAR_CORRECT_TSF:{ 488 case HW_VAR_CORRECT_TSF:{
527 u8 btype_ibss = ((u8 *) (val))[0]; 489 u8 btype_ibss = ((u8 *) (val))[0];
528 490
529 /*btype_ibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ?
530 1 : 0;*/
531
532 if (btype_ibss == true) 491 if (btype_ibss == true)
533 _rtl92ce_stop_tx_beacon(hw); 492 _rtl92ce_stop_tx_beacon(hw);
534 493
@@ -537,7 +496,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
537 rtl_write_dword(rtlpriv, REG_TSFTR, 496 rtl_write_dword(rtlpriv, REG_TSFTR,
538 (u32) (mac->tsf & 0xffffffff)); 497 (u32) (mac->tsf & 0xffffffff));
539 rtl_write_dword(rtlpriv, REG_TSFTR + 4, 498 rtl_write_dword(rtlpriv, REG_TSFTR + 4,
540 (u32) ((mac->tsf >> 32)&0xffffffff)); 499 (u32) ((mac->tsf >> 32) & 0xffffffff));
541 500
542 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); 501 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
543 502
@@ -547,15 +506,6 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
547 break; 506 break;
548 507
549 } 508 }
550 case HW_VAR_MGT_FILTER:
551 rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *) val);
552 break;
553 case HW_VAR_CTRL_FILTER:
554 rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *) val);
555 break;
556 case HW_VAR_DATA_FILTER:
557 rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *) val);
558 break;
559 default: 509 default:
560 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case " 510 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
561 "not process\n")); 511 "not process\n"));
@@ -679,12 +629,12 @@ static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw)
679 rtl92ce_sw_led_on(hw, pLed0); 629 rtl92ce_sw_led_on(hw, pLed0);
680 else 630 else
681 rtl92ce_sw_led_off(hw, pLed0); 631 rtl92ce_sw_led_off(hw, pLed0);
682
683} 632}
684 633
685static bool _rtl92ce_init_mac(struct ieee80211_hw *hw) 634static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
686{ 635{
687 struct rtl_priv *rtlpriv = rtl_priv(hw); 636 struct rtl_priv *rtlpriv = rtl_priv(hw);
637 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
688 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 638 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
689 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 639 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
690 640
@@ -693,9 +643,22 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
693 u16 retry; 643 u16 retry;
694 644
695 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); 645 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
646 if (rtlpcipriv->bt_coexist.bt_coexistence) {
647 u32 value32;
648 value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO);
649 value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK);
650 rtl_write_dword(rtlpriv, REG_APS_FSMCO, value32);
651 }
696 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); 652 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
697 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F); 653 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F);
698 654
655 if (rtlpcipriv->bt_coexist.bt_coexistence) {
656 u32 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
657
658 u4b_tmp &= (~0x00024800);
659 rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, u4b_tmp);
660 }
661
699 bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) | BIT(0); 662 bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) | BIT(0);
700 udelay(2); 663 udelay(2);
701 664
@@ -726,6 +689,11 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
726 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82); 689 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82);
727 udelay(2); 690 udelay(2);
728 691
692 if (rtlpcipriv->bt_coexist.bt_coexistence) {
693 bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2) & 0xfd;
694 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, bytetmp);
695 }
696
729 rtl_write_word(rtlpriv, REG_CR, 0x2ff); 697 rtl_write_word(rtlpriv, REG_CR, 0x2ff);
730 698
731 if (_rtl92ce_llt_table_init(hw) == false) 699 if (_rtl92ce_llt_table_init(hw) == false)
@@ -793,6 +761,7 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
793{ 761{
794 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 762 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
795 struct rtl_priv *rtlpriv = rtl_priv(hw); 763 struct rtl_priv *rtlpriv = rtl_priv(hw);
764 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
796 u8 reg_bw_opmode; 765 u8 reg_bw_opmode;
797 u32 reg_ratr, reg_prsr; 766 u32 reg_ratr, reg_prsr;
798 767
@@ -824,7 +793,11 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
824 rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); 793 rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
825 rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); 794 rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
826 795
827 rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841); 796 if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
797 (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
798 rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431);
799 else
800 rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
828 801
829 rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2); 802 rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);
830 803
@@ -840,11 +813,20 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
840 rtl_write_byte(rtlpriv, REG_PIFS, 0x1C); 813 rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
841 rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); 814 rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
842 815
843 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); 816 if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
844 817 (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
845 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020); 818 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
819 rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402);
820 } else {
821 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
822 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
823 }
846 824
847 rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666); 825 if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
826 (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
827 rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
828 else
829 rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);
848 830
849 rtl_write_byte(rtlpriv, REG_ACKTO, 0x40); 831 rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
850 832
@@ -948,8 +930,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
948 } 930 }
949 931
950 rtlhal->last_hmeboxnum = 0; 932 rtlhal->last_hmeboxnum = 0;
951 rtl92ce_phy_mac_config(hw); 933 rtl92c_phy_mac_config(hw);
952 rtl92ce_phy_bb_config(hw); 934 rtl92c_phy_bb_config(hw);
953 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; 935 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
954 rtl92c_phy_rf_config(hw); 936 rtl92c_phy_rf_config(hw);
955 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, 937 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
@@ -962,15 +944,20 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
962 _rtl92ce_hw_configure(hw); 944 _rtl92ce_hw_configure(hw);
963 rtl_cam_reset_all_entry(hw); 945 rtl_cam_reset_all_entry(hw);
964 rtl92ce_enable_hw_security_config(hw); 946 rtl92ce_enable_hw_security_config(hw);
947
965 ppsc->rfpwr_state = ERFON; 948 ppsc->rfpwr_state = ERFON;
949
966 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); 950 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
967 _rtl92ce_enable_aspm_back_door(hw); 951 _rtl92ce_enable_aspm_back_door(hw);
968 rtlpriv->intf_ops->enable_aspm(hw); 952 rtlpriv->intf_ops->enable_aspm(hw);
953
954 rtl8192ce_bt_hw_init(hw);
955
969 if (ppsc->rfpwr_state == ERFON) { 956 if (ppsc->rfpwr_state == ERFON) {
970 rtl92c_phy_set_rfpath_switch(hw, 1); 957 rtl92c_phy_set_rfpath_switch(hw, 1);
971 if (iqk_initialized) 958 if (iqk_initialized) {
972 rtl92c_phy_iq_calibrate(hw, true); 959 rtl92c_phy_iq_calibrate(hw, true);
973 else { 960 } else {
974 rtl92c_phy_iq_calibrate(hw, false); 961 rtl92c_phy_iq_calibrate(hw, false);
975 iqk_initialized = true; 962 iqk_initialized = true;
976 } 963 }
@@ -1128,75 +1115,62 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
1128 return 0; 1115 return 0;
1129} 1116}
1130 1117
1131static void _rtl92ce_set_check_bssid(struct ieee80211_hw *hw, 1118void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1132 enum nl80211_iftype type)
1133{ 1119{
1134 struct rtl_priv *rtlpriv = rtl_priv(hw); 1120 struct rtl_priv *rtlpriv = rtl_priv(hw);
1135 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); 1121 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1136 u8 filterout_non_associated_bssid = false;
1137 1122
1138 switch (type) { 1123 if (rtlpriv->psc.rfpwr_state != ERFON)
1139 case NL80211_IFTYPE_ADHOC: 1124 return;
1140 case NL80211_IFTYPE_STATION:
1141 filterout_non_associated_bssid = true;
1142 break;
1143 case NL80211_IFTYPE_UNSPECIFIED:
1144 case NL80211_IFTYPE_AP:
1145 default:
1146 break;
1147 }
1148 1125
1149 if (filterout_non_associated_bssid == true) { 1126 if (check_bssid == true) {
1150 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1127 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1151 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1128 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1152 (u8 *) (&reg_rcr)); 1129 (u8 *) (&reg_rcr));
1153 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); 1130 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
1154 } else if (filterout_non_associated_bssid == false) { 1131 } else if (check_bssid == false) {
1155 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); 1132 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1156 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0); 1133 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0);
1157 rtlpriv->cfg->ops->set_hw_reg(hw, 1134 rtlpriv->cfg->ops->set_hw_reg(hw,
1158 HW_VAR_RCR, (u8 *) (&reg_rcr)); 1135 HW_VAR_RCR, (u8 *) (&reg_rcr));
1159 } 1136 }
1137
1160} 1138}
1161 1139
1162int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) 1140int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1163{ 1141{
1142 struct rtl_priv *rtlpriv = rtl_priv(hw);
1143
1164 if (_rtl92ce_set_media_status(hw, type)) 1144 if (_rtl92ce_set_media_status(hw, type))
1165 return -EOPNOTSUPP; 1145 return -EOPNOTSUPP;
1166 _rtl92ce_set_check_bssid(hw, type); 1146
1147 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1148 if (type != NL80211_IFTYPE_AP)
1149 rtl92ce_set_check_bssid(hw, true);
1150 } else {
1151 rtl92ce_set_check_bssid(hw, false);
1152 }
1153
1167 return 0; 1154 return 0;
1168} 1155}
1169 1156
1157/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */
1170void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci) 1158void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
1171{ 1159{
1172 struct rtl_priv *rtlpriv = rtl_priv(hw); 1160 struct rtl_priv *rtlpriv = rtl_priv(hw);
1173 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1174 u32 u4b_ac_param;
1175 u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min);
1176 u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max);
1177 u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op);
1178
1179 rtl92c_dm_init_edca_turbo(hw); 1161 rtl92c_dm_init_edca_turbo(hw);
1180 u4b_ac_param = (u32) mac->ac[aci].aifs;
1181 u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET);
1182 u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET);
1183 u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET);
1184 RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
1185 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
1186 aci, u4b_ac_param, mac->ac[aci].aifs, cw_min,
1187 cw_max, tx_op));
1188 switch (aci) { 1162 switch (aci) {
1189 case AC1_BK: 1163 case AC1_BK:
1190 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param); 1164 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
1191 break; 1165 break;
1192 case AC0_BE: 1166 case AC0_BE:
1193 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); 1167 /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */
1194 break; 1168 break;
1195 case AC2_VI: 1169 case AC2_VI:
1196 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param); 1170 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
1197 break; 1171 break;
1198 case AC3_VO: 1172 case AC3_VO:
1199 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param); 1173 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
1200 break; 1174 break;
1201 default: 1175 default:
1202 RT_ASSERT(false, ("invalid aci: %d !\n", aci)); 1176 RT_ASSERT(false, ("invalid aci: %d !\n", aci));
@@ -1227,8 +1201,10 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
1227static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw) 1201static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
1228{ 1202{
1229 struct rtl_priv *rtlpriv = rtl_priv(hw); 1203 struct rtl_priv *rtlpriv = rtl_priv(hw);
1204 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1230 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1205 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1231 u8 u1b_tmp; 1206 u8 u1b_tmp;
1207 u32 u4b_tmp;
1232 1208
1233 rtlpriv->intf_ops->enable_aspm(hw); 1209 rtlpriv->intf_ops->enable_aspm(hw);
1234 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); 1210 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
@@ -1243,13 +1219,27 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
1243 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); 1219 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
1244 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000); 1220 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
1245 u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL); 1221 u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);
1246 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00FF0000 | 1222 if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
1247 (u1b_tmp << 8)); 1223 ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
1224 (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8))) {
1225 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00F30000 |
1226 (u1b_tmp << 8));
1227 } else {
1228 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00FF0000 |
1229 (u1b_tmp << 8));
1230 }
1248 rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790); 1231 rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
1249 rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080); 1232 rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
1250 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80); 1233 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
1251 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23); 1234 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
1252 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0e); 1235 if (rtlpcipriv->bt_coexist.bt_coexistence) {
1236 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
1237 u4b_tmp |= 0x03824800;
1238 rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, u4b_tmp);
1239 } else {
1240 rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, 0x0e);
1241 }
1242
1253 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); 1243 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
1254 rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10); 1244 rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
1255} 1245}
@@ -1327,6 +1317,7 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
1327 1317
1328 RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, 1318 RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
1329 ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr)); 1319 ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
1320
1330 if (add_msr) 1321 if (add_msr)
1331 rtlpci->irq_mask[0] |= add_msr; 1322 rtlpci->irq_mask[0] |= add_msr;
1332 if (rm_msr) 1323 if (rm_msr)
@@ -1582,7 +1573,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1582 ("RTL819X Not boot from eeprom, check it !!")); 1573 ("RTL819X Not boot from eeprom, check it !!"));
1583 } 1574 }
1584 1575
1585 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"), 1576 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
1586 hwinfo, HWSET_MAX_SIZE); 1577 hwinfo, HWSET_MAX_SIZE);
1587 1578
1588 eeprom_id = *((u16 *)&hwinfo[0]); 1579 eeprom_id = *((u16 *)&hwinfo[0]);
@@ -1610,6 +1601,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1610 rtlefuse->autoload_failflag, 1601 rtlefuse->autoload_failflag,
1611 hwinfo); 1602 hwinfo);
1612 1603
1604 rtl8192ce_read_bt_coexist_info_from_hwpg(hw,
1605 rtlefuse->autoload_failflag,
1606 hwinfo);
1607
1613 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1608 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
1614 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 1609 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1615 rtlefuse->txpwr_fromeprom = true; 1610 rtlefuse->txpwr_fromeprom = true;
@@ -1618,6 +1613,9 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1618 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1613 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1619 ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid)); 1614 ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
1620 1615
1616	/* set channel plan to world wide 13 */
1617 rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
1618
1621 if (rtlhal->oem_id == RT_CID_DEFAULT) { 1619 if (rtlhal->oem_id == RT_CID_DEFAULT) {
1622 switch (rtlefuse->eeprom_oemid) { 1620 switch (rtlefuse->eeprom_oemid) {
1623 case EEPROM_CID_DEFAULT: 1621 case EEPROM_CID_DEFAULT:
@@ -1701,30 +1699,36 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
1701 } else { 1699 } else {
1702 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n")); 1700 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
1703 } 1701 }
1704
1705 _rtl92ce_hal_customized_behavior(hw); 1702 _rtl92ce_hal_customized_behavior(hw);
1706} 1703}
1707 1704
1708void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw) 1705static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
1706 struct ieee80211_sta *sta)
1709{ 1707{
1710 struct rtl_priv *rtlpriv = rtl_priv(hw); 1708 struct rtl_priv *rtlpriv = rtl_priv(hw);
1709 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1711 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1710 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1712 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1711 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1713 1712 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1714 u32 ratr_value = (u32) mac->basic_rates; 1713 u32 ratr_value;
1715 u8 *mcsrate = mac->mcs;
1716 u8 ratr_index = 0; 1714 u8 ratr_index = 0;
1717 u8 nmode = mac->ht_enable; 1715 u8 nmode = mac->ht_enable;
1718 u8 mimo_ps = 1; 1716 u8 mimo_ps = IEEE80211_SMPS_OFF;
1719 u16 shortgi_rate; 1717 u16 shortgi_rate;
1720 u32 tmp_ratr_value; 1718 u32 tmp_ratr_value;
1721 u8 curtxbw_40mhz = mac->bw_40; 1719 u8 curtxbw_40mhz = mac->bw_40;
1722 u8 curshortgi_40mhz = mac->sgi_40; 1720 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1723 u8 curshortgi_20mhz = mac->sgi_20; 1721 1 : 0;
1722 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1723 1 : 0;
1724 enum wireless_mode wirelessmode = mac->mode; 1724 enum wireless_mode wirelessmode = mac->mode;
1725 1725
1726 ratr_value |= ((*(u16 *) (mcsrate))) << 12; 1726 if (rtlhal->current_bandtype == BAND_ON_5G)
1727 1727 ratr_value = sta->supp_rates[1] << 4;
1728 else
1729 ratr_value = sta->supp_rates[0];
1730 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
1731 sta->ht_cap.mcs.rx_mask[0] << 12);
1728 switch (wirelessmode) { 1732 switch (wirelessmode) {
1729 case WIRELESS_MODE_B: 1733 case WIRELESS_MODE_B:
1730 if (ratr_value & 0x0000000c) 1734 if (ratr_value & 0x0000000c)
@@ -1738,7 +1742,7 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1738 case WIRELESS_MODE_N_24G: 1742 case WIRELESS_MODE_N_24G:
1739 case WIRELESS_MODE_N_5G: 1743 case WIRELESS_MODE_N_5G:
1740 nmode = 1; 1744 nmode = 1;
1741 if (mimo_ps == 0) { 1745 if (mimo_ps == IEEE80211_SMPS_STATIC) {
1742 ratr_value &= 0x0007F005; 1746 ratr_value &= 0x0007F005;
1743 } else { 1747 } else {
1744 u32 ratr_mask; 1748 u32 ratr_mask;
@@ -1761,10 +1765,19 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1761 break; 1765 break;
1762 } 1766 }
1763 1767
1764 ratr_value &= 0x0FFFFFFF; 1768 if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
1769 (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
1770 (rtlpcipriv->bt_coexist.bt_cur_state) &&
1771 (rtlpcipriv->bt_coexist.bt_ant_isolation) &&
1772 ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ||
1773 (rtlpcipriv->bt_coexist.bt_service == BT_BUSY)))
1774 ratr_value &= 0x0fffcfc0;
1775 else
1776 ratr_value &= 0x0FFFFFFF;
1765 1777
1766 if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz && 1778 if (nmode && ((curtxbw_40mhz &&
1767 curshortgi_20mhz))) { 1779 curshortgi_40mhz) || (!curtxbw_40mhz &&
1780 curshortgi_20mhz))) {
1768 1781
1769 ratr_value |= 0x10000000; 1782 ratr_value |= 0x10000000;
1770 tmp_ratr_value = (ratr_value >> 12); 1783 tmp_ratr_value = (ratr_value >> 12);
@@ -1784,24 +1797,42 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1784 ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0))); 1797 ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
1785} 1798}
1786 1799
1787void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) 1800static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1801 struct ieee80211_sta *sta, u8 rssi_level)
1788{ 1802{
1789 struct rtl_priv *rtlpriv = rtl_priv(hw); 1803 struct rtl_priv *rtlpriv = rtl_priv(hw);
1790 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1804 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1791 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1805 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1792 u32 ratr_bitmap = (u32) mac->basic_rates; 1806 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1793 u8 *p_mcsrate = mac->mcs; 1807 struct rtl_sta_info *sta_entry = NULL;
1808 u32 ratr_bitmap;
1794 u8 ratr_index; 1809 u8 ratr_index;
1795 u8 curtxbw_40mhz = mac->bw_40; 1810 u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
1796 u8 curshortgi_40mhz = mac->sgi_40; 1811 ? 1 : 0;
1797 u8 curshortgi_20mhz = mac->sgi_20; 1812 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1798 enum wireless_mode wirelessmode = mac->mode; 1813 1 : 0;
1814 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1815 1 : 0;
1816 enum wireless_mode wirelessmode = 0;
1799 bool shortgi = false; 1817 bool shortgi = false;
1800 u8 rate_mask[5]; 1818 u8 rate_mask[5];
1801 u8 macid = 0; 1819 u8 macid = 0;
1802 u8 mimops = 1; 1820 u8 mimo_ps = IEEE80211_SMPS_OFF;
1803 1821
1804 ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12); 1822 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
1823 wirelessmode = sta_entry->wireless_mode;
1824 if (mac->opmode == NL80211_IFTYPE_STATION)
1825 curtxbw_40mhz = mac->bw_40;
1826 else if (mac->opmode == NL80211_IFTYPE_AP ||
1827 mac->opmode == NL80211_IFTYPE_ADHOC)
1828 macid = sta->aid + 1;
1829
1830 if (rtlhal->current_bandtype == BAND_ON_5G)
1831 ratr_bitmap = sta->supp_rates[1] << 4;
1832 else
1833 ratr_bitmap = sta->supp_rates[0];
1834 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
1835 sta->ht_cap.mcs.rx_mask[0] << 12);
1805 switch (wirelessmode) { 1836 switch (wirelessmode) {
1806 case WIRELESS_MODE_B: 1837 case WIRELESS_MODE_B:
1807 ratr_index = RATR_INX_WIRELESS_B; 1838 ratr_index = RATR_INX_WIRELESS_B;
@@ -1828,7 +1859,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1828 case WIRELESS_MODE_N_5G: 1859 case WIRELESS_MODE_N_5G:
1829 ratr_index = RATR_INX_WIRELESS_NGB; 1860 ratr_index = RATR_INX_WIRELESS_NGB;
1830 1861
1831 if (mimops == 0) { 1862 if (mimo_ps == IEEE80211_SMPS_STATIC) {
1832 if (rssi_level == 1) 1863 if (rssi_level == 1)
1833 ratr_bitmap &= 0x00070000; 1864 ratr_bitmap &= 0x00070000;
1834 else if (rssi_level == 2) 1865 else if (rssi_level == 2)
@@ -1892,8 +1923,8 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1892 } 1923 }
1893 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1924 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1894 ("ratr_bitmap :%x\n", ratr_bitmap)); 1925 ("ratr_bitmap :%x\n", ratr_bitmap));
1895 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | 1926 *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
1896 (ratr_index << 28); 1927 (ratr_index << 28));
1897 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 1928 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
1898 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, " 1929 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
1899 "ratr_val:%x, %x:%x:%x:%x:%x\n", 1930 "ratr_val:%x, %x:%x:%x:%x:%x\n",
@@ -1902,6 +1933,20 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1902 rate_mask[2], rate_mask[3], 1933 rate_mask[2], rate_mask[3],
1903 rate_mask[4])); 1934 rate_mask[4]));
1904 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); 1935 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
1936
1937 if (macid != 0)
1938 sta_entry->ratr_index = ratr_index;
1939}
1940
1941void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
1942 struct ieee80211_sta *sta, u8 rssi_level)
1943{
1944 struct rtl_priv *rtlpriv = rtl_priv(hw);
1945
1946 if (rtlpriv->dm.useramask)
1947 rtl92ce_update_hal_rate_mask(hw, sta, rssi_level);
1948 else
1949 rtl92ce_update_hal_rate_table(hw, sta);
1905} 1950}
1906 1951
1907void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw) 1952void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
@@ -1919,7 +1964,7 @@ void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
1919 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); 1964 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
1920} 1965}
1921 1966
1922bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) 1967bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
1923{ 1968{
1924 struct rtl_priv *rtlpriv = rtl_priv(hw); 1969 struct rtl_priv *rtlpriv = rtl_priv(hw);
1925 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 1970 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -1929,7 +1974,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
1929 bool actuallyset = false; 1974 bool actuallyset = false;
1930 unsigned long flag; 1975 unsigned long flag;
1931 1976
1932 if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter)) 1977 if (rtlpci->being_init_adapter)
1933 return false; 1978 return false;
1934 1979
1935 if (ppsc->swrf_processing) 1980 if (ppsc->swrf_processing)
@@ -1946,12 +1991,6 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
1946 1991
1947 cur_rfstate = ppsc->rfpwr_state; 1992 cur_rfstate = ppsc->rfpwr_state;
1948 1993
1949 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
1950 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
1951 rtlpriv->intf_ops->disable_aspm(hw);
1952 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
1953 }
1954
1955 rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv, 1994 rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
1956 REG_MAC_PINMUX_CFG)&~(BIT(3))); 1995 REG_MAC_PINMUX_CFG)&~(BIT(3)));
1957 1996
@@ -1976,38 +2015,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
1976 } 2015 }
1977 2016
1978 if (actuallyset) { 2017 if (actuallyset) {
1979 if (e_rfpowerstate_toset == ERFON) {
1980 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
1981 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
1982 rtlpriv->intf_ops->disable_aspm(hw);
1983 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
1984 }
1985 }
1986
1987 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); 2018 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
1988 ppsc->rfchange_inprogress = false; 2019 ppsc->rfchange_inprogress = false;
1989 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); 2020 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
1990 2021 } else {
1991 if (e_rfpowerstate_toset == ERFOFF) {
1992 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
1993 rtlpriv->intf_ops->enable_aspm(hw);
1994 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
1995 }
1996 }
1997
1998 } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
1999 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) 2022 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
2000 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); 2023 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2001 2024
2002 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
2003 rtlpriv->intf_ops->enable_aspm(hw);
2004 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
2005 }
2006
2007 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2008 ppsc->rfchange_inprogress = false;
2009 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2010 } else {
2011 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); 2025 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2012 ppsc->rfchange_inprogress = false; 2026 ppsc->rfchange_inprogress = false;
2013 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); 2027 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
@@ -2086,15 +2100,31 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
2086 macaddr = cam_const_broad; 2100 macaddr = cam_const_broad;
2087 entry_id = key_index; 2101 entry_id = key_index;
2088 } else { 2102 } else {
2103 if (mac->opmode == NL80211_IFTYPE_AP) {
2104 entry_id = rtl_cam_get_free_entry(hw,
2105 p_macaddr);
2106 if (entry_id >= TOTAL_CAM_ENTRY) {
2107 RT_TRACE(rtlpriv, COMP_SEC,
2108 DBG_EMERG,
2109 ("Can not find free hw"
2110 " security cam entry\n"));
2111 return;
2112 }
2113 } else {
2114 entry_id = CAM_PAIRWISE_KEY_POSITION;
2115 }
2116
2089 key_index = PAIRWISE_KEYIDX; 2117 key_index = PAIRWISE_KEYIDX;
2090 entry_id = CAM_PAIRWISE_KEY_POSITION;
2091 is_pairwise = true; 2118 is_pairwise = true;
2092 } 2119 }
2093 } 2120 }
2094 2121
2095 if (rtlpriv->sec.key_len[key_index] == 0) { 2122 if (rtlpriv->sec.key_len[key_index] == 0) {
2096 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 2123 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2097 ("delete one entry\n")); 2124 ("delete one entry, entry_id is %d\n",
2125 entry_id));
2126 if (mac->opmode == NL80211_IFTYPE_AP)
2127 rtl_cam_del_entry(hw, p_macaddr);
2098 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); 2128 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
2099 } else { 2129 } else {
2100 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, 2130 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
@@ -2146,3 +2176,132 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
2146 } 2176 }
2147 } 2177 }
2148} 2178}
2179
2180static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw)
2181{
2182 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
2183
2184 rtlpcipriv->bt_coexist.bt_coexistence =
2185 rtlpcipriv->bt_coexist.eeprom_bt_coexist;
2186 rtlpcipriv->bt_coexist.bt_ant_num =
2187 rtlpcipriv->bt_coexist.eeprom_bt_ant_num;
2188 rtlpcipriv->bt_coexist.bt_coexist_type =
2189 rtlpcipriv->bt_coexist.eeprom_bt_type;
2190
2191 if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
2192 rtlpcipriv->bt_coexist.bt_ant_isolation =
2193 rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation;
2194 else
2195 rtlpcipriv->bt_coexist.bt_ant_isolation =
2196 rtlpcipriv->bt_coexist.reg_bt_iso;
2197
2198 rtlpcipriv->bt_coexist.bt_radio_shared_type =
2199 rtlpcipriv->bt_coexist.eeprom_bt_radio_shared;
2200
2201 if (rtlpcipriv->bt_coexist.bt_coexistence) {
2202
2203 if (rtlpcipriv->bt_coexist.reg_bt_sco == 1)
2204 rtlpcipriv->bt_coexist.bt_service = BT_OTHER_ACTION;
2205 else if (rtlpcipriv->bt_coexist.reg_bt_sco == 2)
2206 rtlpcipriv->bt_coexist.bt_service = BT_SCO;
2207 else if (rtlpcipriv->bt_coexist.reg_bt_sco == 4)
2208 rtlpcipriv->bt_coexist.bt_service = BT_BUSY;
2209 else if (rtlpcipriv->bt_coexist.reg_bt_sco == 5)
2210 rtlpcipriv->bt_coexist.bt_service = BT_OTHERBUSY;
2211 else
2212 rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
2213
2214 rtlpcipriv->bt_coexist.bt_edca_ul = 0;
2215 rtlpcipriv->bt_coexist.bt_edca_dl = 0;
2216 rtlpcipriv->bt_coexist.bt_rssi_state = 0xff;
2217 }
2218}
2219
2220void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
2221 bool auto_load_fail, u8 *hwinfo)
2222{
2223 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
2224 u8 value;
2225
2226 if (!auto_load_fail) {
2227 rtlpcipriv->bt_coexist.eeprom_bt_coexist =
2228 ((hwinfo[RF_OPTION1] & 0xe0) >> 5);
2229 value = hwinfo[RF_OPTION4];
2230 rtlpcipriv->bt_coexist.eeprom_bt_type = ((value & 0xe) >> 1);
2231 rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (value & 0x1);
2232 rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation =
2233 ((value & 0x10) >> 4);
2234 rtlpcipriv->bt_coexist.eeprom_bt_radio_shared =
2235 ((value & 0x20) >> 5);
2236 } else {
2237 rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0;
2238 rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE;
2239 rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
2240 rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation = 0;
2241 rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
2242 }
2243
2244 rtl8192ce_bt_var_init(hw);
2245}
2246
2247void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw)
2248{
2249 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
2250
2251 /* 0:Low, 1:High, 2:From Efuse. */
2252 rtlpcipriv->bt_coexist.reg_bt_iso = 2;
2253	/* 0:Idle, 1:Non-SCO, 2:SCO, 3:From Counter. */
2254 rtlpcipriv->bt_coexist.reg_bt_sco = 3;
2255 /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
2256 rtlpcipriv->bt_coexist.reg_bt_sco = 0;
2257}
2258
2259
2260void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
2261{
2262 struct rtl_priv *rtlpriv = rtl_priv(hw);
2263 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2264 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
2265
2266 u8 u1_tmp;
2267
2268 if (rtlpcipriv->bt_coexist.bt_coexistence &&
2269 ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
2270 rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8)) {
2271
2272 if (rtlpcipriv->bt_coexist.bt_ant_isolation)
2273 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
2274
2275 u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
2276 BIT_OFFSET_LEN_MASK_32(0, 1);
2277 u1_tmp = u1_tmp |
2278 ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
2279 0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
2280 ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ?
2281 0 : BIT_OFFSET_LEN_MASK_32(2, 1));
2282 rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
2283
2284 rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+4, 0xaaaa9aaa);
2285 rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+8, 0xffbd0040);
2286 rtl_write_dword(rtlpriv, REG_BT_COEX_TABLE+0xc, 0x40000010);
2287
2288 /* Config to 1T1R. */
2289 if (rtlphy->rf_type == RF_1T1R) {
2290 u1_tmp = rtl_read_byte(rtlpriv, ROFDM0_TRXPATHENABLE);
2291 u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
2292 rtl_write_byte(rtlpriv, ROFDM0_TRXPATHENABLE, u1_tmp);
2293
2294 u1_tmp = rtl_read_byte(rtlpriv, ROFDM1_TRXPATHENABLE);
2295 u1_tmp &= ~(BIT_OFFSET_LEN_MASK_32(1, 1));
2296 rtl_write_byte(rtlpriv, ROFDM1_TRXPATHENABLE, u1_tmp);
2297 }
2298 }
2299}
2300
2301void rtl92ce_suspend(struct ieee80211_hw *hw)
2302{
2303}
2304
2305void rtl92ce_resume(struct ieee80211_hw *hw)
2306{
2307}
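
For reference, the per-station rate-mask path added above ends by handing the firmware a 5-byte H2C_RA_MASK payload: a 28-bit rate bitmap plus a 4-bit RATR index in the first dword, and the MAC ID, short-GI flag and a fixed 0x80 in the last byte. The standalone sketch below only illustrates that packing; the helper name, the example bitmap and the explicit little-endian byte layout (the driver uses its EF4BYTE wrapper for this) are assumptions for illustration, not part of this commit.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative packing of the 5-byte H2C_RA_MASK payload:
 * bytes 0-3: (ratr_bitmap & 0x0fffffff) | (ratr_index << 28), little-endian
 * byte 4:    macid | 0x20 (short GI) | 0x80
 */
static void pack_ra_mask(uint8_t out[5], uint32_t ratr_bitmap,
			 uint8_t ratr_index, uint8_t macid, int shortgi)
{
	uint32_t v = (ratr_bitmap & 0x0fffffff) | ((uint32_t)ratr_index << 28);

	out[0] = v & 0xff;		/* little-endian dword */
	out[1] = (v >> 8) & 0xff;
	out[2] = (v >> 16) & 0xff;
	out[3] = (v >> 24) & 0xff;
	out[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
}

int main(void)
{
	uint8_t rate_mask[5];

	/* example bitmap: CCK/OFDM rates plus MCS0-7, RATR index 0, macid 0 */
	pack_ra_mask(rate_mask, 0x000fffff, 0, 0, 1);
	printf("%02x %02x %02x %02x %02x\n", rate_mask[0], rate_mask[1],
	       rate_mask[2], rate_mask[3], rate_mask[4]);
	return 0;
}
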
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index a3dfdb635168..07dbe3e340a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -30,7 +30,18 @@
30#ifndef __RTL92CE_HW_H__ 30#ifndef __RTL92CE_HW_H__
31#define __RTL92CE_HW_H__ 31#define __RTL92CE_HW_H__
32 32
33#define H2C_RA_MASK 6 33static inline u8 _rtl92c_get_chnl_group(u8 chnl)
34{
35 u8 group;
36
37 if (chnl < 3)
38 group = 0;
39 else if (chnl < 9)
40 group = 1;
41 else
42 group = 2;
43 return group;
44}
34 45
35void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 46void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
36void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw); 47void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
@@ -41,28 +52,27 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw);
41void rtl92ce_enable_interrupt(struct ieee80211_hw *hw); 52void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
42void rtl92ce_disable_interrupt(struct ieee80211_hw *hw); 53void rtl92ce_disable_interrupt(struct ieee80211_hw *hw);
43int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type); 54int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
55void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
44void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci); 56void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci);
45void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw); 57void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw);
46void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw); 58void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw);
47void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw, 59void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
48 u32 add_msr, u32 rm_msr); 60 u32 add_msr, u32 rm_msr);
49void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 61void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
50void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw); 62void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
51void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level); 63 struct ieee80211_sta *sta, u8 rssi_level);
52void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw); 64void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw);
53bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); 65bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
54void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw); 66void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
55void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, 67void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
56 u8 *p_macaddr, bool is_group, u8 enc_algo, 68 u8 *p_macaddr, bool is_group, u8 enc_algo,
57 bool is_wepkey, bool clear_all); 69 bool is_wepkey, bool clear_all);
58bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); 70
59void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 71void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
60void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 72 bool autoload_fail, u8 *hwinfo);
61void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 73void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw);
62int rtl92c_download_fw(struct ieee80211_hw *hw); 74void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw);
63void rtl92c_firmware_selfreset(struct ieee80211_hw *hw); 75void rtl92ce_suspend(struct ieee80211_hw *hw);
64void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, 76void rtl92ce_resume(struct ieee80211_hw *hw);
65 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
66bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw);
67 77
68#endif 78#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
index 7b1da8d7508f..9dd1ed7b6422 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -32,6 +32,14 @@
32#include "reg.h" 32#include "reg.h"
33#include "led.h" 33#include "led.h"
34 34
35static void _rtl92ce_init_led(struct ieee80211_hw *hw,
36 struct rtl_led *pled, enum rtl_led_pin ledpin)
37{
38 pled->hw = hw;
39 pled->ledpin = ledpin;
40 pled->ledon = false;
41}
42
35void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) 43void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
36{ 44{
37 u8 ledcfg; 45 u8 ledcfg;
@@ -97,13 +105,12 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
97 105
98void rtl92ce_init_sw_leds(struct ieee80211_hw *hw) 106void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
99{ 107{
108 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
109 _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
110 _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
100} 111}
101 112
102void rtl92ce_deinit_sw_leds(struct ieee80211_hw *hw) 113static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
103{
104}
105
106void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
107 enum led_ctl_mode ledaction) 114 enum led_ctl_mode ledaction)
108{ 115{
109 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 116 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
@@ -138,7 +145,7 @@ void rtl92ce_led_control(struct ieee80211_hw *hw,
138 ledaction == LED_CTL_POWER_ON)) { 145 ledaction == LED_CTL_POWER_ON)) {
139 return; 146 return;
140 } 147 }
141 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n", 148 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d.\n",
142 ledaction)); 149 ledaction));
143 _rtl92ce_sw_led_control(hw, ledaction); 150 _rtl92ce_sw_led_control(hw, ledaction);
144} 151}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
index 10da3018f4b7..7dfccea2095b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
@@ -31,11 +31,8 @@
31#define __RTL92CE_LED_H__ 31#define __RTL92CE_LED_H__
32 32
33void rtl92ce_init_sw_leds(struct ieee80211_hw *hw); 33void rtl92ce_init_sw_leds(struct ieee80211_hw *hw);
34void rtl92ce_deinit_sw_leds(struct ieee80211_hw *hw);
35void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); 34void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
36void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); 35void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
37void rtl92ce_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction); 36void rtl92ce_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
38void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
39 enum led_ctl_mode ledaction);
40 37
41#endif 38#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index d0541e8c6012..73ae8a431848 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -38,7 +38,9 @@
38#include "dm.h" 38#include "dm.h"
39#include "table.h" 39#include "table.h"
40 40
41u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw, 41static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
42
43u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
42 enum radio_path rfpath, u32 regaddr, u32 bitmask) 44 enum radio_path rfpath, u32 regaddr, u32 bitmask)
43{ 45{
44 struct rtl_priv *rtlpriv = rtl_priv(hw); 46 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -73,9 +75,47 @@ u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
73 return readback_value; 75 return readback_value;
74} 76}
75 77
78bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
79{
80 struct rtl_priv *rtlpriv = rtl_priv(hw);
81 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
82 bool is92c = IS_92C_SERIAL(rtlhal->version);
83 bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw);
84
85 if (is92c)
86 rtl_write_byte(rtlpriv, 0x14, 0x71);
87 return rtstatus;
88}
89
90bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
91{
92 bool rtstatus = true;
93 struct rtl_priv *rtlpriv = rtl_priv(hw);
94 u16 regval;
95 u32 regvaldw;
96 u8 reg_hwparafile = 1;
97
98 _rtl92c_phy_init_bb_rf_register_definition(hw);
99 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
100 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
101 regval | BIT(13) | BIT(0) | BIT(1));
102 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
103 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
104 rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
105 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
106 FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
107 FEN_BB_GLB_RSTn | FEN_BBRSTB);
108 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
109 regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
110 rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
111 if (reg_hwparafile == 1)
112 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
113 return rtstatus;
114}
115
76void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, 116void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
77 enum radio_path rfpath, 117 enum radio_path rfpath,
78 u32 regaddr, u32 bitmask, u32 data) 118 u32 regaddr, u32 bitmask, u32 data)
79{ 119{
80 struct rtl_priv *rtlpriv = rtl_priv(hw); 120 struct rtl_priv *rtlpriv = rtl_priv(hw);
81 struct rtl_phy *rtlphy = &(rtlpriv->phy); 121 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -121,45 +161,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
121 bitmask, data, rfpath)); 161 bitmask, data, rfpath));
122} 162}
123 163
124bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw) 164static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
125{
126 struct rtl_priv *rtlpriv = rtl_priv(hw);
127 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
128 bool is92c = IS_92C_SERIAL(rtlhal->version);
129 bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw);
130
131 if (is92c)
132 rtl_write_byte(rtlpriv, 0x14, 0x71);
133 return rtstatus;
134}
135
136bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw)
137{
138 bool rtstatus = true;
139 struct rtl_priv *rtlpriv = rtl_priv(hw);
140 u16 regval;
141 u32 regvaldw;
142 u8 reg_hwparafile = 1;
143
144 _rtl92c_phy_init_bb_rf_register_definition(hw);
145 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
146 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
147 regval | BIT(13) | BIT(0) | BIT(1));
148 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
149 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
150 rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
151 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
152 FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
153 FEN_BB_GLB_RSTn | FEN_BBRSTB);
154 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
155 regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
156 rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
157 if (reg_hwparafile == 1)
158 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
159 return rtstatus;
160}
161
162bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
163{ 165{
164 struct rtl_priv *rtlpriv = rtl_priv(hw); 166 struct rtl_priv *rtlpriv = rtl_priv(hw);
165 u32 i; 167 u32 i;
@@ -177,7 +179,7 @@ bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
177} 179}
178 180
179bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, 181bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
180 u8 configtype) 182 u8 configtype)
181{ 183{
182 int i; 184 int i;
183 u32 *phy_regarray_table; 185 u32 *phy_regarray_table;
@@ -236,7 +238,7 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
236} 238}
237 239
238bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, 240bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
239 u8 configtype) 241 u8 configtype)
240{ 242{
241 struct rtl_priv *rtlpriv = rtl_priv(hw); 243 struct rtl_priv *rtlpriv = rtl_priv(hw);
242 int i; 244 int i;
@@ -274,7 +276,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
274 return true; 276 return true;
275} 277}
276 278
277bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 279bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
278 enum radio_path rfpath) 280 enum radio_path rfpath)
279{ 281{
280 282
@@ -364,74 +366,6 @@ bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
364 return true; 366 return true;
365} 367}
366 368
367void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
368{
369 struct rtl_priv *rtlpriv = rtl_priv(hw);
370 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
371 struct rtl_phy *rtlphy = &(rtlpriv->phy);
372 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
373 u8 reg_bw_opmode;
374 u8 reg_prsr_rsc;
375
376 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
377 ("Switch to %s bandwidth\n",
378 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
379 "20MHz" : "40MHz"))
380
381 if (is_hal_stop(rtlhal))
382 return;
383
384 reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
385 reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
386
387 switch (rtlphy->current_chan_bw) {
388 case HT_CHANNEL_WIDTH_20:
389 reg_bw_opmode |= BW_OPMODE_20MHZ;
390 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
391 break;
392
393 case HT_CHANNEL_WIDTH_20_40:
394 reg_bw_opmode &= ~BW_OPMODE_20MHZ;
395 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
396
397 reg_prsr_rsc =
398 (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
399 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
400 break;
401
402 default:
403 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
404 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
405 break;
406 }
407
408 switch (rtlphy->current_chan_bw) {
409 case HT_CHANNEL_WIDTH_20:
410 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
411 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
412 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
413 break;
414 case HT_CHANNEL_WIDTH_20_40:
415 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
416 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
417 rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
418 (mac->cur_40_prime_sc >> 1));
419 rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
420 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
421 rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
422 (mac->cur_40_prime_sc ==
423 HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
424 break;
425 default:
426 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
427 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
428 break;
429 }
430 rtl92c_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
431 rtlphy->set_bwmode_inprogress = false;
432 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
433}
434
435void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) 369void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
436{ 370{
437 u8 tmpreg; 371 u8 tmpreg;
@@ -477,6 +411,36 @@ void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
477 } 411 }
478} 412}
479 413
414static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
415{
416 u32 u4b_tmp;
417 u8 delay = 5;
418 struct rtl_priv *rtlpriv = rtl_priv(hw);
419
420 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
421 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
422 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
423 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
424 while (u4b_tmp != 0 && delay > 0) {
425 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
426 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
427 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
428 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
429 delay--;
430 }
431 if (delay == 0) {
432 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
433 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
434 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
435 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
436 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
437 ("Switch RF timeout !!!.\n"));
438 return;
439 }
440 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
441 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
442}
443
480static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, 444static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
481 enum rf_pwrstate rfpwr_state) 445 enum rf_pwrstate rfpwr_state)
482{ 446{
@@ -523,33 +487,6 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
523 break; 487 break;
524 } 488 }
525 case ERFOFF:{ 489 case ERFOFF:{
526 for (queue_id = 0, i = 0;
527 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
528 ring = &pcipriv->dev.tx_ring[queue_id];
529 if (skb_queue_len(&ring->queue) == 0 ||
530 queue_id == BEACON_QUEUE) {
531 queue_id++;
532 continue;
533 } else {
534 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
535 ("eRf Off/Sleep: %d times "
536 "TcbBusyQueue[%d] "
537 "=%d before doze!\n", (i + 1),
538 queue_id,
539 skb_queue_len(&ring->queue)));
540 udelay(10);
541 i++;
542 }
543 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
544 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
545 ("\nERFOFF: %d times "
546 "TcbBusyQueue[%d] = %d !\n",
547 MAX_DOZE_WAITING_TIMES_9x,
548 queue_id,
549 skb_queue_len(&ring->queue)));
550 break;
551 }
552 }
553 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { 490 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
554 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 491 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
555 ("IPS Set eRf nic disable\n")); 492 ("IPS Set eRf nic disable\n"));
@@ -581,6 +518,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
581 "TcbBusyQueue[%d] =%d before " 518 "TcbBusyQueue[%d] =%d before "
582 "doze!\n", (i + 1), queue_id, 519 "doze!\n", (i + 1), queue_id,
583 skb_queue_len(&ring->queue))); 520 skb_queue_len(&ring->queue)));
521
584 udelay(10); 522 udelay(10);
585 i++; 523 i++;
586 } 524 }
@@ -599,7 +537,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
599 jiffies_to_msecs(jiffies - 537 jiffies_to_msecs(jiffies -
600 ppsc->last_awake_jiffies))); 538 ppsc->last_awake_jiffies)));
601 ppsc->last_sleep_jiffies = jiffies; 539 ppsc->last_sleep_jiffies = jiffies;
602 _rtl92c_phy_set_rf_sleep(hw); 540 _rtl92ce_phy_set_rf_sleep(hw);
603 break; 541 break;
604 } 542 }
605 default: 543 default:
@@ -614,10 +552,11 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
614 return bresult; 552 return bresult;
615} 553}
616 554
617bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, 555bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
618 enum rf_pwrstate rfpwr_state) 556 enum rf_pwrstate rfpwr_state)
619{ 557{
620 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 558 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
559
621 bool bresult = false; 560 bool bresult = false;
622 561
623 if (rfpwr_state == ppsc->rfpwr_state) 562 if (rfpwr_state == ppsc->rfpwr_state)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index a37267e3fc22..ad580852cc76 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -39,6 +39,7 @@
39#define RT_CANNOT_IO(hw) false 39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22 40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41 41
42#define IQK_ADDA_REG_NUM 16
42#define MAX_TOLERANCE 5 43#define MAX_TOLERANCE 5
43#define IQK_DELAY_TIME 1 44#define IQK_DELAY_TIME 1
44 45
@@ -56,6 +57,8 @@
56#define IQK_ADDA_REG_NUM 16 57#define IQK_ADDA_REG_NUM 16
57#define IQK_MAC_REG_NUM 4 58#define IQK_MAC_REG_NUM 4
58 59
60#define IQK_DELAY_TIME 1
61
59#define RF90_PATH_MAX 2 62#define RF90_PATH_MAX 2
60 63
61#define CT_OFFSET_MAC_ADDR 0X16 64#define CT_OFFSET_MAC_ADDR 0X16
@@ -76,7 +79,7 @@
76#define CT_OFFSET_CUSTOMER_ID 0x7F 79#define CT_OFFSET_CUSTOMER_ID 0x7F
77 80
78#define RTL92C_MAX_PATH_NUM 2 81#define RTL92C_MAX_PATH_NUM 2
79#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 82
80enum swchnlcmd_id { 83enum swchnlcmd_id {
81 CMDID_END, 84 CMDID_END,
82 CMDID_SET_TXPOWEROWER_LEVEL, 85 CMDID_SET_TXPOWEROWER_LEVEL,
@@ -184,43 +187,44 @@ struct tx_power_struct {
184 u32 mcs_original_offset[4][16]; 187 u32 mcs_original_offset[4][16];
185}; 188};
186 189
187extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, 190bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
191u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
188 u32 regaddr, u32 bitmask); 192 u32 regaddr, u32 bitmask);
189extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, 193void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
190 u32 regaddr, u32 bitmask, u32 data); 194 u32 regaddr, u32 bitmask, u32 data);
191extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, 195u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
192 enum radio_path rfpath, u32 regaddr, 196 enum radio_path rfpath, u32 regaddr,
193 u32 bitmask); 197 u32 bitmask);
194extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, 198extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
195 enum radio_path rfpath, u32 regaddr, 199 enum radio_path rfpath, u32 regaddr,
196 u32 bitmask, u32 data); 200 u32 bitmask, u32 data);
197extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw); 201bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
198bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw); 202bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
199extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw); 203bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
200extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, 204bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
201 enum radio_path rfpath); 205 enum radio_path rfpath);
202extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); 206void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
203extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, 207void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
204 long *powerlevel); 208 long *powerlevel);
205extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); 209void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
206extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, 210bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
207 long power_indbm); 211 long power_indbm);
208extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, 212void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
209 u8 operation); 213 u8 operation);
210extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw); 214void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
211extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
212 enum nl80211_channel_type ch_type); 215 enum nl80211_channel_type ch_type);
213extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw); 216void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
214extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw); 217u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
215extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); 218void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
216extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, 219void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
217 u16 beaconinterval); 220 u16 beaconinterval);
218void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); 221void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
219void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); 222void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
223void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
220void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); 224void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
221bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 225bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
222 enum radio_path rfpath); 226 enum radio_path rfpath);
223extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, 227bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
224 u32 rfpath); 228 u32 rfpath);
225bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); 229bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
226bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, 230bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
@@ -237,9 +241,6 @@ u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
237void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, 241void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
238 enum radio_path rfpath, u32 offset, 242 enum radio_path rfpath, u32 offset,
239 u32 data); 243 u32 data);
240void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
241 u32 regaddr, u32 bitmask,
242 u32 data);
243void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, 244void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
244 enum radio_path rfpath, u32 offset, 245 enum radio_path rfpath, u32 offset,
245 u32 data); 246 u32 data);
@@ -250,5 +251,11 @@ bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
250void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); 251void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
251bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw); 252bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
252void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw); 253void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
254bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
255 enum rf_pwrstate rfpwr_state);
256bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
257 u8 configtype);
258bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
259 u8 configtype);
253 260
254#endif 261#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index b0868a613841..598cecc63f41 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -72,6 +72,7 @@
72#define REG_GPIO_IO_SEL_2 0x0062 72#define REG_GPIO_IO_SEL_2 0x0062
73/* RTL8723 WIFI/BT/GPS Multi-Function control source. */ 73/* RTL8723 WIFI/BT/GPS Multi-Function control source. */
74#define REG_MULTI_FUNC_CTRL 0x0068 74#define REG_MULTI_FUNC_CTRL 0x0068
75
75#define REG_MCUFWDL 0x0080 76#define REG_MCUFWDL 0x0080
76 77
77#define REG_HMEBOX_EXT_0 0x0088 78#define REG_HMEBOX_EXT_0 0x0088
@@ -542,7 +543,7 @@
542#define IMR_OCPINT BIT(1) 543#define IMR_OCPINT BIT(1)
543#define IMR_WLANOFF BIT(0) 544#define IMR_WLANOFF BIT(0)
544 545
545#define HWSET_MAX_SIZE 128 546#define EFUSE_REAL_CONTENT_LEN 512
546 547
547#define EEPROM_DEFAULT_TSSI 0x0 548#define EEPROM_DEFAULT_TSSI 0x0
548#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 549#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
@@ -656,6 +657,7 @@
656#define STOPBE BIT(1) 657#define STOPBE BIT(1)
657#define STOPBK BIT(0) 658#define STOPBK BIT(0)
658 659
660#define RCR_APPFCS BIT(31)
659#define RCR_APP_FCS BIT(31) 661#define RCR_APP_FCS BIT(31)
660#define RCR_APP_MIC BIT(30) 662#define RCR_APP_MIC BIT(30)
661#define RCR_APP_ICV BIT(29) 663#define RCR_APP_ICV BIT(29)
@@ -688,6 +690,7 @@
688 690
689#define REG_USB_INFO 0xFE17 691#define REG_USB_INFO 0xFE17
690#define REG_USB_SPECIAL_OPTION 0xFE55 692#define REG_USB_SPECIAL_OPTION 0xFE55
693
691#define REG_USB_DMA_AGG_TO 0xFE5B 694#define REG_USB_DMA_AGG_TO 0xFE5B
692#define REG_USB_AGG_TO 0xFE5C 695#define REG_USB_AGG_TO 0xFE5C
693#define REG_USB_AGG_TH 0xFE5D 696#define REG_USB_AGG_TH 0xFE5D
@@ -775,7 +778,6 @@
775 778
776#define BOOT_FROM_EEPROM BIT(4) 779#define BOOT_FROM_EEPROM BIT(4)
777#define EEPROM_EN BIT(5) 780#define EEPROM_EN BIT(5)
778#define EEPROMSEL BOOT_FROM_EEPROM
779 781
780#define AFE_BGEN BIT(0) 782#define AFE_BGEN BIT(0)
781#define AFE_MBEN BIT(1) 783#define AFE_MBEN BIT(1)
@@ -901,28 +903,7 @@
901#define BD_PKG_SEL BIT(25) 903#define BD_PKG_SEL BIT(25)
902#define BD_HCI_SEL BIT(26) 904#define BD_HCI_SEL BIT(26)
903#define TYPE_ID BIT(27) 905#define TYPE_ID BIT(27)
904 906#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
905/* REG_GPIO_OUTSTS (For RTL8723 only) */
906#define EFS_HCI_SEL (BIT(0)|BIT(1))
907#define PAD_HCI_SEL (BIT(2)|BIT(3))
908#define HCI_SEL (BIT(4)|BIT(5))
909#define PKG_SEL_HCI BIT(6)
910#define FEN_GPS BIT(7)
911#define FEN_BT BIT(8)
912#define FEN_WL BIT(9)
913#define FEN_PCI BIT(10)
914#define FEN_USB BIT(11)
915#define BTRF_HWPDN_N BIT(12)
916#define WLRF_HWPDN_N BIT(13)
917#define PDN_BT_N BIT(14)
918#define PDN_GPS_N BIT(15)
919#define BT_CTL_HWPDN BIT(16)
920#define GPS_CTL_HWPDN BIT(17)
921#define PPHY_SUSB BIT(20)
922#define UPHY_SUSB BIT(21)
923#define PCI_SUSEN BIT(22)
924#define USB_SUSEN BIT(23)
925#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
926 907
927#define CHIP_VER_RTL_MASK 0xF000 908#define CHIP_VER_RTL_MASK 0xF000
928#define CHIP_VER_RTL_SHIFT 12 909#define CHIP_VER_RTL_SHIFT 12
@@ -1077,6 +1058,7 @@
1077#define _RARF_RC8(x) (((x) & 0x1F) << 24) 1058#define _RARF_RC8(x) (((x) & 0x1F) << 24)
1078 1059
1079#define AC_PARAM_TXOP_OFFSET 16 1060#define AC_PARAM_TXOP_OFFSET 16
1061#define AC_PARAM_TXOP_LIMIT_OFFSET 16
1080#define AC_PARAM_ECW_MAX_OFFSET 12 1062#define AC_PARAM_ECW_MAX_OFFSET 12
1081#define AC_PARAM_ECW_MIN_OFFSET 8 1063#define AC_PARAM_ECW_MIN_OFFSET 8
1082#define AC_PARAM_AIFS_OFFSET 0 1064#define AC_PARAM_AIFS_OFFSET 0
@@ -1221,33 +1203,11 @@
1221#define EPROM_CMD_CONFIG 0x3 1203#define EPROM_CMD_CONFIG 0x3
1222#define EPROM_CMD_LOAD 1 1204#define EPROM_CMD_LOAD 1
1223 1205
1224#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE 1206#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
1225 1207
1226#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1227
1228/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
1229/* Enable GPIO[9] as WiFi HW PDn source */
1230#define WL_HWPDN_EN BIT(0) 1208#define WL_HWPDN_EN BIT(0)
1231/* WiFi HW PDn polarity control */ 1209
1232#define WL_HWPDN_SL BIT(1) 1210#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1233/* WiFi function enable */
1234#define WL_FUNC_EN BIT(2)
1235/* Enable GPIO[9] as WiFi RF HW PDn source */
1236#define WL_HWROF_EN BIT(3)
1237/* Enable GPIO[11] as BT HW PDn source */
1238#define BT_HWPDN_EN BIT(16)
1239/* BT HW PDn polarity control */
1240#define BT_HWPDN_SL BIT(17)
1241/* BT function enable */
1242#define BT_FUNC_EN BIT(18)
1243/* Enable GPIO[11] as BT/GPS RF HW PDn source */
1244#define BT_HWROF_EN BIT(19)
1245/* Enable GPIO[10] as GPS HW PDn source */
1246#define GPS_HWPDN_EN BIT(20)
1247/* GPS HW PDn polarity control */
1248#define GPS_HWPDN_SL BIT(21)
1249/* GPS function enable */
1250#define GPS_FUNC_EN BIT(22)
1251 1211
1252#define RPMAC_RESET 0x100 1212#define RPMAC_RESET 0x100
1253#define RPMAC_TXSTART 0x104 1213#define RPMAC_TXSTART 0x104
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index 669b1168dbec..90d0f2cf3b27 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -34,9 +34,9 @@
34#include "rf.h" 34#include "rf.h"
35#include "dm.h" 35#include "dm.h"
36 36
37static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw); 37static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
38 38
39void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) 39void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
40{ 40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw); 41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 struct rtl_phy *rtlphy = &(rtlpriv->phy); 42 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -62,7 +62,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
62} 62}
63 63
64void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 64void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel) 65 u8 *ppowerlevel)
66{ 66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw); 67 struct rtl_priv *rtlpriv = rtl_priv(hw);
68 struct rtl_phy *rtlphy = &(rtlpriv->phy); 68 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -128,8 +128,7 @@ void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
128 128
129 tmpval = tx_agc[RF90_PATH_A] >> 8; 129 tmpval = tx_agc[RF90_PATH_A] >> 8;
130 130
131 if (mac->mode == WIRELESS_MODE_B) 131 tmpval = tmpval & 0xff00ffff;
132 tmpval = tmpval & 0xff00ffff;
133 132
134 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); 133 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
135 134
@@ -202,7 +201,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
202 struct rtl_priv *rtlpriv = rtl_priv(hw); 201 struct rtl_priv *rtlpriv = rtl_priv(hw);
203 struct rtl_phy *rtlphy = &(rtlpriv->phy); 202 struct rtl_phy *rtlphy = &(rtlpriv->phy);
204 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 203 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
205 u8 i, chnlgroup, pwr_diff_limit[4]; 204 u8 i, chnlgroup = 0, pwr_diff_limit[4];
206 u32 writeVal, customer_limit, rf; 205 u32 writeVal, customer_limit, rf;
207 206
208 for (rf = 0; rf < 2; rf++) { 207 for (rf = 0; rf < 2; rf++) {
@@ -440,16 +439,17 @@ bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
440 else 439 else
441 rtlphy->num_total_rfpath = 2; 440 rtlphy->num_total_rfpath = 2;
442 441
443 return _rtl92c_phy_rf6052_config_parafile(hw); 442 return _rtl92ce_phy_rf6052_config_parafile(hw);
443
444} 444}
445 445
446static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw) 446static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
447{ 447{
448 struct rtl_priv *rtlpriv = rtl_priv(hw); 448 struct rtl_priv *rtlpriv = rtl_priv(hw);
449 struct rtl_phy *rtlphy = &(rtlpriv->phy); 449 struct rtl_phy *rtlphy = &(rtlpriv->phy);
450 u32 u4_regvalue; 450 u32 u4_regvalue = 0;
451 u8 rfpath; 451 u8 rfpath;
452 bool rtstatus; 452 bool rtstatus = true;
453 struct bb_reg_def *pphyreg; 453 struct bb_reg_def *pphyreg;
454 454
455 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { 455 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
@@ -484,12 +484,12 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
484 484
485 switch (rfpath) { 485 switch (rfpath) {
486 case RF90_PATH_A: 486 case RF90_PATH_A:
487 rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw, 487 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
488 (enum radio_path) rfpath); 488 (enum radio_path)rfpath);
489 break; 489 break;
490 case RF90_PATH_B: 490 case RF90_PATH_B:
491 rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw, 491 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
492 (enum radio_path) rfpath); 492 (enum radio_path)rfpath);
493 break; 493 break;
494 case RF90_PATH_C: 494 case RF90_PATH_C:
495 break; 495 break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
index 3aa520c1c171..39ff03685986 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -34,14 +34,11 @@
34#define RF6052_MAX_REG 0x3F 34#define RF6052_MAX_REG 0x3F
35#define RF6052_MAX_PATH 2 35#define RF6052_MAX_PATH 2
36 36
37extern void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, 37extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
38 u8 bandwidth); 38 u8 bandwidth);
39extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 39extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
40 u8 *ppowerlevel); 40 u8 *ppowerlevel);
41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 41extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
42 u8 *ppowerlevel, u8 channel); 42 u8 *ppowerlevel, u8 channel);
43bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw); 43extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
44bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
45 enum radio_path rfpath);
46
47#endif 44#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b1cc4d44f534..390bbb5ee11d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -42,10 +42,58 @@
42#include "trx.h" 42#include "trx.h"
43#include "led.h" 43#include "led.h"
44 44
45static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
46{
47 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
48
49	/* Close ASPM for AMD by default */
50 rtlpci->const_amdpci_aspm = 0;
51
52 /*
53 * ASPM PS mode.
54 * 0 - Disable ASPM,
55 * 1 - Enable ASPM without Clock Req,
56 * 2 - Enable ASPM with Clock Req,
57	 * 3 - Always Enable ASPM with Clock Req,
58	 * 4 - Always Enable ASPM without Clock Req.
59	 * set default to RTL8192CE:3 RTL8192E:2
60	 */
61 rtlpci->const_pci_aspm = 3;
62
63 /*Setting for PCI-E device */
64 rtlpci->const_devicepci_aspm_setting = 0x03;
65
66 /*Setting for PCI-E bridge */
67 rtlpci->const_hostpci_aspm_setting = 0x02;
68
69 /*
70 * In Hw/Sw Radio Off situation.
71 * 0 - Default,
72 * 1 - From ASPM setting without low Mac Pwr,
73 * 2 - From ASPM setting with low Mac Pwr,
74 * 3 - Bus D3
75 * set default to RTL8192CE:0 RTL8192SE:2
76 */
77 rtlpci->const_hwsw_rfoff_d3 = 0;
78
79 /*
80 * This setting works for those device with
81 * backdoor ASPM setting such as EPHY setting.
82 * 0 - Not support ASPM,
83 * 1 - Support ASPM,
84 * 2 - According to chipset.
85 */
86 rtlpci->const_support_pciaspm = 1;
87}
88
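Side note on the ASPM settings introduced above: the comment documents a small integer encoding (0-4) for const_pci_aspm. A minimal illustrative sketch of that encoding follows; the enum and helper names are hypothetical and not part of the patch or the driver.

/* Hypothetical names mapping the documented const_pci_aspm values. */
enum aspm_policy {
	ASPM_DISABLED		= 0,	/* Disable ASPM */
	ASPM_NO_CLKREQ		= 1,	/* Enable ASPM without Clock Req */
	ASPM_WITH_CLKREQ	= 2,	/* Enable ASPM with Clock Req */
	ASPM_ALWAYS_CLKREQ	= 3,	/* Always enable ASPM with Clock Req */
	ASPM_ALWAYS_NO_CLKREQ	= 4,	/* Always enable ASPM without Clock Req */
};

static const char *aspm_policy_name(int val)
{
	switch (val) {
	case ASPM_DISABLED:		return "ASPM disabled";
	case ASPM_NO_CLKREQ:		return "ASPM without Clock Req";
	case ASPM_WITH_CLKREQ:		return "ASPM with Clock Req";
	case ASPM_ALWAYS_CLKREQ:	return "always ASPM with Clock Req";
	case ASPM_ALWAYS_NO_CLKREQ:	return "always ASPM without Clock Req";
	default:			return "unknown";
	}
}

RTL8192CE defaults to 3, i.e. ASPM with Clock Req is always enabled unless const_support_pciaspm is cleared.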
45int rtl92c_init_sw_vars(struct ieee80211_hw *hw) 89int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
46{ 90{
91 int err;
47 struct rtl_priv *rtlpriv = rtl_priv(hw); 92 struct rtl_priv *rtlpriv = rtl_priv(hw);
48 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 93 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
94 const struct firmware *firmware;
95
96 rtl8192ce_bt_reg_init(hw);
49 97
50 rtlpriv->dm.dm_initialgain_enable = 1; 98 rtlpriv->dm.dm_initialgain_enable = 1;
51 rtlpriv->dm.dm_flag = 0; 99 rtlpriv->dm.dm_flag = 0;
@@ -53,7 +101,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
53 rtlpriv->dm.thermalvalue = 0; 101 rtlpriv->dm.thermalvalue = 0;
54 rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); 102 rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
55 103
56 rtlpci->receive_config = (RCR_APP_FCS | 104 /* compatible 5G band 88ce just 2.4G band & smsp */
105 rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
106 rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
107 rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
108
109 rtlpci->receive_config = (RCR_APPFCS |
57 RCR_AMF | 110 RCR_AMF |
58 RCR_ADF | 111 RCR_ADF |
59 RCR_APP_MIC | 112 RCR_APP_MIC |
@@ -76,13 +129,49 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
76 129
77 rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0); 130 rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
78 131
79 rtlpriv->rtlhal.pfirmware = (u8 *) vmalloc(0x4000); 132 /* for LPS & IPS */
133 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
134 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
135 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
136 rtlpriv->psc.reg_fwctrl_lps = 3;
137 rtlpriv->psc.reg_max_lps_awakeintvl = 5;
138	/* ASPM can be disabled entirely by setting
139	 * const_support_pciaspm = 0 */
140 rtl92c_init_aspm_vars(hw);
141
142 if (rtlpriv->psc.reg_fwctrl_lps == 1)
143 rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
144 else if (rtlpriv->psc.reg_fwctrl_lps == 2)
145 rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
146 else if (rtlpriv->psc.reg_fwctrl_lps == 3)
147 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
148
149 /* for firmware buf */
150 rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
80 if (!rtlpriv->rtlhal.pfirmware) { 151 if (!rtlpriv->rtlhal.pfirmware) {
81 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 152 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
82 ("Can't alloc buffer for fw.\n")); 153 ("Can't alloc buffer for fw.\n"));
83 return 1; 154 return 1;
84 } 155 }
85 156
157 /* request fw */
158 err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
159 rtlpriv->io.dev);
160 if (err) {
161 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
162 ("Failed to request firmware!\n"));
163 return 1;
164 }
165 if (firmware->size > 0x4000) {
166 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
167 ("Firmware is too big!\n"));
168 release_firmware(firmware);
169 return 1;
170 }
171 memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
172 rtlpriv->rtlhal.fwsize = firmware->size;
173 release_firmware(firmware);
174
86 return 0; 175 return 0;
87} 176}
88 177
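The firmware handling added in this hunk follows the usual bounded request_firmware() pattern: request the image, reject anything larger than the preallocated 0x4000-byte buffer, copy it, and release the firmware object. A condensed, self-contained sketch of the same idea is shown below; the function and parameter names are hypothetical, only the kernel APIs (request_firmware, release_firmware) are real.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/string.h>

/* Illustrative only: load a firmware image into a caller-provided buffer. */
static int demo_load_fw(struct device *dev, const char *name,
			u8 *buf, size_t bufsize, size_t *out_len)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, dev);	/* may sleep */
	if (err)
		return err;
	if (fw->size > bufsize) {		/* reject oversized images */
		release_firmware(fw);
		return -EFBIG;
	}
	memcpy(buf, fw->data, fw->size);	/* keep a private copy */
	*out_len = fw->size;
	release_firmware(fw);			/* fw object no longer needed */
	return 0;
}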
@@ -103,17 +192,19 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
103 .interrupt_recognized = rtl92ce_interrupt_recognized, 192 .interrupt_recognized = rtl92ce_interrupt_recognized,
104 .hw_init = rtl92ce_hw_init, 193 .hw_init = rtl92ce_hw_init,
105 .hw_disable = rtl92ce_card_disable, 194 .hw_disable = rtl92ce_card_disable,
195 .hw_suspend = rtl92ce_suspend,
196 .hw_resume = rtl92ce_resume,
106 .enable_interrupt = rtl92ce_enable_interrupt, 197 .enable_interrupt = rtl92ce_enable_interrupt,
107 .disable_interrupt = rtl92ce_disable_interrupt, 198 .disable_interrupt = rtl92ce_disable_interrupt,
108 .set_network_type = rtl92ce_set_network_type, 199 .set_network_type = rtl92ce_set_network_type,
200 .set_chk_bssid = rtl92ce_set_check_bssid,
109 .set_qos = rtl92ce_set_qos, 201 .set_qos = rtl92ce_set_qos,
110 .set_bcn_reg = rtl92ce_set_beacon_related_registers, 202 .set_bcn_reg = rtl92ce_set_beacon_related_registers,
111 .set_bcn_intv = rtl92ce_set_beacon_interval, 203 .set_bcn_intv = rtl92ce_set_beacon_interval,
112 .update_interrupt_mask = rtl92ce_update_interrupt_mask, 204 .update_interrupt_mask = rtl92ce_update_interrupt_mask,
113 .get_hw_reg = rtl92ce_get_hw_reg, 205 .get_hw_reg = rtl92ce_get_hw_reg,
114 .set_hw_reg = rtl92ce_set_hw_reg, 206 .set_hw_reg = rtl92ce_set_hw_reg,
115 .update_rate_table = rtl92ce_update_hal_rate_table, 207 .update_rate_tbl = rtl92ce_update_hal_rate_tbl,
116 .update_rate_mask = rtl92ce_update_hal_rate_mask,
117 .fill_tx_desc = rtl92ce_tx_fill_desc, 208 .fill_tx_desc = rtl92ce_tx_fill_desc,
118 .fill_tx_cmddesc = rtl92ce_tx_fill_cmddesc, 209 .fill_tx_cmddesc = rtl92ce_tx_fill_cmddesc,
119 .query_rx_desc = rtl92ce_rx_query_desc, 210 .query_rx_desc = rtl92ce_rx_query_desc,
@@ -123,7 +214,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
123 .switch_channel = rtl92c_phy_sw_chnl, 214 .switch_channel = rtl92c_phy_sw_chnl,
124 .dm_watchdog = rtl92c_dm_watchdog, 215 .dm_watchdog = rtl92c_dm_watchdog,
125 .scan_operation_backup = rtl92c_phy_scan_operation_backup, 216 .scan_operation_backup = rtl92c_phy_scan_operation_backup,
126 .set_rf_power_state = rtl92ce_phy_set_rf_power_state, 217 .set_rf_power_state = rtl92c_phy_set_rf_power_state,
127 .led_control = rtl92ce_led_control, 218 .led_control = rtl92ce_led_control,
128 .set_desc = rtl92ce_set_desc, 219 .set_desc = rtl92ce_set_desc,
129 .get_desc = rtl92ce_get_desc, 220 .get_desc = rtl92ce_get_desc,
@@ -131,27 +222,29 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
131 .enable_hw_sec = rtl92ce_enable_hw_security_config, 222 .enable_hw_sec = rtl92ce_enable_hw_security_config,
132 .set_key = rtl92ce_set_key, 223 .set_key = rtl92ce_set_key,
133 .init_sw_leds = rtl92ce_init_sw_leds, 224 .init_sw_leds = rtl92ce_init_sw_leds,
134 .deinit_sw_leds = rtl92ce_deinit_sw_leds,
135 .get_bbreg = rtl92c_phy_query_bb_reg, 225 .get_bbreg = rtl92c_phy_query_bb_reg,
136 .set_bbreg = rtl92c_phy_set_bb_reg, 226 .set_bbreg = rtl92c_phy_set_bb_reg,
137 .get_rfreg = rtl92ce_phy_query_rf_reg,
138 .set_rfreg = rtl92ce_phy_set_rf_reg, 227 .set_rfreg = rtl92ce_phy_set_rf_reg,
139 .cmd_send_packet = _rtl92c_cmd_send_packet, 228 .get_rfreg = rtl92c_phy_query_rf_reg,
140 .phy_rf6052_config = rtl92ce_phy_rf6052_config, 229 .phy_rf6052_config = rtl92ce_phy_rf6052_config,
141 .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower, 230 .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower,
142 .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower, 231 .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower,
143 .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile, 232 .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
144 .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile, 233 .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
145 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate, 234 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
146 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
147 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower, 235 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
148}; 236};
149 237
150static struct rtl_mod_params rtl92ce_mod_params = { 238static struct rtl_mod_params rtl92ce_mod_params = {
151 .sw_crypto = 0, 239 .sw_crypto = false,
240 .inactiveps = true,
241 .swctrl_lps = false,
242 .fwctrl_lps = true,
152}; 243};
153 244
154static struct rtl_hal_cfg rtl92ce_hal_cfg = { 245static struct rtl_hal_cfg rtl92ce_hal_cfg = {
246 .bar_id = 2,
247 .write_readback = true,
155 .name = "rtl92c_pci", 248 .name = "rtl92c_pci",
156 .fw_name = "rtlwifi/rtl8192cfw.bin", 249 .fw_name = "rtlwifi/rtl8192cfw.bin",
157 .ops = &rtl8192ce_hal_ops, 250 .ops = &rtl8192ce_hal_ops,
@@ -175,6 +268,8 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
175 .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, 268 .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
176 .maps[EFUSE_ANA8M] = EFUSE_ANA8M, 269 .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
177 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, 270 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
271 .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
272 .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
178 273
179 .maps[RWCAM] = REG_CAMCMD, 274 .maps[RWCAM] = REG_CAMCMD,
180 .maps[WCAMI] = REG_CAMWRITE, 275 .maps[WCAMI] = REG_CAMWRITE,
@@ -239,7 +334,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
239 .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, 334 .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
240}; 335};
241 336
242static struct pci_device_id rtl92ce_pci_ids[] __devinitdata = { 337DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
243 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)}, 338 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
244 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)}, 339 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
245 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)}, 340 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
@@ -257,7 +352,13 @@ MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
257MODULE_FIRMWARE("rtlwifi/rtl8192cfw.bin"); 352MODULE_FIRMWARE("rtlwifi/rtl8192cfw.bin");
258 353
259module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444); 354module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444);
355module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444);
356module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444);
357module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444);
260MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n"); 358MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
359MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
360MODULE_PARM_DESC(fwlps, "using linked fw control power save "
361 "(default 1 is open)\n");
261 362
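For reference, the knobs above are ordinary module parameters; assuming the module is loaded as rtl8192ce, an invocation such as "modprobe rtl8192ce ips=0 fwlps=1 swenc=0" (hypothetical example) would disable inactive power save while keeping firmware-controlled LPS and hardware crypto, matching the defaults set in rtl92ce_mod_params above.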
262static struct pci_driver rtl92ce_driver = { 363static struct pci_driver rtl92ce_driver = {
263 .name = KBUILD_MODNAME, 364 .name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
index 36e657668c1e..b7dc3263e433 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -33,19 +33,9 @@
33int rtl92c_init_sw_vars(struct ieee80211_hw *hw); 33int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
34void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw); 34void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
35void rtl92c_init_var_map(struct ieee80211_hw *hw); 35void rtl92c_init_var_map(struct ieee80211_hw *hw);
36bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
37 struct sk_buff *skb);
38void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
39 u8 *ppowerlevel);
40void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
41 u8 *ppowerlevel, u8 channel);
42bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, 36bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
43 u8 configtype); 37 u8 configtype);
44bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, 38bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
45 u8 configtype); 39 u8 configtype);
46void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
47u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
48 enum radio_path rfpath, u32 regaddr, u32 bitmask);
49void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
50 40
51#endif 41#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index aa2b5815600f..54b2bd53d36a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -36,42 +36,16 @@
36#include "trx.h" 36#include "trx.h"
37#include "led.h" 37#include "led.h"
38 38
39static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc, 39static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
40 unsigned int
41 skb_queue)
42{ 40{
43 enum rtl_desc_qsel qsel; 41 __le16 fc = rtl_get_fc(skb);
44 42
45 if (unlikely(ieee80211_is_beacon(fc))) { 43 if (unlikely(ieee80211_is_beacon(fc)))
46 qsel = QSLT_BEACON; 44 return QSLT_BEACON;
47 return qsel; 45 if (ieee80211_is_mgmt(fc))
48 } 46 return QSLT_MGNT;
49
50 if (ieee80211_is_mgmt(fc)) {
51 qsel = QSLT_MGNT;
52 return qsel;
53 }
54 47
55 switch (skb_queue) { 48 return skb->priority;
56 case VO_QUEUE:
57 qsel = QSLT_VO;
58 break;
59 case VI_QUEUE:
60 qsel = QSLT_VI;
61 break;
62 case BE_QUEUE:
63 qsel = QSLT_BE;
64 break;
65 case BK_QUEUE:
66 qsel = QSLT_BK;
67 break;
68 default:
69 qsel = QSLT_BE;
70 RT_ASSERT(false, ("BE queue, skb_queue:%d,"
71 " set qsel = 0x%X\n", skb_queue, QSLT_BE));
72 break;
73 }
74 return qsel;
75} 49}
76 50
77static int _rtl92ce_rate_mapping(bool isht, u8 desc_rate, bool first_ampdu) 51static int _rtl92ce_rate_mapping(bool isht, u8 desc_rate, bool first_ampdu)
@@ -255,6 +229,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
255 u8 evm, pwdb_all, rf_rx_num = 0; 229 u8 evm, pwdb_all, rf_rx_num = 0;
256 u8 i, max_spatial_stream; 230 u8 i, max_spatial_stream;
257 u32 rssi, total_rssi = 0; 231 u32 rssi, total_rssi = 0;
232 bool in_powersavemode = false;
258 bool is_cck_rate; 233 bool is_cck_rate;
259 234
260 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); 235 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
@@ -270,9 +245,13 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
270 u8 report, cck_highpwr; 245 u8 report, cck_highpwr;
271 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; 246 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
272 247
273 cck_highpwr = (u8) rtl_get_bbreg(hw, 248 if (!in_powersavemode)
274 RFPGA0_XA_HSSIPARAMETER2, 249 cck_highpwr = (u8) rtl_get_bbreg(hw,
275 BIT(9)); 250 RFPGA0_XA_HSSIPARAMETER2,
251 BIT(9));
252 else
253 cck_highpwr = false;
254
276 if (!cck_highpwr) { 255 if (!cck_highpwr) {
277 u8 cck_agc_rpt = cck_buf->cck_agc_rpt; 256 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
278 report = cck_buf->cck_agc_rpt & 0xc0; 257 report = cck_buf->cck_agc_rpt & 0xc0;
@@ -398,6 +377,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
398 377
399 if (rtlpriv->stats.ui_rssi.total_num++ >= 378 if (rtlpriv->stats.ui_rssi.total_num++ >=
400 PHY_RSSI_SLID_WIN_MAX) { 379 PHY_RSSI_SLID_WIN_MAX) {
380
401 rtlpriv->stats.ui_rssi.total_num = 381 rtlpriv->stats.ui_rssi.total_num =
402 PHY_RSSI_SLID_WIN_MAX; 382 PHY_RSSI_SLID_WIN_MAX;
403 last_rssi = 383 last_rssi =
@@ -424,10 +404,6 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
424 if (!pstats->is_cck && pstats->packet_toself) { 404 if (!pstats->is_cck && pstats->packet_toself) {
425 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; 405 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
426 rfpath++) { 406 rfpath++) {
427
428 if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
429 continue;
430
431 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) { 407 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
432 rtlpriv->stats.rx_rssi_percentage[rfpath] = 408 rtlpriv->stats.rx_rssi_percentage[rfpath] =
433 pstats->rx_mimo_signalstrength[rfpath]; 409 pstats->rx_mimo_signalstrength[rfpath];
@@ -723,7 +699,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
723void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 699void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
724 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 700 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
725 struct ieee80211_tx_info *info, struct sk_buff *skb, 701 struct ieee80211_tx_info *info, struct sk_buff *skb,
726 unsigned int queue_index) 702 u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
727{ 703{
728 struct rtl_priv *rtlpriv = rtl_priv(hw); 704 struct rtl_priv *rtlpriv = rtl_priv(hw);
729 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 705 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -732,16 +708,9 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
732 bool defaultadapter = true; 708 bool defaultadapter = true;
733 struct ieee80211_sta *sta; 709 struct ieee80211_sta *sta;
734 u8 *pdesc = (u8 *) pdesc_tx; 710 u8 *pdesc = (u8 *) pdesc_tx;
735 struct rtl_tcb_desc tcb_desc;
736 u8 *qc = ieee80211_get_qos_ctl(hdr);
737 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
738 u16 seq_number; 711 u16 seq_number;
739 __le16 fc = hdr->frame_control; 712 __le16 fc = hdr->frame_control;
740 u8 rate_flag = info->control.rates[0].flags; 713 u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
741
742 enum rtl_desc_qsel fw_qsel =
743 _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index);
744
745 bool firstseg = ((hdr->seq_ctrl & 714 bool firstseg = ((hdr->seq_ctrl &
746 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); 715 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
747 716
@@ -751,56 +720,68 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
751 dma_addr_t mapping = pci_map_single(rtlpci->pdev, 720 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
752 skb->data, skb->len, 721 skb->data, skb->len,
753 PCI_DMA_TODEVICE); 722 PCI_DMA_TODEVICE);
723 u8 bw_40 = 0;
724
725 rcu_read_lock();
726 sta = get_sta(hw, mac->vif, mac->bssid);
727 if (mac->opmode == NL80211_IFTYPE_STATION) {
728 bw_40 = mac->bw_40;
729 } else if (mac->opmode == NL80211_IFTYPE_AP ||
730 mac->opmode == NL80211_IFTYPE_ADHOC) {
731 if (sta)
732 bw_40 = sta->ht_cap.cap &
733 IEEE80211_HT_CAP_SUP_WIDTH_20_40;
734 }
754 735
755 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 736 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
756 737
757 rtl_get_tcb_desc(hw, info, skb, &tcb_desc); 738 rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
758 739
759 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c)); 740 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
760 741
742 if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
743 firstseg = true;
744 lastseg = true;
745 }
761 if (firstseg) { 746 if (firstseg) {
762 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); 747 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
763 748
764 SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate); 749 SET_TX_DESC_TX_RATE(pdesc, tcb_desc->hw_rate);
765 750
766 if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble) 751 if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
767 SET_TX_DESC_DATA_SHORTGI(pdesc, 1); 752 SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
768 753
769 if (mac->tids[tid].agg.agg_state == RTL_AGG_ON && 754 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
770 info->flags & IEEE80211_TX_CTL_AMPDU) {
771 SET_TX_DESC_AGG_BREAK(pdesc, 1); 755 SET_TX_DESC_AGG_BREAK(pdesc, 1);
772 SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); 756 SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
773 } 757 }
774 SET_TX_DESC_SEQ(pdesc, seq_number); 758 SET_TX_DESC_SEQ(pdesc, seq_number);
775 759
776 SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable && 760 SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc->rts_enable &&
777 !tcb_desc. 761 !tcb_desc->
778 cts_enable) ? 1 : 0)); 762 cts_enable) ? 1 : 0));
779 SET_TX_DESC_HW_RTS_ENABLE(pdesc, 763 SET_TX_DESC_HW_RTS_ENABLE(pdesc,
780 ((tcb_desc.rts_enable 764 ((tcb_desc->rts_enable
781 || tcb_desc.cts_enable) ? 1 : 0)); 765 || tcb_desc->cts_enable) ? 1 : 0));
782 SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0)); 766 SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc->cts_enable) ? 1 : 0));
783 SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0)); 767 SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc->rts_stbc) ? 1 : 0));
784 768
785 SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate); 769 SET_TX_DESC_RTS_RATE(pdesc, tcb_desc->rts_rate);
786 SET_TX_DESC_RTS_BW(pdesc, 0); 770 SET_TX_DESC_RTS_BW(pdesc, 0);
787 SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc); 771 SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
788 SET_TX_DESC_RTS_SHORT(pdesc, 772 SET_TX_DESC_RTS_SHORT(pdesc,
789 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ? 773 ((tcb_desc->rts_rate <= DESC92C_RATE54M) ?
790 (tcb_desc.rts_use_shortpreamble ? 1 : 0) 774 (tcb_desc->rts_use_shortpreamble ? 1 : 0)
791 : (tcb_desc.rts_use_shortgi ? 1 : 0))); 775 : (tcb_desc->rts_use_shortgi ? 1 : 0)));
792 776
793 if (mac->bw_40) { 777 if (bw_40) {
794 if (tcb_desc.packet_bw) { 778 if (tcb_desc->packet_bw) {
795 SET_TX_DESC_DATA_BW(pdesc, 1); 779 SET_TX_DESC_DATA_BW(pdesc, 1);
796 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); 780 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
797 } else { 781 } else {
798 SET_TX_DESC_DATA_BW(pdesc, 0); 782 SET_TX_DESC_DATA_BW(pdesc, 0);
799 783 SET_TX_DESC_TX_SUB_CARRIER(pdesc,
800 if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { 784 mac->cur_40_prime_sc);
801 SET_TX_DESC_TX_SUB_CARRIER(pdesc,
802 mac->cur_40_prime_sc);
803 }
804 } 785 }
805 } else { 786 } else {
806 SET_TX_DESC_DATA_BW(pdesc, 0); 787 SET_TX_DESC_DATA_BW(pdesc, 0);
@@ -810,13 +791,10 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
810 SET_TX_DESC_LINIP(pdesc, 0); 791 SET_TX_DESC_LINIP(pdesc, 0);
811 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len); 792 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
812 793
813 rcu_read_lock();
814 sta = ieee80211_find_sta(mac->vif, mac->bssid);
815 if (sta) { 794 if (sta) {
816 u8 ampdu_density = sta->ht_cap.ampdu_density; 795 u8 ampdu_density = sta->ht_cap.ampdu_density;
817 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); 796 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
818 } 797 }
819 rcu_read_unlock();
820 798
821 if (info->control.hw_key) { 799 if (info->control.hw_key) {
822 struct ieee80211_key_conf *keyconf = 800 struct ieee80211_key_conf *keyconf =
@@ -844,7 +822,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
844 SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); 822 SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
845 SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); 823 SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
846 SET_TX_DESC_DISABLE_FB(pdesc, 0); 824 SET_TX_DESC_DISABLE_FB(pdesc, 0);
847 SET_TX_DESC_USE_RATE(pdesc, tcb_desc.use_driver_rate ? 1 : 0); 825 SET_TX_DESC_USE_RATE(pdesc, tcb_desc->use_driver_rate ? 1 : 0);
848 826
849 if (ieee80211_is_data_qos(fc)) { 827 if (ieee80211_is_data_qos(fc)) {
850 if (mac->rdg_en) { 828 if (mac->rdg_en) {
@@ -855,24 +833,24 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
855 } 833 }
856 } 834 }
857 } 835 }
836 rcu_read_unlock();
858 837
859 SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); 838 SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
860 SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); 839 SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
861 840
862 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); 841 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
863 842
864 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 843 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
865 844
866 if (rtlpriv->dm.useramask) { 845 if (rtlpriv->dm.useramask) {
867 SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index); 846 SET_TX_DESC_RATE_ID(pdesc, tcb_desc->ratr_index);
868 SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id); 847 SET_TX_DESC_MACID(pdesc, tcb_desc->mac_id);
869 } else { 848 } else {
870 SET_TX_DESC_RATE_ID(pdesc, 0xC + tcb_desc.ratr_index); 849 SET_TX_DESC_RATE_ID(pdesc, 0xC + tcb_desc->ratr_index);
871 SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index); 850 SET_TX_DESC_MACID(pdesc, tcb_desc->ratr_index);
872 } 851 }
873 852
874 if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps && 853 if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
875 ppsc->fwctrl_lps) {
876 SET_TX_DESC_HWSEQ_EN(pdesc, 1); 854 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
877 SET_TX_DESC_PKT_ID(pdesc, 8); 855 SET_TX_DESC_PKT_ID(pdesc, 8);
878 856
@@ -923,7 +901,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
923 901
924 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len)); 902 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
925 903
926 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 904 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
927 905
928 SET_TX_DESC_RATE_ID(pdesc, 7); 906 SET_TX_DESC_RATE_ID(pdesc, 7);
929 SET_TX_DESC_MACID(pdesc, 0); 907 SET_TX_DESC_MACID(pdesc, 0);
@@ -1021,7 +999,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
1021 return ret; 999 return ret;
1022} 1000}
1023 1001
1024void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue) 1002void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
1025{ 1003{
1026 struct rtl_priv *rtlpriv = rtl_priv(hw); 1004 struct rtl_priv *rtlpriv = rtl_priv(hw);
1027 if (hw_queue == BEACON_QUEUE) { 1005 if (hw_queue == BEACON_QUEUE) {
@@ -1032,35 +1010,3 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
1032 } 1010 }
1033} 1011}
1034 1012
1035bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
1036 struct sk_buff *skb)
1037{
1038 struct rtl_priv *rtlpriv = rtl_priv(hw);
1039 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1040 struct rtl8192_tx_ring *ring;
1041 struct rtl_tx_desc *pdesc;
1042 u8 own;
1043 unsigned long flags;
1044 struct sk_buff *pskb = NULL;
1045
1046 ring = &rtlpci->tx_ring[BEACON_QUEUE];
1047
1048 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1049
1050 pskb = __skb_dequeue(&ring->queue);
1051 if (pskb)
1052 kfree_skb(pskb);
1053
1054 pdesc = &ring->desc[0];
1055 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
1056
1057 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
1058
1059 __skb_queue_tail(&ring->queue, skb);
1060
1061 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1062
1063 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
1064
1065 return true;
1066}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index 803adcc80c96..0f1177137501 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -532,9 +532,9 @@
532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
533do { \ 533do { \
534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \ 534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
535 memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ 535 memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
536 else \ 536 else \
537 memset((void *)__pdesc, 0, _size); \ 537 memset(__pdesc, 0, _size); \
538} while (0); 538} while (0);
539 539
540#define RX_HAL_IS_CCK_RATE(_pdesc)\ 540#define RX_HAL_IS_CCK_RATE(_pdesc)\
@@ -724,17 +724,16 @@ struct rx_desc_92c {
724void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, 724void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
725 struct ieee80211_hdr *hdr, 725 struct ieee80211_hdr *hdr,
726 u8 *pdesc, struct ieee80211_tx_info *info, 726 u8 *pdesc, struct ieee80211_tx_info *info,
727 struct sk_buff *skb, unsigned int qsel); 727 struct sk_buff *skb, u8 hw_queue,
728 struct rtl_tcb_desc *ptcb_desc);
728bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, 729bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
729 struct rtl_stats *stats, 730 struct rtl_stats *stats,
730 struct ieee80211_rx_status *rx_status, 731 struct ieee80211_rx_status *rx_status,
731 u8 *pdesc, struct sk_buff *skb); 732 u8 *pdesc, struct sk_buff *skb);
732void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val); 733void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
733u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name); 734u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
734void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue); 735void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
735void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, 736void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
736 bool b_firstseg, bool b_lastseg, 737 bool b_firstseg, bool b_lastseg,
737 struct sk_buff *skb); 738 struct sk_buff *skb);
738bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
739
740#endif 739#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 9444e76838cf..52e2af58c1ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -39,6 +39,7 @@
39#include "mac.h" 39#include "mac.h"
40#include "dm.h" 40#include "dm.h"
41#include "hw.h" 41#include "hw.h"
42#include "../rtl8192ce/hw.h"
42#include "trx.h" 43#include "trx.h"
43#include "led.h" 44#include "led.h"
44#include "table.h" 45#include "table.h"
@@ -605,10 +606,10 @@ void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
605 if (!IS_NORMAL_CHIP(rtlhal->version)) 606 if (!IS_NORMAL_CHIP(rtlhal->version))
606 return; 607 return;
607 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); 608 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
608 rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ? 609 rtlefuse->epromtype = (tmp_u1b & BOOT_FROM_EEPROM) ?
609 EEPROM_93C46 : EEPROM_BOOT_EFUSE; 610 EEPROM_93C46 : EEPROM_BOOT_EFUSE;
610 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n", 611 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
611 (tmp_u1b & EEPROMSEL) ? "EERROM" : "EFUSE")); 612 (tmp_u1b & BOOT_FROM_EEPROM) ? "EERROM" : "EFUSE"));
612 rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true; 613 rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
613 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n", 614 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
614 (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!")); 615 (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
@@ -921,7 +922,7 @@ static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
921 u8 out_ep_num, 922 u8 out_ep_num,
922 u8 queue_sel) 923 u8 queue_sel)
923{ 924{
924 u8 hq_sele; 925 u8 hq_sele = 0;
925 struct rtl_priv *rtlpriv = rtl_priv(hw); 926 struct rtl_priv *rtlpriv = rtl_priv(hw);
926 927
927 switch (out_ep_num) { 928 switch (out_ep_num) {
@@ -977,7 +978,7 @@ static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
977 struct rtl_priv *rtlpriv = rtl_priv(hw); 978 struct rtl_priv *rtlpriv = rtl_priv(hw);
978 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 979 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
979 980
980 mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS | 981 mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
981 RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL | 982 RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
982 RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32); 983 RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
983 rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf); 984 rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
@@ -2182,7 +2183,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
2182 } 2183 }
2183} 2184}
2184 2185
2185void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw) 2186void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2187 struct ieee80211_sta *sta,
2188 u8 rssi_level)
2186{ 2189{
2187 struct rtl_priv *rtlpriv = rtl_priv(hw); 2190 struct rtl_priv *rtlpriv = rtl_priv(hw);
2188 struct rtl_phy *rtlphy = &(rtlpriv->phy); 2191 struct rtl_phy *rtlphy = &(rtlpriv->phy);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 62af555bb61c..32f85cba106a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -98,13 +98,14 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
98 u32 add_msr, u32 rm_msr); 98 u32 add_msr, u32 rm_msr);
99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 99void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); 100void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
101void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw); 101void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
102 struct ieee80211_sta *sta,
103 u8 rssi_level);
102void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level); 104void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
103 105
104void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw); 106void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
105bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); 107bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
106void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); 108void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
107u8 _rtl92c_get_chnl_group(u8 chnl);
108int rtl92c_download_fw(struct ieee80211_hw *hw); 109int rtl92c_download_fw(struct ieee80211_hw *hw);
109void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 110void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
110void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished); 111void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 4e020e654e6b..9a3d0239e27e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -38,7 +38,7 @@
38#include "table.h" 38#include "table.h"
39 39
40u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, 40u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
41 enum radio_path rfpath, u32 regaddr, u32 bitmask) 41 enum radio_path rfpath, u32 regaddr, u32 bitmask)
42{ 42{
43 struct rtl_priv *rtlpriv = rtl_priv(hw); 43 struct rtl_priv *rtlpriv = rtl_priv(hw);
44 u32 original_value, readback_value, bitshift; 44 u32 original_value, readback_value, bitshift;
@@ -64,8 +64,8 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
64} 64}
65 65
66void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw, 66void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
67 enum radio_path rfpath, 67 enum radio_path rfpath,
68 u32 regaddr, u32 bitmask, u32 data) 68 u32 regaddr, u32 bitmask, u32 data)
69{ 69{
70 struct rtl_priv *rtlpriv = rtl_priv(hw); 70 struct rtl_priv *rtlpriv = rtl_priv(hw);
71 struct rtl_phy *rtlphy = &(rtlpriv->phy); 71 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -163,7 +163,7 @@ bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
163} 163}
164 164
165bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, 165bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
166 u8 configtype) 166 u8 configtype)
167{ 167{
168 int i; 168 int i;
169 u32 *phy_regarray_table; 169 u32 *phy_regarray_table;
@@ -223,7 +223,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
223} 223}
224 224
225bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, 225bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
226 u8 configtype) 226 u8 configtype)
227{ 227{
228 struct rtl_priv *rtlpriv = rtl_priv(hw); 228 struct rtl_priv *rtlpriv = rtl_priv(hw);
229 struct rtl_phy *rtlphy = &(rtlpriv->phy); 229 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -459,7 +459,7 @@ void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
459 } 459 }
460} 460}
461 461
462bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, 462static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
463 enum rf_pwrstate rfpwr_state) 463 enum rf_pwrstate rfpwr_state)
464{ 464{
465 struct rtl_priv *rtlpriv = rtl_priv(hw); 465 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -595,7 +595,7 @@ bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
595} 595}
596 596
597bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, 597bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
598 enum rf_pwrstate rfpwr_state) 598 enum rf_pwrstate rfpwr_state)
599{ 599{
600 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 600 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
601 bool bresult = false; 601 bool bresult = false;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
index 06299559ab68..ff81a61729d7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
@@ -34,3 +34,17 @@ bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
34void rtl92c_phy_set_io(struct ieee80211_hw *hw); 34void rtl92c_phy_set_io(struct ieee80211_hw *hw);
35bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); 35bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
36bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw); 36bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw);
37u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
38 enum radio_path rfpath, u32 regaddr, u32 bitmask);
39void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
40 enum radio_path rfpath,
41 u32 regaddr, u32 bitmask, u32 data);
42bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
43bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
44 u8 configtype);
45void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
46bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
47 u8 configtype);
48void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
49bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
50 enum rf_pwrstate rfpwr_state);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 1c79c226f145..c7576ec4744e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -62,7 +62,7 @@ void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
62} 62}
63 63
64void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, 64void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel) 65 u8 *ppowerlevel)
66{ 66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw); 67 struct rtl_priv *rtlpriv = rtl_priv(hw);
68 struct rtl_phy *rtlphy = &(rtlpriv->phy); 68 struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -389,7 +389,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
389} 389}
390 390
391void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, 391void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
392 u8 *ppowerlevel, u8 channel) 392 u8 *ppowerlevel, u8 channel)
393{ 393{
394 u32 writeVal[2], powerBase0[2], powerBase1[2]; 394 u32 writeVal[2], powerBase0[2], powerBase1[2];
395 u8 index = 0; 395 u8 index = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
index 86c2728cfa00..500a2094b6bb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -43,5 +43,9 @@ extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
43bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw); 43bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
44bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 44bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
45 enum radio_path rfpath); 45 enum radio_path rfpath);
46void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
47 u8 *ppowerlevel);
48void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
49 u8 *ppowerlevel, u8 channel);
46 50
47#endif 51#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 71244a38d49e..bee7c1480f63 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -94,7 +94,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
94 .update_interrupt_mask = rtl92cu_update_interrupt_mask, 94 .update_interrupt_mask = rtl92cu_update_interrupt_mask,
95 .get_hw_reg = rtl92cu_get_hw_reg, 95 .get_hw_reg = rtl92cu_get_hw_reg,
96 .set_hw_reg = rtl92cu_set_hw_reg, 96 .set_hw_reg = rtl92cu_set_hw_reg,
97 .update_rate_table = rtl92cu_update_hal_rate_table, 97 .update_rate_tbl = rtl92cu_update_hal_rate_table,
98 .update_rate_mask = rtl92cu_update_hal_rate_mask, 98 .update_rate_mask = rtl92cu_update_hal_rate_mask,
99 .fill_tx_desc = rtl92cu_tx_fill_desc, 99 .fill_tx_desc = rtl92cu_tx_fill_desc,
100 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, 100 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index d0b0d43b9a6d..3a92ba3c4a1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -372,7 +372,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
372 __le16 fc; 372 __le16 fc;
373 struct ieee80211_hdr *hdr; 373 struct ieee80211_hdr *hdr;
374 374
375 memset(rx_status, 0, sizeof(rx_status)); 375 memset(rx_status, 0, sizeof(*rx_status));
376 rxdesc = skb->data; 376 rxdesc = skb->data;
377 skb_len = skb->len; 377 skb_len = skb->len;
378 drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT); 378 drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
@@ -434,7 +434,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
434 "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1], 434 "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
435 (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4], 435 (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
436 (u32)hdr->addr1[5])); 436 (u32)hdr->addr1[5]));
437 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 437 memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
438 ieee80211_rx_irqsafe(hw, skb); 438 ieee80211_rx_irqsafe(hw, skb);
439} 439}
440 440
@@ -498,14 +498,14 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
498void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 498void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
499 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 499 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
500 struct ieee80211_tx_info *info, struct sk_buff *skb, 500 struct ieee80211_tx_info *info, struct sk_buff *skb,
501 unsigned int queue_index) 501 u8 queue_index,
502 struct rtl_tcb_desc *tcb_desc)
502{ 503{
503 struct rtl_priv *rtlpriv = rtl_priv(hw); 504 struct rtl_priv *rtlpriv = rtl_priv(hw);
504 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 505 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
505 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 506 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
506 bool defaultadapter = true; 507 bool defaultadapter = true;
507 struct ieee80211_sta *sta; 508 struct ieee80211_sta *sta = info->control.sta = info->control.sta;
508 struct rtl_tcb_desc tcb_desc;
509 u8 *qc = ieee80211_get_qos_ctl(hdr); 509 u8 *qc = ieee80211_get_qos_ctl(hdr);
510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
511 u16 seq_number; 511 u16 seq_number;
@@ -517,15 +517,15 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
517 u8 *txdesc; 517 u8 *txdesc;
518 518
519 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 519 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
520 rtl_get_tcb_desc(hw, info, skb, &tcb_desc); 520 rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
521 txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE); 521 txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
522 memset(txdesc, 0, RTL_TX_HEADER_SIZE); 522 memset(txdesc, 0, RTL_TX_HEADER_SIZE);
523 SET_TX_DESC_PKT_SIZE(txdesc, pktlen); 523 SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
524 SET_TX_DESC_LINIP(txdesc, 0); 524 SET_TX_DESC_LINIP(txdesc, 0);
525 SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET); 525 SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
526 SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE); 526 SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
527 SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate); 527 SET_TX_DESC_TX_RATE(txdesc, tcb_desc->hw_rate);
528 if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble) 528 if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
529 SET_TX_DESC_DATA_SHORTGI(txdesc, 1); 529 SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
530 if (mac->tids[tid].agg.agg_state == RTL_AGG_ON && 530 if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
531 info->flags & IEEE80211_TX_CTL_AMPDU) { 531 info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -535,21 +535,21 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
535 SET_TX_DESC_AGG_BREAK(txdesc, 1); 535 SET_TX_DESC_AGG_BREAK(txdesc, 1);
536 } 536 }
537 SET_TX_DESC_SEQ(txdesc, seq_number); 537 SET_TX_DESC_SEQ(txdesc, seq_number);
538 SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable && 538 SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable &&
539 !tcb_desc.cts_enable) ? 1 : 0)); 539 !tcb_desc->cts_enable) ? 1 : 0));
540 SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable || 540 SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc->rts_enable ||
541 tcb_desc.cts_enable) ? 1 : 0)); 541 tcb_desc->cts_enable) ? 1 : 0));
542 SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0)); 542 SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc->cts_enable) ? 1 : 0));
543 SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0)); 543 SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc->rts_stbc) ? 1 : 0));
544 SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate); 544 SET_TX_DESC_RTS_RATE(txdesc, tcb_desc->rts_rate);
545 SET_TX_DESC_RTS_BW(txdesc, 0); 545 SET_TX_DESC_RTS_BW(txdesc, 0);
546 SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc); 546 SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc);
547 SET_TX_DESC_RTS_SHORT(txdesc, 547 SET_TX_DESC_RTS_SHORT(txdesc,
548 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ? 548 ((tcb_desc->rts_rate <= DESC92C_RATE54M) ?
549 (tcb_desc.rts_use_shortpreamble ? 1 : 0) 549 (tcb_desc->rts_use_shortpreamble ? 1 : 0)
550 : (tcb_desc.rts_use_shortgi ? 1 : 0))); 550 : (tcb_desc->rts_use_shortgi ? 1 : 0)));
551 if (mac->bw_40) { 551 if (mac->bw_40) {
552 if (tcb_desc.packet_bw) { 552 if (tcb_desc->packet_bw) {
553 SET_TX_DESC_DATA_BW(txdesc, 1); 553 SET_TX_DESC_DATA_BW(txdesc, 1);
554 SET_TX_DESC_DATA_SC(txdesc, 3); 554 SET_TX_DESC_DATA_SC(txdesc, 3);
555 } else { 555 } else {
@@ -590,7 +590,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
590 SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F); 590 SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
591 SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF); 591 SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
592 SET_TX_DESC_DISABLE_FB(txdesc, 0); 592 SET_TX_DESC_DISABLE_FB(txdesc, 0);
593 SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 1 : 0); 593 SET_TX_DESC_USE_RATE(txdesc, tcb_desc->use_driver_rate ? 1 : 0);
594 if (ieee80211_is_data_qos(fc)) { 594 if (ieee80211_is_data_qos(fc)) {
595 if (mac->rdg_en) { 595 if (mac->rdg_en) {
596 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, 596 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
@@ -600,11 +600,11 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
600 } 600 }
601 } 601 }
602 if (rtlpriv->dm.useramask) { 602 if (rtlpriv->dm.useramask) {
603 SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index); 603 SET_TX_DESC_RATE_ID(txdesc, tcb_desc->ratr_index);
604 SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id); 604 SET_TX_DESC_MACID(txdesc, tcb_desc->mac_id);
605 } else { 605 } else {
606 SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index); 606 SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc->ratr_index);
607 SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index); 607 SET_TX_DESC_MACID(txdesc, tcb_desc->ratr_index);
608 } 608 }
609 if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps && 609 if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
610 ppsc->fwctrl_lps) { 610 ppsc->fwctrl_lps) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index b396d46edbb7..53de5f66e242 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -37,6 +37,8 @@
37#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */ 37#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
38#define RX_DRV_INFO_SIZE_UNIT 8 38#define RX_DRV_INFO_SIZE_UNIT 8
39 39
40#define RTL_AGG_ON 1
41
40enum usb_rx_agg_mode { 42enum usb_rx_agg_mode {
41 USB_RX_AGG_DISABLE, 43 USB_RX_AGG_DISABLE,
42 USB_RX_AGG_DMA, 44 USB_RX_AGG_DMA,
@@ -419,7 +421,8 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
419void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 421void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
420 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 422 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
421 struct ieee80211_tx_info *info, struct sk_buff *skb, 423 struct ieee80211_tx_info *info, struct sk_buff *skb,
422 unsigned int queue_index); 424 u8 queue_index,
425 struct rtl_tcb_desc *tcb_desc);
423void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, 426void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
424 u32 buffer_len, bool bIsPsPoll); 427 u32 buffer_len, bool bIsPsPoll);
425void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, 428void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/rtlwifi/rtl8192se/Makefile
new file mode 100644
index 000000000000..b7eb13819cbc
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/Makefile
@@ -0,0 +1,15 @@
1rtl8192se-objs := \
2 dm.o \
3 fw.o \
4 hw.o \
5 led.o \
6 phy.o \
7 rf.o \
8 sw.o \
9 table.o \
10 trx.o
11
12obj-$(CONFIG_RTL8192SE) += rtl8192se.o
13
14ccflags-y += -D__CHECK_ENDIAN__
15
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
new file mode 100644
index 000000000000..69828f2b3fab
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -0,0 +1,598 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __REALTEK_92S_DEF_H__
30#define __REALTEK_92S_DEF_H__
31
32#define RX_MPDU_QUEUE 0
33#define RX_CMD_QUEUE 1
34#define RX_MAX_QUEUE 2
35
36#define DESC92S_RATE1M 0x00
37#define DESC92S_RATE2M 0x01
38#define DESC92S_RATE5_5M 0x02
39#define DESC92S_RATE11M 0x03
40#define DESC92S_RATE6M 0x04
41#define DESC92S_RATE9M 0x05
42#define DESC92S_RATE12M 0x06
43#define DESC92S_RATE18M 0x07
44#define DESC92S_RATE24M 0x08
45#define DESC92S_RATE36M 0x09
46#define DESC92S_RATE48M 0x0a
47#define DESC92S_RATE54M 0x0b
48#define DESC92S_RATEMCS0 0x0c
49#define DESC92S_RATEMCS1 0x0d
50#define DESC92S_RATEMCS2 0x0e
51#define DESC92S_RATEMCS3 0x0f
52#define DESC92S_RATEMCS4 0x10
53#define DESC92S_RATEMCS5 0x11
54#define DESC92S_RATEMCS6 0x12
55#define DESC92S_RATEMCS7 0x13
56#define DESC92S_RATEMCS8 0x14
57#define DESC92S_RATEMCS9 0x15
58#define DESC92S_RATEMCS10 0x16
59#define DESC92S_RATEMCS11 0x17
60#define DESC92S_RATEMCS12 0x18
61#define DESC92S_RATEMCS13 0x19
62#define DESC92S_RATEMCS14 0x1a
63#define DESC92S_RATEMCS15 0x1b
64#define DESC92S_RATEMCS15_SG 0x1c
65#define DESC92S_RATEMCS32 0x20
66
67#define SHORT_SLOT_TIME 9
68#define NON_SHORT_SLOT_TIME 20
69
70/* Rx smooth factor */
71#define RX_SMOOTH_FACTOR 20
72
73/* Queue Select Value in TxDesc */
74#define QSLT_BK 0x2
75#define QSLT_BE 0x0
76#define QSLT_VI 0x5
77#define QSLT_VO 0x6
78#define QSLT_BEACON 0x10
79#define QSLT_HIGH 0x11
80#define QSLT_MGNT 0x12
81#define QSLT_CMD 0x13
82
83#define PHY_RSSI_SLID_WIN_MAX 100
84#define PHY_LINKQUALITY_SLID_WIN_MAX 20
85#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
86
87/* Tx Desc */
88#define TX_DESC_SIZE_RTL8192S (16 * 4)
89#define TX_CMDDESC_SIZE_RTL8192S (16 * 4)
90
91/* Define a macro that takes a le32 word, converts it to host ordering,
92 * right shifts by a specified count, creates a mask of the specified
93 * bit count, and extracts that number of bits.
94 */
95
96#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
97 ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
98 BIT_LEN_MASK_32(__mask))
99
100/* Define a macro that clears a bit field in an le32 word and
101 * sets the specified value into that bit field. The resulting
102 * value remains in le32 ordering; however, it is properly converted
103 * to host ordering for the clear and set operations before conversion
104 * back to le32.
105 */
106
107#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
108 (*(__le32 *)(__pdesc) = \
109 (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
110 (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
111 (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
112
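For orientation, a minimal usage sketch of the two primitives above (not part of the patch; desc and pkt_len are assumptions) shows how the SET_/GET_ accessors defined below are built and used:

	/* Sketch only: operate on a zeroed little-endian TX descriptor. */
	u8 desc[TX_DESC_SIZE_RTL8192S] = {0};
	u16 pkt_len = 64;

	/* Dword 0, bits 0..15 -- what SET_TX_DESC_PKT_SIZE expands to. */
	SET_BITS_OFFSET_LE(desc, 0, 16, pkt_len);
	/* Dword 0, bit 31 -- what SET_TX_DESC_OWN expands to. */
	SET_BITS_OFFSET_LE(desc, 31, 1, 1);
	/* Read the OWN bit back -- what GET_TX_DESC_OWN expands to. */
	if (SHIFT_AND_MASK_LE(desc, 31, 1)) {
		/* the descriptor is still owned by the hardware */
	}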
113/* macros to read/write various fields in RX or TX descriptors */
114
115/* Dword 0 */
116#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
117 SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
118#define SET_TX_DESC_OFFSET(__pdesc, __val) \
119 SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
120#define SET_TX_DESC_TYPE(__pdesc, __val) \
121 SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
122#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
123 SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
124#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
125 SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
126#define SET_TX_DESC_LINIP(__pdesc, __val) \
127 SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
128#define SET_TX_DESC_AMSDU(__pdesc, __val) \
129 SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
130#define SET_TX_DESC_GREEN_FIELD(__pdesc, __val) \
131 SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
132#define SET_TX_DESC_OWN(__pdesc, __val) \
133 SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
134
135#define GET_TX_DESC_OWN(__pdesc) \
136 SHIFT_AND_MASK_LE(__pdesc, 31, 1)
137
138/* Dword 1 */
139#define SET_TX_DESC_MACID(__pdesc, __val) \
140 SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
141#define SET_TX_DESC_MORE_DATA(__pdesc, __val) \
142 SET_BITS_OFFSET_LE(__pdesc + 4, 5, 1, __val)
143#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
144 SET_BITS_OFFSET_LE(__pdesc + 4, 6, 1, __val)
145#define SET_TX_DESC_PIFS(__pdesc, __val) \
146 SET_BITS_OFFSET_LE(__pdesc + 4, 7, 1, __val)
147#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
148 SET_BITS_OFFSET_LE(__pdesc + 4, 8, 5, __val)
149#define SET_TX_DESC_ACK_POLICY(__pdesc, __val) \
150 SET_BITS_OFFSET_LE(__pdesc + 4, 13, 2, __val)
151#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
152 SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
153#define SET_TX_DESC_NON_QOS(__pdesc, __val) \
154 SET_BITS_OFFSET_LE(__pdesc + 4, 16, 1, __val)
155#define SET_TX_DESC_KEY_ID(__pdesc, __val) \
156 SET_BITS_OFFSET_LE(__pdesc + 4, 17, 2, __val)
157#define SET_TX_DESC_OUI(__pdesc, __val) \
158 SET_BITS_OFFSET_LE(__pdesc + 4, 19, 1, __val)
159#define SET_TX_DESC_PKT_TYPE(__pdesc, __val) \
160 SET_BITS_OFFSET_LE(__pdesc + 4, 20, 1, __val)
161#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
162 SET_BITS_OFFSET_LE(__pdesc + 4, 21, 1, __val)
163#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
164 SET_BITS_OFFSET_LE(__pdesc + 4, 22, 2, __val)
165#define SET_TX_DESC_WDS(__pdesc, __val) \
166 SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
167#define SET_TX_DESC_HTC(__pdesc, __val) \
168 SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
169#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
170 SET_BITS_OFFSET_LE(__pdesc + 4, 26, 5, __val)
171#define SET_TX_DESC_HWPC(__pdesc, __val) \
172 SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
173
174/* Dword 2 */
175#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
176 SET_BITS_OFFSET_LE(__pdesc + 8, 0, 6, __val)
177#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
178 SET_BITS_OFFSET_LE(__pdesc + 8, 6, 1, __val)
179#define SET_TX_DESC_TSFL(__pdesc, __val) \
180 SET_BITS_OFFSET_LE(__pdesc + 8, 7, 5, __val)
181#define SET_TX_DESC_RTS_RETRY_COUNT(__pdesc, __val) \
182 SET_BITS_OFFSET_LE(__pdesc + 8, 12, 6, __val)
183#define SET_TX_DESC_DATA_RETRY_COUNT(__pdesc, __val) \
184 SET_BITS_OFFSET_LE(__pdesc + 8, 18, 6, __val)
185#define SET_TX_DESC_RSVD_MACID(__pdesc, __val) \
186 SET_BITS_OFFSET_LE(((__pdesc) + 8), 24, 5, __val)
187#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
188 SET_BITS_OFFSET_LE(__pdesc + 8, 29, 1, __val)
189#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
190 SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
191#define SET_TX_DESC_OWN_MAC(__pdesc, __val) \
192 SET_BITS_OFFSET_LE(__pdesc + 8, 31, 1, __val)
193
194/* Dword 3 */
195#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
196 SET_BITS_OFFSET_LE(__pdesc + 12, 0, 8, __val)
197#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
198 SET_BITS_OFFSET_LE(__pdesc + 12, 8, 8, __val)
199#define SET_TX_DESC_SEQ(__pdesc, __val) \
200 SET_BITS_OFFSET_LE(__pdesc + 12, 16, 12, __val)
201#define SET_TX_DESC_FRAG(__pdesc, __val) \
202 SET_BITS_OFFSET_LE(__pdesc + 12, 28, 4, __val)
203
204/* Dword 4 */
205#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
206 SET_BITS_OFFSET_LE(__pdesc + 16, 0, 6, __val)
207#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
208 SET_BITS_OFFSET_LE(__pdesc + 16, 6, 1, __val)
209#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
210 SET_BITS_OFFSET_LE(__pdesc + 16, 7, 4, __val)
211#define SET_TX_DESC_CTS_ENABLE(__pdesc, __val) \
212 SET_BITS_OFFSET_LE(__pdesc + 16, 11, 1, __val)
213#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
214 SET_BITS_OFFSET_LE(__pdesc + 16, 12, 1, __val)
215#define SET_TX_DESC_RA_BRSR_ID(__pdesc, __val) \
216 SET_BITS_OFFSET_LE(__pdesc + 16, 13, 3, __val)
217#define SET_TX_DESC_TXHT(__pdesc, __val) \
218 SET_BITS_OFFSET_LE(__pdesc + 16, 16, 1, __val)
219#define SET_TX_DESC_TX_SHORT(__pdesc, __val) \
220 SET_BITS_OFFSET_LE(__pdesc + 16, 17, 1, __val)
221#define SET_TX_DESC_TX_BANDWIDTH(__pdesc, __val) \
222 SET_BITS_OFFSET_LE(__pdesc + 16, 18, 1, __val)
223#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
224 SET_BITS_OFFSET_LE(__pdesc + 16, 19, 2, __val)
225#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
226 SET_BITS_OFFSET_LE(__pdesc + 16, 21, 2, __val)
227#define SET_TX_DESC_TX_REVERSE_DIRECTION(__pdesc, __val) \
228 SET_BITS_OFFSET_LE(__pdesc + 16, 23, 1, __val)
229#define SET_TX_DESC_RTS_HT(__pdesc, __val) \
230 SET_BITS_OFFSET_LE(__pdesc + 16, 24, 1, __val)
231#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
232 SET_BITS_OFFSET_LE(__pdesc + 16, 25, 1, __val)
233#define SET_TX_DESC_RTS_BANDWIDTH(__pdesc, __val) \
234 SET_BITS_OFFSET_LE(__pdesc + 16, 26, 1, __val)
235#define SET_TX_DESC_RTS_SUB_CARRIER(__pdesc, __val) \
236 SET_BITS_OFFSET_LE(__pdesc + 16, 27, 2, __val)
237#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
238 SET_BITS_OFFSET_LE(__pdesc + 16, 29, 2, __val)
239#define SET_TX_DESC_USER_RATE(__pdesc, __val) \
240 SET_BITS_OFFSET_LE(__pdesc + 16, 31, 1, __val)
241
242/* Dword 5 */
243#define SET_TX_DESC_PACKET_ID(__pdesc, __val) \
244 SET_BITS_OFFSET_LE(__pdesc + 20, 0, 9, __val)
245#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
246 SET_BITS_OFFSET_LE(__pdesc + 20, 9, 6, __val)
247#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
248 SET_BITS_OFFSET_LE(__pdesc + 20, 15, 1, __val)
249#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
250 SET_BITS_OFFSET_LE(__pdesc + 20, 16, 5, __val)
251#define SET_TX_DESC_TX_AGC(__pdesc, __val) \
252 SET_BITS_OFFSET_LE(__pdesc + 20, 21, 11, __val)
253
254/* Dword 6 */
255#define SET_TX_DESC_IP_CHECK_SUM(__pdesc, __val) \
256 SET_BITS_OFFSET_LE(__pdesc + 24, 0, 16, __val)
257#define SET_TX_DESC_TCP_CHECK_SUM(__pdesc, __val) \
258 SET_BITS_OFFSET_LE(__pdesc + 24, 16, 16, __val)
259
260/* Dword 7 */
261#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
262 SET_BITS_OFFSET_LE(__pdesc + 28, 0, 16, __val)
263#define SET_TX_DESC_IP_HEADER_OFFSET(__pdesc, __val) \
264 SET_BITS_OFFSET_LE(__pdesc + 28, 16, 8, __val)
265#define SET_TX_DESC_TCP_ENABLE(__pdesc, __val) \
266 SET_BITS_OFFSET_LE(__pdesc + 28, 31, 1, __val)
267
268/* Dword 8 */
269#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
270 SET_BITS_OFFSET_LE(__pdesc + 32, 0, 32, __val)
271#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
272 SHIFT_AND_MASK_LE(__pdesc + 32, 0, 32)
273
274/* Dword 9 */
275#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
276 SET_BITS_OFFSET_LE(__pdesc + 36, 0, 32, __val)
277
278/* Because the PCI Tx descriptors are chained at
279 * initialization and the NextDescAddresses in
280 * these descriptors cannot be cleared (or the
281 * driver/HW could not find the next descriptor), the
282 * offset 36 (NextDescAddresses) is preserved when
283 * the desc is cleared. */
284#define TX_DESC_NEXT_DESC_OFFSET 36
285#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
286do { \
287 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
288 memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
289 else \
290 memset(__pdesc, 0, _size); \
291} while (0);
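A hedged illustration of the helper above when a descriptor is recycled (ring, idx and skb are assumptions taken from typical rtlwifi TX paths, not from this patch):

	/* Sketch only: reuse one TX descriptor of the chained PCI ring. */
	u8 *pdesc = (u8 *)&ring->desc[idx];

	/* Clears at most TX_DESC_NEXT_DESC_OFFSET (36) bytes, so the
	 * NextDescAddress written at ring-init time stays intact. */
	CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE_RTL8192S);

	SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
	SET_TX_DESC_FIRST_SEG(pdesc, 1);
	SET_TX_DESC_LAST_SEG(pdesc, 1);
	SET_TX_DESC_OWN(pdesc, 1);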
292
293/* Rx Desc */
294#define RX_STATUS_DESC_SIZE 24
295#define RX_DRV_INFO_SIZE_UNIT 8
296
297/* DWORD 0 */
298#define SET_RX_STATUS_DESC_PKT_LEN(__pdesc, __val) \
299 SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
300#define SET_RX_STATUS_DESC_CRC32(__pdesc, __val) \
301 SET_BITS_OFFSET_LE(__pdesc, 14, 1, __val)
302#define SET_RX_STATUS_DESC_ICV(__pdesc, __val) \
303 SET_BITS_OFFSET_LE(__pdesc, 15, 1, __val)
304#define SET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc, __val) \
305 SET_BITS_OFFSET_LE(__pdesc, 16, 4, __val)
306#define SET_RX_STATUS_DESC_SECURITY(__pdesc, __val) \
307 SET_BITS_OFFSET_LE(__pdesc, 20, 3, __val)
308#define SET_RX_STATUS_DESC_QOS(__pdesc, __val) \
309 SET_BITS_OFFSET_LE(__pdesc, 23, 1, __val)
310#define SET_RX_STATUS_DESC_SHIFT(__pdesc, __val) \
311 SET_BITS_OFFSET_LE(__pdesc, 24, 2, __val)
312#define SET_RX_STATUS_DESC_PHY_STATUS(__pdesc, __val) \
313 SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
314#define SET_RX_STATUS_DESC_SWDEC(__pdesc, __val) \
315 SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
316#define SET_RX_STATUS_DESC_LAST_SEG(__pdesc, __val) \
317 SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
318#define SET_RX_STATUS_DESC_FIRST_SEG(__pdesc, __val) \
319 SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
320#define SET_RX_STATUS_DESC_EOR(__pdesc, __val) \
321 SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
322#define SET_RX_STATUS_DESC_OWN(__pdesc, __val) \
323 SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
324
325#define GET_RX_STATUS_DESC_PKT_LEN(__pdesc) \
326 SHIFT_AND_MASK_LE(__pdesc, 0, 14)
327#define GET_RX_STATUS_DESC_CRC32(__pdesc) \
328 SHIFT_AND_MASK_LE(__pdesc, 14, 1)
329#define GET_RX_STATUS_DESC_ICV(__pdesc) \
330 SHIFT_AND_MASK_LE(__pdesc, 15, 1)
331#define GET_RX_STATUS_DESC_DRVINFO_SIZE(__pdesc) \
332 SHIFT_AND_MASK_LE(__pdesc, 16, 4)
333#define GET_RX_STATUS_DESC_SECURITY(__pdesc) \
334 SHIFT_AND_MASK_LE(__pdesc, 20, 3)
335#define GET_RX_STATUS_DESC_QOS(__pdesc) \
336 SHIFT_AND_MASK_LE(__pdesc, 23, 1)
337#define GET_RX_STATUS_DESC_SHIFT(__pdesc) \
338 SHIFT_AND_MASK_LE(__pdesc, 24, 2)
339#define GET_RX_STATUS_DESC_PHY_STATUS(__pdesc) \
340 SHIFT_AND_MASK_LE(__pdesc, 26, 1)
341#define GET_RX_STATUS_DESC_SWDEC(__pdesc) \
342 SHIFT_AND_MASK_LE(__pdesc, 27, 1)
343#define GET_RX_STATUS_DESC_LAST_SEG(__pdesc) \
344 SHIFT_AND_MASK_LE(__pdesc, 28, 1)
345#define GET_RX_STATUS_DESC_FIRST_SEG(__pdesc) \
346 SHIFT_AND_MASK_LE(__pdesc, 29, 1)
347#define GET_RX_STATUS_DESC_EOR(__pdesc) \
348 SHIFT_AND_MASK_LE(__pdesc, 30, 1)
349#define GET_RX_STATUS_DESC_OWN(__pdesc) \
350 SHIFT_AND_MASK_LE(__pdesc, 31, 1)
351
352/* DWORD 1 */
353#define SET_RX_STATUS_DESC_MACID(__pdesc, __val) \
354 SET_BITS_OFFSET_LE(__pdesc + 4, 0, 5, __val)
355#define SET_RX_STATUS_DESC_TID(__pdesc, __val) \
356 SET_BITS_OFFSET_LE(__pdesc + 4, 5, 4, __val)
357#define SET_RX_STATUS_DESC_PAGGR(__pdesc, __val) \
358 SET_BITS_OFFSET_LE(__pdesc + 4, 14, 1, __val)
359#define SET_RX_STATUS_DESC_FAGGR(__pdesc, __val) \
360 SET_BITS_OFFSET_LE(__pdesc + 4, 15, 1, __val)
361#define SET_RX_STATUS_DESC_A1_FIT(__pdesc, __val) \
362 SET_BITS_OFFSET_LE(__pdesc + 4, 16, 4, __val)
363#define SET_RX_STATUS_DESC_A2_FIT(__pdesc, __val) \
364 SET_BITS_OFFSET_LE(__pdesc + 4, 20, 4, __val)
365#define SET_RX_STATUS_DESC_PAM(__pdesc, __val) \
366 SET_BITS_OFFSET_LE(__pdesc + 4, 24, 1, __val)
367#define SET_RX_STATUS_DESC_PWR(__pdesc, __val) \
368 SET_BITS_OFFSET_LE(__pdesc + 4, 25, 1, __val)
369#define SET_RX_STATUS_DESC_MOREDATA(__pdesc, __val) \
370 SET_BITS_OFFSET_LE(__pdesc + 4, 26, 1, __val)
371#define SET_RX_STATUS_DESC_MOREFRAG(__pdesc, __val) \
372 SET_BITS_OFFSET_LE(__pdesc + 4, 27, 1, __val)
373#define SET_RX_STATUS_DESC_TYPE(__pdesc, __val) \
374 SET_BITS_OFFSET_LE(__pdesc + 4, 28, 2, __val)
375#define SET_RX_STATUS_DESC_MC(__pdesc, __val) \
376 SET_BITS_OFFSET_LE(__pdesc + 4, 30, 1, __val)
377#define SET_RX_STATUS_DESC_BC(__pdesc, __val) \
378 SET_BITS_OFFSET_LE(__pdesc + 4, 31, 1, __val)
379
380#define GET_RX_STATUS_DEC_MACID(__pdesc) \
381 SHIFT_AND_MASK_LE(__pdesc + 4, 0, 5)
382#define GET_RX_STATUS_DESC_TID(__pdesc) \
383 SHIFT_AND_MASK_LE(__pdesc + 4, 5, 4)
384#define GET_RX_STATUS_DESC_PAGGR(__pdesc) \
385 SHIFT_AND_MASK_LE(__pdesc + 4, 14, 1)
386#define GET_RX_STATUS_DESC_FAGGR(__pdesc) \
387 SHIFT_AND_MASK_LE(__pdesc + 4, 15, 1)
388#define GET_RX_STATUS_DESC_A1_FIT(__pdesc) \
389 SHIFT_AND_MASK_LE(__pdesc + 4, 16, 4)
390#define GET_RX_STATUS_DESC_A2_FIT(__pdesc) \
391 SHIFT_AND_MASK_LE(__pdesc + 4, 20, 4)
392#define GET_RX_STATUS_DESC_PAM(__pdesc) \
393 SHIFT_AND_MASK_LE(__pdesc + 4, 24, 1)
394#define GET_RX_STATUS_DESC_PWR(__pdesc) \
395 SHIFT_AND_MASK_LE(__pdesc + 4, 25, 1)
396#define GET_RX_STATUS_DESC_MORE_DATA(__pdesc) \
397 SHIFT_AND_MASK_LE(__pdesc + 4, 26, 1)
398#define GET_RX_STATUS_DESC_MORE_FRAG(__pdesc) \
399 SHIFT_AND_MASK_LE(__pdesc + 4, 27, 1)
400#define GET_RX_STATUS_DESC_TYPE(__pdesc) \
401 SHIFT_AND_MASK_LE(__pdesc + 4, 28, 2)
402#define GET_RX_STATUS_DESC_MC(__pdesc) \
403 SHIFT_AND_MASK_LE(__pdesc + 4, 30, 1)
404#define GET_RX_STATUS_DESC_BC(__pdesc) \
405 SHIFT_AND_MASK_LE(__pdesc + 4, 31, 1)
406
407/* DWORD 2 */
408#define SET_RX_STATUS_DESC_SEQ(__pdesc, __val) \
409 SET_BITS_OFFSET_LE(__pdesc + 8, 0, 12, __val)
410#define SET_RX_STATUS_DESC_FRAG(__pdesc, __val) \
411 SET_BITS_OFFSET_LE(__pdesc + 8, 12, 4, __val)
412#define SET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc, __val) \
413 SET_BITS_OFFSET_LE(__pdesc + 8, 16, 8, __val)
414#define SET_RX_STATUS_DESC_NEXT_IND(__pdesc, __val) \
415 SET_BITS_OFFSET_LE(__pdesc + 8, 30, 1, __val)
416
417#define GET_RX_STATUS_DESC_SEQ(__pdesc) \
418 SHIFT_AND_MASK_LE(__pdesc + 8, 0, 12)
419#define GET_RX_STATUS_DESC_FRAG(__pdesc) \
420 SHIFT_AND_MASK_LE(__pdesc + 8, 12, 4)
421#define GET_RX_STATUS_DESC_NEXT_PKTLEN(__pdesc) \
422 SHIFT_AND_MASK_LE(__pdesc + 8, 16, 8)
423#define GET_RX_STATUS_DESC_NEXT_IND(__pdesc) \
424 SHIFT_AND_MASK_LE(__pdesc + 8, 30, 1)
425
426/* DWORD 3 */
427#define SET_RX_STATUS_DESC_RX_MCS(__pdesc, __val) \
428 SET_BITS_OFFSET_LE(__pdesc + 12, 0, 6, __val)
429#define SET_RX_STATUS_DESC_RX_HT(__pdesc, __val) \
430 SET_BITS_OFFSET_LE(__pdesc + 12, 6, 1, __val)
431#define SET_RX_STATUS_DESC_AMSDU(__pdesc, __val) \
432 SET_BITS_OFFSET_LE(__pdesc + 12, 7, 1, __val)
433#define SET_RX_STATUS_DESC_SPLCP(__pdesc, __val) \
434 SET_BITS_OFFSET_LE(__pdesc + 12, 8, 1, __val)
435#define SET_RX_STATUS_DESC_BW(__pdesc, __val) \
436 SET_BITS_OFFSET_LE(__pdesc + 12, 9, 1, __val)
437#define SET_RX_STATUS_DESC_HTC(__pdesc, __val) \
438 SET_BITS_OFFSET_LE(__pdesc + 12, 10, 1, __val)
439#define SET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc, __val) \
440 SET_BITS_OFFSET_LE(__pdesc + 12, 11, 1, __val)
441#define SET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc, __val) \
442 SET_BITS_OFFSET_LE(__pdesc + 12, 12, 1, __val)
443#define SET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc, __val) \
444 SET_BITS_OFFSET_LE(__pdesc + 12, 13, 1, __val)
445#define SET_RX_STATUS_DESC_HWPC_ERR(__pdesc, __val) \
446 SET_BITS_OFFSET_LE(__pdesc + 12, 14, 1, __val)
447#define SET_RX_STATUS_DESC_HWPC_IND(__pdesc, __val) \
448 SET_BITS_OFFSET_LE(__pdesc + 12, 15, 1, __val)
449#define SET_RX_STATUS_DESC_IV0(__pdesc, __val) \
450 SET_BITS_OFFSET_LE(__pdesc + 12, 16, 16, __val)
451
452#define GET_RX_STATUS_DESC_RX_MCS(__pdesc) \
453 SHIFT_AND_MASK_LE(__pdesc + 12, 0, 6)
454#define GET_RX_STATUS_DESC_RX_HT(__pdesc) \
455 SHIFT_AND_MASK_LE(__pdesc + 12, 6, 1)
456#define GET_RX_STATUS_DESC_AMSDU(__pdesc) \
457 SHIFT_AND_MASK_LE(__pdesc + 12, 7, 1)
458#define GET_RX_STATUS_DESC_SPLCP(__pdesc) \
459 SHIFT_AND_MASK_LE(__pdesc + 12, 8, 1)
460#define GET_RX_STATUS_DESC_BW(__pdesc) \
461 SHIFT_AND_MASK_LE(__pdesc + 12, 9, 1)
462#define GET_RX_STATUS_DESC_HTC(__pdesc) \
463 SHIFT_AND_MASK_LE(__pdesc + 12, 10, 1)
464#define GET_RX_STATUS_DESC_TCP_CHK_RPT(__pdesc) \
465 SHIFT_AND_MASK_LE(__pdesc + 12, 11, 1)
466#define GET_RX_STATUS_DESC_IP_CHK_RPT(__pdesc) \
467 SHIFT_AND_MASK_LE(__pdesc + 12, 12, 1)
468#define GET_RX_STATUS_DESC_TCP_CHK_VALID(__pdesc) \
469 SHIFT_AND_MASK_LE(__pdesc + 12, 13, 1)
470#define GET_RX_STATUS_DESC_HWPC_ERR(__pdesc) \
471 SHIFT_AND_MASK_LE(__pdesc + 12, 14, 1)
472#define GET_RX_STATUS_DESC_HWPC_IND(__pdesc) \
473 SHIFT_AND_MASK_LE(__pdesc + 12, 15, 1)
474#define GET_RX_STATUS_DESC_IV0(__pdesc) \
475 SHIFT_AND_MASK_LE(__pdesc + 12, 16, 16)
476
477/* DWORD 4 */
478#define SET_RX_STATUS_DESC_IV1(__pdesc, __val) \
479 SET_BITS_OFFSET_LE(__pdesc + 16, 0, 32, __val)
480#define GET_RX_STATUS_DESC_IV1(__pdesc) \
481 SHIFT_AND_MASK_LE(__pdesc + 16, 0, 32)
482
483/* DWORD 5 */
484#define SET_RX_STATUS_DESC_TSFL(__pdesc, __val) \
485 SET_BITS_OFFSET_LE(__pdesc + 20, 0, 32, __val)
486#define GET_RX_STATUS_DESC_TSFL(__pdesc) \
487 SHIFT_AND_MASK_LE(__pdesc + 20, 0, 32)
488
489/* DWORD 6 */
490#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \
491 SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
492
493#define RX_HAL_IS_CCK_RATE(_pdesc)\
494 (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE1M || \
495 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE2M || \
496 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE5_5M ||\
497 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92S_RATE11M)
498
499enum rf_optype {
500 RF_OP_BY_SW_3WIRE = 0,
501 RF_OP_BY_FW,
502 RF_OP_MAX
503};
504
505enum ic_inferiority {
506 IC_INFERIORITY_A = 0,
507 IC_INFERIORITY_B = 1,
508};
509
510enum fwcmd_iotype {
511 /* For DIG DM */
512 FW_CMD_DIG_ENABLE = 0,
513 FW_CMD_DIG_DISABLE = 1,
514 FW_CMD_DIG_HALT = 2,
515 FW_CMD_DIG_RESUME = 3,
516 /* For High Power DM */
517 FW_CMD_HIGH_PWR_ENABLE = 4,
518 FW_CMD_HIGH_PWR_DISABLE = 5,
519 /* For Rate adaptive DM */
520 FW_CMD_RA_RESET = 6,
521 FW_CMD_RA_ACTIVE = 7,
522 FW_CMD_RA_REFRESH_N = 8,
523 FW_CMD_RA_REFRESH_BG = 9,
524 FW_CMD_RA_INIT = 10,
525 /* For FW supported IQK */
526 FW_CMD_IQK_INIT = 11,
527 /* Tx power tracking switch,
528 * MP driver only */
529 FW_CMD_TXPWR_TRACK_ENABLE = 12,
530 /* Tx power tracking switch,
531 * MP driver only */
532 FW_CMD_TXPWR_TRACK_DISABLE = 13,
533 /* Tx power tracking with thermal
534 * indication, for Normal driver */
535 FW_CMD_TXPWR_TRACK_THERMAL = 14,
536 FW_CMD_PAUSE_DM_BY_SCAN = 15,
537 FW_CMD_RESUME_DM_BY_SCAN = 16,
538 FW_CMD_RA_REFRESH_N_COMB = 17,
539 FW_CMD_RA_REFRESH_BG_COMB = 18,
540 FW_CMD_ANTENNA_SW_ENABLE = 19,
541 FW_CMD_ANTENNA_SW_DISABLE = 20,
542 /* Tx Status report for CCX from FW */
543 FW_CMD_TX_FEEDBACK_CCX_ENABLE = 21,
544	/* Indicate firmware that driver
545	 * enters LPS, for PS-Poll issue */
546 FW_CMD_LPS_ENTER = 22,
547 /* Indicate firmware that driver
548	 * leaves LPS */
549 FW_CMD_LPS_LEAVE = 23,
550 /* Set DIG mode to signal strength */
551 FW_CMD_DIG_MODE_SS = 24,
552 /* Set DIG mode to false alarm. */
553 FW_CMD_DIG_MODE_FA = 25,
554 FW_CMD_ADD_A2_ENTRY = 26,
555 FW_CMD_CTRL_DM_BY_DRIVER = 27,
556 FW_CMD_CTRL_DM_BY_DRIVER_NEW = 28,
557 FW_CMD_PAPE_CONTROL = 29,
558 FW_CMD_IQK_ENABLE = 30,
559};
560
561/*
562 * Driver info contains the PHY status
563 * and other variable-size info.
564 * The PHY status content is laid out below.
565 */
566struct rx_fwinfo {
567 /* DWORD 0 */
568 u8 gain_trsw[4];
569 /* DWORD 1 */
570 u8 pwdb_all;
571 u8 cfosho[4];
572 /* DWORD 2 */
573 u8 cfotail[4];
574 /* DWORD 3 */
575 s8 rxevm[2];
576 s8 rxsnr[4];
577 /* DWORD 4 */
578 u8 pdsnr[2];
579 /* DWORD 5 */
580 u8 csi_current[2];
581 u8 csi_target[2];
582 /* DWORD 6 */
583 u8 sigevm;
584 u8 max_ex_pwr;
585 u8 ex_intf_flag:1;
586 u8 sgi_en:1;
587 u8 rxsc:2;
588 u8 reserve:4;
589};
590
591struct phy_sts_cck_8192s_t {
592 u8 adc_pwdb_x[4];
593 u8 sq_rpt;
594 u8 cck_agc_rpt;
595};
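To see how these pieces fit together: the fixed 24-byte RX status descriptor is followed by this driver-info block, whose length the hardware reports in RX_DRV_INFO_SIZE_UNIT (8-byte) units. A hedged sketch, assuming rx_buffer points at the start of the status descriptor:

	/* Sketch only: locate the PHY-status report behind the RX descriptor. */
	u8 *pdesc = rx_buffer;	/* rx_buffer is an assumption */
	u32 drvinfo_len = GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) *
			  RX_DRV_INFO_SIZE_UNIT;

	if (GET_RX_STATUS_DESC_PHY_STATUS(pdesc) &&
	    drvinfo_len >= sizeof(struct rx_fwinfo)) {
		struct rx_fwinfo *p_fwinfo =
			(struct rx_fwinfo *)(rx_buffer + RX_STATUS_DESC_SIZE);
		u8 pwdb = p_fwinfo->pwdb_all;	/* e.g. signal strength */
	}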
596
597#endif
598
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
new file mode 100644
index 000000000000..da86db86fa4a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -0,0 +1,733 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../base.h"
32#include "reg.h"
33#include "def.h"
34#include "phy.h"
35#include "dm.h"
36#include "fw.h"
37
38struct dig_t digtable;
39static const u32 edca_setting_dl[PEER_MAX] = {
40 0xa44f, /* 0 UNKNOWN */
41 0x5ea44f, /* 1 REALTEK_90 */
42 0x5ea44f, /* 2 REALTEK_92SE */
43 0xa630, /* 3 BROAD */
44 0xa44f, /* 4 RAL */
45 0xa630, /* 5 ATH */
46 0xa630, /* 6 CISCO */
47 0xa42b, /* 7 MARV */
48};
49
50static const u32 edca_setting_dl_gmode[PEER_MAX] = {
51 0x4322, /* 0 UNKNOWN */
52 0xa44f, /* 1 REALTEK_90 */
53 0x5ea44f, /* 2 REALTEK_92SE */
54 0xa42b, /* 3 BROAD */
55 0x5e4322, /* 4 RAL */
56 0x4322, /* 5 ATH */
57 0xa430, /* 6 CISCO */
58 0x5ea44f, /* 7 MARV */
59};
60
61static const u32 edca_setting_ul[PEER_MAX] = {
62 0x5e4322, /* 0 UNKNOWN */
63 0xa44f, /* 1 REALTEK_90 */
64 0x5ea44f, /* 2 REALTEK_92SE */
65 0x5ea322, /* 3 BROAD */
66 0x5ea422, /* 4 RAL */
67 0x5ea322, /* 5 ATH */
68 0x3ea44f, /* 6 CISCO */
69 0x5ea44f, /* 7 MARV */
70};
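For orientation: the three tables are indexed by the detected peer vendor, and the chosen value is written to the BE EDCA parameter register by _rtl92s_dm_check_edca_turbo() below. A condensed, hedged sketch (uplink_heavy stands in for the TX/RX byte-count comparison made in that function):

	/* Sketch only: pick and apply the EDCA BE parameter for the peer. */
	u32 edca_be = uplink_heavy ? edca_setting_ul[mac->vendor]
				   : edca_setting_dl[mac->vendor];

	rtl_write_dword(rtlpriv, EDCAPARA_BE, edca_be);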
71
72static void _rtl92s_dm_check_edca_turbo(struct ieee80211_hw *hw)
73{
74 struct rtl_priv *rtlpriv = rtl_priv(hw);
75 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
76
77 static u64 last_txok_cnt;
78 static u64 last_rxok_cnt;
79 u64 cur_txok_cnt = 0;
80 u64 cur_rxok_cnt = 0;
81
82 u32 edca_be_ul = edca_setting_ul[mac->vendor];
83 u32 edca_be_dl = edca_setting_dl[mac->vendor];
84 u32 edca_gmode = edca_setting_dl_gmode[mac->vendor];
85
86 if (mac->link_state != MAC80211_LINKED) {
87 rtlpriv->dm.current_turbo_edca = false;
88 goto dm_checkedcaturbo_exit;
89 }
90
91 if ((!rtlpriv->dm.is_any_nonbepkts) &&
92 (!rtlpriv->dm.disable_framebursting)) {
93 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
94 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
95
96 if (rtlpriv->phy.rf_type == RF_1T2R) {
97 if (cur_txok_cnt > 4 * cur_rxok_cnt) {
98 /* Uplink TP is present. */
99 if (rtlpriv->dm.is_cur_rdlstate ||
100 !rtlpriv->dm.current_turbo_edca) {
101 rtl_write_dword(rtlpriv, EDCAPARA_BE,
102 edca_be_ul);
103 rtlpriv->dm.is_cur_rdlstate = false;
104 }
105 } else {/* Balance TP is present. */
106 if (!rtlpriv->dm.is_cur_rdlstate ||
107 !rtlpriv->dm.current_turbo_edca) {
108 if (mac->mode == WIRELESS_MODE_G ||
109 mac->mode == WIRELESS_MODE_B)
110 rtl_write_dword(rtlpriv,
111 EDCAPARA_BE,
112 edca_gmode);
113 else
114 rtl_write_dword(rtlpriv,
115 EDCAPARA_BE,
116 edca_be_dl);
117 rtlpriv->dm.is_cur_rdlstate = true;
118 }
119 }
120 rtlpriv->dm.current_turbo_edca = true;
121 } else {
122 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
123 if (!rtlpriv->dm.is_cur_rdlstate ||
124 !rtlpriv->dm.current_turbo_edca) {
125 if (mac->mode == WIRELESS_MODE_G ||
126 mac->mode == WIRELESS_MODE_B)
127 rtl_write_dword(rtlpriv,
128 EDCAPARA_BE,
129 edca_gmode);
130 else
131 rtl_write_dword(rtlpriv,
132 EDCAPARA_BE,
133 edca_be_dl);
134 rtlpriv->dm.is_cur_rdlstate = true;
135 }
136 } else {
137 if (rtlpriv->dm.is_cur_rdlstate ||
138 !rtlpriv->dm.current_turbo_edca) {
139 rtl_write_dword(rtlpriv, EDCAPARA_BE,
140 edca_be_ul);
141 rtlpriv->dm.is_cur_rdlstate = false;
142 }
143 }
144 rtlpriv->dm.current_turbo_edca = true;
145 }
146 } else {
147 if (rtlpriv->dm.current_turbo_edca) {
148 u8 tmp = AC0_BE;
149 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
150 (u8 *)(&tmp));
151 rtlpriv->dm.current_turbo_edca = false;
152 }
153 }
154
155dm_checkedcaturbo_exit:
156 rtlpriv->dm.is_any_nonbepkts = false;
157 last_txok_cnt = rtlpriv->stats.txbytesunicast;
158 last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
159}
160
161static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
162 struct ieee80211_hw *hw)
163{
164 struct rtl_priv *rtlpriv = rtl_priv(hw);
165 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
166 u8 thermalvalue = 0;
167
168 rtlpriv->dm.txpower_trackinginit = true;
169
170 thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
171
172 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
173 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
174 "eeprom_thermalmeter 0x%x\n", thermalvalue,
175 rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter));
176
177 if (thermalvalue) {
178 rtlpriv->dm.thermalvalue = thermalvalue;
179 rtl92s_phy_set_fw_cmd(hw, FW_CMD_TXPWR_TRACK_THERMAL);
180 }
181
182 rtlpriv->dm.txpowercount = 0;
183}
184
185static void _rtl92s_dm_check_txpowertracking_thermalmeter(
186 struct ieee80211_hw *hw)
187{
188 struct rtl_priv *rtlpriv = rtl_priv(hw);
189 struct rtl_phy *rtlphy = &(rtlpriv->phy);
190 static u8 tm_trigger;
191 u8 tx_power_checkcnt = 5;
192
193 /* 2T2R TP issue */
194 if (rtlphy->rf_type == RF_2T2R)
195 return;
196
197 if (!rtlpriv->dm.txpower_tracking)
198 return;
199
200 if (rtlpriv->dm.txpowercount <= tx_power_checkcnt) {
201 rtlpriv->dm.txpowercount++;
202 return;
203 }
204
205 if (!tm_trigger) {
206 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER,
207 RFREG_OFFSET_MASK, 0x60);
208 tm_trigger = 1;
209 } else {
210 _rtl92s_dm_txpowertracking_callback_thermalmeter(hw);
211 tm_trigger = 0;
212 }
213}
214
215static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
216{
217 struct rtl_priv *rtlpriv = rtl_priv(hw);
218 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
219 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
220 struct rate_adaptive *ra = &(rtlpriv->ra);
221
222 u32 low_rssi_thresh = 0;
223 u32 middle_rssi_thresh = 0;
224 u32 high_rssi_thresh = 0;
225 u8 rssi_level;
226 struct ieee80211_sta *sta = NULL;
227
228 if (is_hal_stop(rtlhal))
229 return;
230
231 if (!rtlpriv->dm.useramask)
232 return;
233
234 if (!rtlpriv->dm.inform_fw_driverctrldm) {
235 rtl92s_phy_set_fw_cmd(hw, FW_CMD_CTRL_DM_BY_DRIVER);
236 rtlpriv->dm.inform_fw_driverctrldm = true;
237 }
238
239 rcu_read_lock();
240 if (mac->opmode == NL80211_IFTYPE_STATION)
241 sta = get_sta(hw, mac->vif, mac->bssid);
242 if ((mac->link_state == MAC80211_LINKED) &&
243 (mac->opmode == NL80211_IFTYPE_STATION)) {
244 switch (ra->pre_ratr_state) {
245 case DM_RATR_STA_HIGH:
246 high_rssi_thresh = 40;
247 middle_rssi_thresh = 30;
248 low_rssi_thresh = 20;
249 break;
250 case DM_RATR_STA_MIDDLE:
251 high_rssi_thresh = 44;
252 middle_rssi_thresh = 30;
253 low_rssi_thresh = 20;
254 break;
255 case DM_RATR_STA_LOW:
256 high_rssi_thresh = 44;
257 middle_rssi_thresh = 34;
258 low_rssi_thresh = 20;
259 break;
260 case DM_RATR_STA_ULTRALOW:
261 high_rssi_thresh = 44;
262 middle_rssi_thresh = 34;
263 low_rssi_thresh = 24;
264 break;
265 default:
266 high_rssi_thresh = 44;
267 middle_rssi_thresh = 34;
268 low_rssi_thresh = 24;
269 break;
270 }
271
272 if (rtlpriv->dm.undecorated_smoothed_pwdb >
273 (long)high_rssi_thresh) {
274 ra->ratr_state = DM_RATR_STA_HIGH;
275 rssi_level = 1;
276 } else if (rtlpriv->dm.undecorated_smoothed_pwdb >
277 (long)middle_rssi_thresh) {
278 ra->ratr_state = DM_RATR_STA_LOW;
279 rssi_level = 3;
280 } else if (rtlpriv->dm.undecorated_smoothed_pwdb >
281 (long)low_rssi_thresh) {
282 ra->ratr_state = DM_RATR_STA_LOW;
283 rssi_level = 5;
284 } else {
285 ra->ratr_state = DM_RATR_STA_ULTRALOW;
286 rssi_level = 6;
287 }
288
289 if (ra->pre_ratr_state != ra->ratr_state) {
290 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, ("RSSI = %ld "
291 "RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
292 rtlpriv->dm.undecorated_smoothed_pwdb,
293 ra->ratr_state,
294 ra->pre_ratr_state, ra->ratr_state));
295
296 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
297 ra->ratr_state);
298 ra->pre_ratr_state = ra->ratr_state;
299 }
300 }
301 rcu_read_unlock();
302}
303
304static void _rtl92s_dm_switch_baseband_mrc(struct ieee80211_hw *hw)
305{
306 struct rtl_priv *rtlpriv = rtl_priv(hw);
307 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
308 struct rtl_phy *rtlphy = &(rtlpriv->phy);
309 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
310 bool current_mrc;
311 bool enable_mrc = true;
312 long tmpentry_maxpwdb = 0;
313 u8 rssi_a = 0;
314 u8 rssi_b = 0;
315
316 if (is_hal_stop(rtlhal))
317 return;
318
319 if ((rtlphy->rf_type == RF_1T1R) || (rtlphy->rf_type == RF_2T2R))
320 return;
321
322 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MRC, (u8 *)(&current_mrc));
323
324 if (mac->link_state >= MAC80211_LINKED) {
325 if (rtlpriv->dm.undecorated_smoothed_pwdb > tmpentry_maxpwdb) {
326 rssi_a = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_A];
327 rssi_b = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_B];
328 }
329 }
330
331 /* MRC settings would NOT affect TP on Wireless B mode. */
332 if (mac->mode != WIRELESS_MODE_B) {
333 if ((rssi_a == 0) && (rssi_b == 0)) {
334 enable_mrc = true;
335 } else if (rssi_b > 30) {
336 /* Turn on B-Path */
337 enable_mrc = true;
338 } else if (rssi_b < 5) {
339 /* Turn off B-path */
340 enable_mrc = false;
341 /* Take care of RSSI differentiation. */
342 } else if (rssi_a > 15 && (rssi_a >= rssi_b)) {
343 if ((rssi_a - rssi_b) > 15)
344 /* Turn off B-path */
345 enable_mrc = false;
346 else if ((rssi_a - rssi_b) < 10)
347 /* Turn on B-Path */
348 enable_mrc = true;
349 else
350 enable_mrc = current_mrc;
351 } else {
352 /* Turn on B-Path */
353 enable_mrc = true;
354 }
355 }
356
357 /* Update MRC settings if needed. */
358 if (enable_mrc != current_mrc)
359 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MRC,
360 (u8 *)&enable_mrc);
361
362}
363
364void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw)
365{
366 struct rtl_priv *rtlpriv = rtl_priv(hw);
367
368 rtlpriv->dm.current_turbo_edca = false;
369 rtlpriv->dm.is_any_nonbepkts = false;
370 rtlpriv->dm.is_cur_rdlstate = false;
371}
372
373static void _rtl92s_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
374{
375 struct rtl_priv *rtlpriv = rtl_priv(hw);
376 struct rate_adaptive *ra = &(rtlpriv->ra);
377
378 ra->ratr_state = DM_RATR_STA_MAX;
379 ra->pre_ratr_state = DM_RATR_STA_MAX;
380
381 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
382 rtlpriv->dm.useramask = true;
383 else
384 rtlpriv->dm.useramask = false;
385
386 rtlpriv->dm.useramask = false;
387 rtlpriv->dm.inform_fw_driverctrldm = false;
388}
389
390static void _rtl92s_dm_init_txpowertracking_thermalmeter(
391 struct ieee80211_hw *hw)
392{
393 struct rtl_priv *rtlpriv = rtl_priv(hw);
394
395 rtlpriv->dm.txpower_tracking = true;
396 rtlpriv->dm.txpowercount = 0;
397 rtlpriv->dm.txpower_trackinginit = false;
398}
399
400static void _rtl92s_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
401{
402 struct rtl_priv *rtlpriv = rtl_priv(hw);
403 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
404 u32 ret_value;
405
406 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
407 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
408
409 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
410 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
411 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
412 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
413 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
414
415 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
416 falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail +
417 falsealm_cnt->cnt_mcs_fail;
418
419 /* read CCK false alarm */
420 ret_value = rtl_get_bbreg(hw, 0xc64, MASKDWORD);
421 falsealm_cnt->cnt_cck_fail = (ret_value & 0xffff);
422 falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail +
423 falsealm_cnt->cnt_cck_fail;
424}
425
426static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw)
427{
428 struct rtl_priv *rtlpriv = rtl_priv(hw);
429 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
430
431 if (falsealm_cnt->cnt_all > digtable.fa_highthresh) {
432 if ((digtable.backoff_val - 6) <
433 digtable.backoffval_range_min)
434 digtable.backoff_val = digtable.backoffval_range_min;
435 else
436 digtable.backoff_val -= 6;
437 } else if (falsealm_cnt->cnt_all < digtable.fa_lowthresh) {
438 if ((digtable.backoff_val + 6) >
439 digtable.backoffval_range_max)
440 digtable.backoff_val =
441 digtable.backoffval_range_max;
442 else
443 digtable.backoff_val += 6;
444 }
445}
446
447static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
448{
449 struct rtl_priv *rtlpriv = rtl_priv(hw);
450 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
451 static u8 initialized, force_write;
452 u8 initial_gain = 0;
453
454 if ((digtable.pre_sta_connectstate == digtable.cur_sta_connectstate) ||
455 (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) {
456 if (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) {
457 if (rtlpriv->psc.rfpwr_state != ERFON)
458 return;
459
460 if (digtable.backoff_enable_flag == true)
461 rtl92s_backoff_enable_flag(hw);
462 else
463 digtable.backoff_val = DM_DIG_BACKOFF;
464
465 if ((digtable.rssi_val + 10 - digtable.backoff_val) >
466 digtable.rx_gain_range_max)
467 digtable.cur_igvalue =
468 digtable.rx_gain_range_max;
469 else if ((digtable.rssi_val + 10 - digtable.backoff_val)
470 < digtable.rx_gain_range_min)
471 digtable.cur_igvalue =
472 digtable.rx_gain_range_min;
473 else
474 digtable.cur_igvalue = digtable.rssi_val + 10 -
475 digtable.backoff_val;
476
477 if (falsealm_cnt->cnt_all > 10000)
478 digtable.cur_igvalue =
479 (digtable.cur_igvalue > 0x33) ?
480 digtable.cur_igvalue : 0x33;
481
482 if (falsealm_cnt->cnt_all > 16000)
483 digtable.cur_igvalue =
484 digtable.rx_gain_range_max;
485 /* connected -> connected or disconnected -> disconnected */
486 } else {
487			/* Firmware controls DIG; do nothing in driver DM */
488 return;
489 }
490 /* disconnected -> connected or connected ->
491 * disconnected or beforeconnect->(dis)connected */
492 } else {
493 /* Enable FW DIG */
494 digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
495 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
496
497 digtable.backoff_val = DM_DIG_BACKOFF;
498 digtable.cur_igvalue = rtlpriv->phy.default_initialgain[0];
499 digtable.pre_igvalue = 0;
500 return;
501 }
502
503	/* Force a write to keep the FW DIG from overwriting it. */
504 if (digtable.pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1,
505 MASKBYTE0))
506 force_write = 1;
507
508 if ((digtable.pre_igvalue != digtable.cur_igvalue) ||
509 !initialized || force_write) {
510 /* Disable FW DIG */
511 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE);
512
513 initial_gain = (u8)digtable.cur_igvalue;
514
515 /* Set initial gain. */
516 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain);
517 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain);
518 digtable.pre_igvalue = digtable.cur_igvalue;
519 initialized = 1;
520 force_write = 0;
521 }
522}
523
524static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
525{
526 struct rtl_priv *rtlpriv = rtl_priv(hw);
527
528 if (rtlpriv->mac80211.act_scanning)
529 return;
530
531	/* Decide the current status and whether to modify the initial gain */
532 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED ||
533 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
534 digtable.cur_sta_connectstate = DIG_STA_CONNECT;
535 else
536 digtable.cur_sta_connectstate = DIG_STA_DISCONNECT;
537
538 digtable.rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb;
539
540 /* Change dig mode to rssi */
541 if (digtable.cur_sta_connectstate != DIG_STA_DISCONNECT) {
542 if (digtable.dig_twoport_algorithm ==
543 DIG_TWO_PORT_ALGO_FALSE_ALARM) {
544 digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
545 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS);
546 }
547 }
548
549 _rtl92s_dm_false_alarm_counter_statistics(hw);
550 _rtl92s_dm_initial_gain_sta_beforeconnect(hw);
551
552 digtable.pre_sta_connectstate = digtable.cur_sta_connectstate;
553}
554
555static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
556{
557 struct rtl_priv *rtlpriv = rtl_priv(hw);
558 struct rtl_phy *rtlphy = &(rtlpriv->phy);
559
560 /* 2T2R TP issue */
561 if (rtlphy->rf_type == RF_2T2R)
562 return;
563
564 if (!rtlpriv->dm.dm_initialgain_enable)
565 return;
566
567 if (digtable.dig_enable_flag == false)
568 return;
569
570 _rtl92s_dm_ctrl_initgain_bytwoport(hw);
571}
572
573static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
574{
575 struct rtl_priv *rtlpriv = rtl_priv(hw);
576 struct rtl_phy *rtlphy = &(rtlpriv->phy);
577 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
578 long undecorated_smoothed_pwdb;
579 long txpwr_threshold_lv1, txpwr_threshold_lv2;
580
581 /* 2T2R TP issue */
582 if (rtlphy->rf_type == RF_2T2R)
583 return;
584
585 if (!rtlpriv->dm.dynamic_txpower_enable ||
586 rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
587 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
588 return;
589 }
590
591 if ((mac->link_state < MAC80211_LINKED) &&
592 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
593 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
594 ("Not connected to any\n"));
595
596 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
597
598 rtlpriv->dm.last_dtp_lvl = TX_HIGHPWR_LEVEL_NORMAL;
599 return;
600 }
601
602 if (mac->link_state >= MAC80211_LINKED) {
603 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
604 undecorated_smoothed_pwdb =
605 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
606 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
607 ("AP Client PWDB = 0x%lx\n",
608 undecorated_smoothed_pwdb));
609 } else {
610 undecorated_smoothed_pwdb =
611 rtlpriv->dm.undecorated_smoothed_pwdb;
612 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
613 ("STA Default Port PWDB = 0x%lx\n",
614 undecorated_smoothed_pwdb));
615 }
616 } else {
617 undecorated_smoothed_pwdb =
618 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
619
620 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
621 ("AP Ext Port PWDB = 0x%lx\n",
622 undecorated_smoothed_pwdb));
623 }
624
625 txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2;
626 txpwr_threshold_lv1 = TX_POWER_NEAR_FIELD_THRESH_LVL1;
627
628 if (rtl_get_bbreg(hw, 0xc90, MASKBYTE0) == 1)
629 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
630 else if (undecorated_smoothed_pwdb >= txpwr_threshold_lv2)
631 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL2;
632 else if ((undecorated_smoothed_pwdb < (txpwr_threshold_lv2 - 3)) &&
633 (undecorated_smoothed_pwdb >= txpwr_threshold_lv1))
634 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL1;
635 else if (undecorated_smoothed_pwdb < (txpwr_threshold_lv1 - 3))
636 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
637
638 if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl))
639 rtl92s_phy_set_txpower(hw, rtlphy->current_channel);
640
641 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
642}
643
644static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
645{
646 struct rtl_priv *rtlpriv = rtl_priv(hw);
647
648	/* Enable the DIG scheme and backoff by default. */
649 digtable.dig_enable_flag = true;
650 digtable.backoff_enable_flag = true;
651
652 if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) &&
653 (hal_get_firmwareversion(rtlpriv) >= 0x3c))
654 digtable.dig_algorithm = DIG_ALGO_BY_TOW_PORT;
655 else
656 digtable.dig_algorithm =
657 DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM;
658
659 digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
660 digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
661 /* off=by real rssi value, on=by digtable.rssi_val for new dig */
662 digtable.dig_dbgmode = DM_DBG_OFF;
663 digtable.dig_slgorithm_switch = 0;
664
665	/* 2007/10/04 MH Define init gain threshold. */
666 digtable.dig_state = DM_STA_DIG_MAX;
667 digtable.dig_highpwrstate = DM_STA_DIG_MAX;
668
669 digtable.cur_sta_connectstate = DIG_STA_DISCONNECT;
670 digtable.pre_sta_connectstate = DIG_STA_DISCONNECT;
671 digtable.cur_ap_connectstate = DIG_AP_DISCONNECT;
672 digtable.pre_ap_connectstate = DIG_AP_DISCONNECT;
673
674 digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
675 digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
676
677 digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
678 digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
679
680 digtable.rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
681 digtable.rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
682
683 /* for dig debug rssi value */
684 digtable.rssi_val = 50;
685 digtable.backoff_val = DM_DIG_BACKOFF;
686 digtable.rx_gain_range_max = DM_DIG_MAX;
687
688 digtable.rx_gain_range_min = DM_DIG_MIN;
689
690 digtable.backoffval_range_max = DM_DIG_BACKOFF_MAX;
691 digtable.backoffval_range_min = DM_DIG_BACKOFF_MIN;
692}
693
694static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
695{
696 struct rtl_priv *rtlpriv = rtl_priv(hw);
697
698 if ((hal_get_firmwareversion(rtlpriv) >= 60) &&
699 (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER))
700 rtlpriv->dm.dynamic_txpower_enable = true;
701 else
702 rtlpriv->dm.dynamic_txpower_enable = false;
703
704 rtlpriv->dm.last_dtp_lvl = TX_HIGHPWR_LEVEL_NORMAL;
705 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
706}
707
708void rtl92s_dm_init(struct ieee80211_hw *hw)
709{
710 struct rtl_priv *rtlpriv = rtl_priv(hw);
711
712 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
713 rtlpriv->dm.undecorated_smoothed_pwdb = -1;
714
715 _rtl92s_dm_init_dynamic_txpower(hw);
716 rtl92s_dm_init_edca_turbo(hw);
717 _rtl92s_dm_init_rate_adaptive_mask(hw);
718 _rtl92s_dm_init_txpowertracking_thermalmeter(hw);
719 _rtl92s_dm_init_dig(hw);
720
721 rtl_write_dword(rtlpriv, WFM5, FW_CCA_CHK_ENABLE);
722}
723
724void rtl92s_dm_watchdog(struct ieee80211_hw *hw)
725{
726 _rtl92s_dm_check_edca_turbo(hw);
727 _rtl92s_dm_check_txpowertracking_thermalmeter(hw);
728 _rtl92s_dm_ctrl_initgain_byrssi(hw);
729 _rtl92s_dm_dynamic_txpower(hw);
730 _rtl92s_dm_refresh_rateadaptive_mask(hw);
731 _rtl92s_dm_switch_baseband_mrc(hw);
732}
733
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
new file mode 100644
index 000000000000..9051a556acc4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
@@ -0,0 +1,164 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __RTL_92S_DM_H__
30#define __RTL_92S_DM_H__
31
32struct dig_t {
33 u8 dig_enable_flag;
34 u8 dig_algorithm;
35 u8 dig_twoport_algorithm;
36 u8 dig_ext_port_stage;
37 u8 dig_dbgmode;
38 u8 dig_slgorithm_switch;
39
40 long rssi_lowthresh;
41 long rssi_highthresh;
42
43 u32 fa_lowthresh;
44 u32 fa_highthresh;
45
46 long rssi_highpower_lowthresh;
47 long rssi_highpower_highthresh;
48
49 u8 dig_state;
50 u8 dig_highpwrstate;
51 u8 cur_sta_connectstate;
52 u8 pre_sta_connectstate;
53 u8 cur_ap_connectstate;
54 u8 pre_ap_connectstate;
55
56 u8 cur_pd_thstate;
57 u8 pre_pd_thstate;
58 u8 cur_cs_ratiostate;
59 u8 pre_cs_ratiostate;
60
61 u32 pre_igvalue;
62 u32 cur_igvalue;
63
64 u8 backoff_enable_flag;
65 char backoff_val;
66 char backoffval_range_max;
67 char backoffval_range_min;
68 u8 rx_gain_range_max;
69 u8 rx_gain_range_min;
70
71 long rssi_val;
72};
73
74enum dm_dig_alg {
75 DIG_ALGO_BY_FALSE_ALARM = 0,
76 DIG_ALGO_BY_RSSI = 1,
77 DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM = 2,
78 DIG_ALGO_BY_TOW_PORT = 3,
79 DIG_ALGO_MAX
80};
81
82enum dm_dig_two_port_alg {
83 DIG_TWO_PORT_ALGO_RSSI = 0,
84 DIG_TWO_PORT_ALGO_FALSE_ALARM = 1,
85};
86
87enum dm_dig_dbg {
88 DM_DBG_OFF = 0,
89 DM_DBG_ON = 1,
90 DM_DBG_MAX
91};
92
93enum dm_dig_sta {
94 DM_STA_DIG_OFF = 0,
95 DM_STA_DIG_ON,
96 DM_STA_DIG_MAX
97};
98
99enum dm_dig_connect {
100 DIG_STA_DISCONNECT = 0,
101 DIG_STA_CONNECT = 1,
102 DIG_STA_BEFORE_CONNECT = 2,
103 DIG_AP_DISCONNECT = 3,
104 DIG_AP_CONNECT = 4,
105 DIG_AP_ADD_STATION = 5,
106 DIG_CONNECT_MAX
107};
108
109enum dm_dig_ext_port_alg {
110 DIG_EXT_PORT_STAGE_0 = 0,
111 DIG_EXT_PORT_STAGE_1 = 1,
112 DIG_EXT_PORT_STAGE_2 = 2,
113 DIG_EXT_PORT_STAGE_3 = 3,
114 DIG_EXT_PORT_STAGE_MAX = 4,
115};
116
117enum dm_ratr_sta {
118 DM_RATR_STA_HIGH = 0,
119 DM_RATR_STA_MIDDLEHIGH = 1,
120 DM_RATR_STA_MIDDLE = 2,
121 DM_RATR_STA_MIDDLELOW = 3,
122 DM_RATR_STA_LOW = 4,
123 DM_RATR_STA_ULTRALOW = 5,
124 DM_RATR_STA_MAX
125};
126
127#define DM_TYPE_BYFW 0
128#define DM_TYPE_BYDRIVER 1
129
130#define TX_HIGH_PWR_LEVEL_NORMAL 0
131#define TX_HIGH_PWR_LEVEL_LEVEL1 1
132#define TX_HIGH_PWR_LEVEL_LEVEL2 2
133
134#define HAL_DM_DIG_DISABLE BIT(0) /* Disable Dig */
135#define HAL_DM_HIPWR_DISABLE BIT(1) /* Disable High Power */
136
137#define TX_HIGHPWR_LEVEL_NORMAL 0
138#define TX_HIGHPWR_LEVEL_NORMAL1 1
139#define TX_HIGHPWR_LEVEL_NORMAL2 2
140
141#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
142#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
143
144#define DM_DIG_THRESH_HIGH 40
145#define DM_DIG_THRESH_LOW 35
146#define DM_FALSEALARM_THRESH_LOW 40
147#define DM_FALSEALARM_THRESH_HIGH 1000
148#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
149#define DM_DIG_HIGH_PWR_THRESH_LOW 70
150#define DM_DIG_BACKOFF 12
151#define DM_DIG_MAX 0x3e
152#define DM_DIG_MIN 0x1c
153#define DM_DIG_MIN_Netcore 0x12
154#define DM_DIG_BACKOFF_MAX 12
155#define DM_DIG_BACKOFF_MIN -4
156
157extern struct dig_t digtable;
158
159void rtl92s_dm_watchdog(struct ieee80211_hw *hw);
160void rtl92s_dm_init(struct ieee80211_hw *hw);
161void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw);
162
163#endif
164
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
new file mode 100644
index 000000000000..3b5af0113d7f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -0,0 +1,654 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../base.h"
33#include "reg.h"
34#include "def.h"
35#include "fw.h"
36
37static void _rtl92s_fw_set_rqpn(struct ieee80211_hw *hw)
38{
39 struct rtl_priv *rtlpriv = rtl_priv(hw);
40
41 rtl_write_dword(rtlpriv, RQPN, 0xffffffff);
42 rtl_write_dword(rtlpriv, RQPN + 4, 0xffffffff);
43 rtl_write_byte(rtlpriv, RQPN + 8, 0xff);
44 rtl_write_byte(rtlpriv, RQPN + 0xB, 0x80);
45}
46
47static bool _rtl92s_firmware_enable_cpu(struct ieee80211_hw *hw)
48{
49 struct rtl_priv *rtlpriv = rtl_priv(hw);
50 u32 ichecktime = 200;
51 u16 tmpu2b;
52 u8 tmpu1b, cpustatus = 0;
53
54 _rtl92s_fw_set_rqpn(hw);
55
56 /* Enable CPU. */
57 tmpu1b = rtl_read_byte(rtlpriv, SYS_CLKR);
58 /* AFE source */
59 rtl_write_byte(rtlpriv, SYS_CLKR, (tmpu1b | SYS_CPU_CLKSEL));
60
61 tmpu2b = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
62 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | FEN_CPUEN));
63
64 /* Polling IMEM Ready after CPU has refilled. */
65 do {
66 cpustatus = rtl_read_byte(rtlpriv, TCR);
67 if (cpustatus & IMEM_RDY) {
68 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
69 ("IMEM Ready after CPU has refilled.\n"));
70 break;
71 }
72
73 udelay(100);
74 } while (ichecktime--);
75
76 if (!(cpustatus & IMEM_RDY))
77 return false;
78
79 return true;
80}
81
82static enum fw_status _rtl92s_firmware_get_nextstatus(
83 enum fw_status fw_currentstatus)
84{
85 enum fw_status next_fwstatus = 0;
86
87 switch (fw_currentstatus) {
88 case FW_STATUS_INIT:
89 next_fwstatus = FW_STATUS_LOAD_IMEM;
90 break;
91 case FW_STATUS_LOAD_IMEM:
92 next_fwstatus = FW_STATUS_LOAD_EMEM;
93 break;
94 case FW_STATUS_LOAD_EMEM:
95 next_fwstatus = FW_STATUS_LOAD_DMEM;
96 break;
97 case FW_STATUS_LOAD_DMEM:
98 next_fwstatus = FW_STATUS_READY;
99 break;
100 default:
101 break;
102 }
103
104 return next_fwstatus;
105}
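The helper above encodes the download order IMEM -> EMEM -> DMEM -> READY. A hedged sketch of the loop that typically drives it (image selection and error handling omitted; only the names defined in this file are real):

	/* Sketch only: step the firmware download state machine forward. */
	enum fw_status fwstatus = FW_STATUS_INIT;

	while (fwstatus != FW_STATUS_READY) {
		fwstatus = _rtl92s_firmware_get_nextstatus(fwstatus);
		/* For the LOAD_IMEM/EMEM/DMEM stages: push the matching
		 * image with _rtl92s_firmware_downloadcode(), then confirm
		 * the stage with _rtl92s_firmware_checkready(). */
	}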
106
107static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw)
108{
109 struct rtl_priv *rtlpriv = rtl_priv(hw);
110 struct rtl_phy *rtlphy = &(rtlpriv->phy);
111
112 switch (rtlphy->rf_type) {
113 case RF_1T1R:
114 return 0x11;
115 break;
116 case RF_1T2R:
117 return 0x12;
118 break;
119 case RF_2T2R:
120 return 0x22;
121 break;
122 default:
123 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
124 ("Unknown RF type(%x)\n",
125 rtlphy->rf_type));
126 break;
127 }
128 return 0x22;
129}
130
131static void _rtl92s_firmwareheader_priveupdate(struct ieee80211_hw *hw,
132 struct fw_priv *pfw_priv)
133{
134 /* Update RF types for RATR settings. */
135 pfw_priv->rf_config = _rtl92s_firmware_header_map_rftype(hw);
136}
137
138
139
140static bool _rtl92s_cmd_send_packet(struct ieee80211_hw *hw,
141 struct sk_buff *skb, u8 last)
142{
143 struct rtl_priv *rtlpriv = rtl_priv(hw);
144 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
145 struct rtl8192_tx_ring *ring;
146 struct rtl_tx_desc *pdesc;
147 unsigned long flags;
148 u8 idx = 0;
149
150 ring = &rtlpci->tx_ring[TXCMD_QUEUE];
151
152 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
153
154 idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
155 pdesc = &ring->desc[idx];
156 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
157 __skb_queue_tail(&ring->queue, skb);
158
159 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
160
161 return true;
162}
163
164static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
165 u8 *code_virtual_address, u32 buffer_len)
166{
167 struct rtl_priv *rtlpriv = rtl_priv(hw);
168 struct sk_buff *skb;
169 struct rtl_tcb_desc *tcb_desc;
170 unsigned char *seg_ptr;
171 u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE;
172 u16 frag_length, frag_offset = 0;
173 u16 extra_descoffset = 0;
174 u8 last_inipkt = 0;
175
176 _rtl92s_fw_set_rqpn(hw);
177
178 if (buffer_len >= MAX_FIRMWARE_CODE_SIZE) {
179 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
180 ("Size over FIRMWARE_CODE_SIZE!\n"));
181
182 return false;
183 }
184
185 extra_descoffset = 0;
186
187 do {
188 if ((buffer_len - frag_offset) > frag_threshold) {
189 frag_length = frag_threshold + extra_descoffset;
190 } else {
191 frag_length = (u16)(buffer_len - frag_offset +
192 extra_descoffset);
193 last_inipkt = 1;
194 }
195
196 /* Allocate skb buffer to contain firmware */
197 /* info and tx descriptor info. */
198 skb = dev_alloc_skb(frag_length);
199 skb_reserve(skb, extra_descoffset);
200 seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
201 extra_descoffset));
202 memcpy(seg_ptr, code_virtual_address + frag_offset,
203 (u32)(frag_length - extra_descoffset));
204
205 tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
206 tcb_desc->queue_index = TXCMD_QUEUE;
207 tcb_desc->cmd_or_init = DESC_PACKET_TYPE_INIT;
208 tcb_desc->last_inipkt = last_inipkt;
209
210 _rtl92s_cmd_send_packet(hw, skb, last_inipkt);
211
212 frag_offset += (frag_length - extra_descoffset);
213
214 } while (frag_offset < buffer_len);
215
216 rtl_write_byte(rtlpriv, TP_POLL, TPPOLL_CQ);
217
218 return true ;
219}
220
221static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
222 u8 loadfw_status)
223{
224 struct rtl_priv *rtlpriv = rtl_priv(hw);
225 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
226 struct rt_firmware *firmware = (struct rt_firmware *)rtlhal->pfirmware;
227 u32 tmpu4b;
228 u8 cpustatus = 0;
229 short pollingcnt = 1000;
230 bool rtstatus = true;
231
232	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("LoadStatus(%d)\n",
233 loadfw_status));
234
235 firmware->fwstatus = (enum fw_status)loadfw_status;
236
237 switch (loadfw_status) {
238 case FW_STATUS_LOAD_IMEM:
239 /* Polling IMEM code done. */
240 do {
241 cpustatus = rtl_read_byte(rtlpriv, TCR);
242 if (cpustatus & IMEM_CODE_DONE)
243 break;
244 udelay(5);
245 } while (pollingcnt--);
246
247 if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) {
248 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
249 ("FW_STATUS_LOAD_IMEM"
250 " FAIL CPU, Status=%x\r\n", cpustatus));
251 goto status_check_fail;
252 }
253 break;
254
255 case FW_STATUS_LOAD_EMEM:
256 /* Check Put Code OK and Turn On CPU */
257 /* Polling EMEM code done. */
258 do {
259 cpustatus = rtl_read_byte(rtlpriv, TCR);
260 if (cpustatus & EMEM_CODE_DONE)
261 break;
262 udelay(5);
263 } while (pollingcnt--);
264
265 if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) {
266 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
267 ("FW_STATUS_LOAD_EMEM"
268 " FAIL CPU, Status=%x\r\n", cpustatus));
269 goto status_check_fail;
270 }
271
272 /* Turn On CPU */
273 rtstatus = _rtl92s_firmware_enable_cpu(hw);
274 if (rtstatus != true) {
275 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
276 ("Enable CPU fail!\n"));
277 goto status_check_fail;
278 }
279 break;
280
281 case FW_STATUS_LOAD_DMEM:
282 /* Polling DMEM code done */
283 do {
284 cpustatus = rtl_read_byte(rtlpriv, TCR);
285 if (cpustatus & DMEM_CODE_DONE)
286 break;
287 udelay(5);
288 } while (pollingcnt--);
289
290 if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) {
291 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
292 ("Polling DMEM code done"
293 " fail ! cpustatus(%#x)\n", cpustatus));
294 goto status_check_fail;
295 }
296
297 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
298 ("DMEM code download success,"
299 " cpustatus(%#x)\n", cpustatus));
300
301 /* Prevent delaying too long and being scheduled out */
302 /* Polling Load Firmware ready */
303 pollingcnt = 2000;
304 do {
305 cpustatus = rtl_read_byte(rtlpriv, TCR);
306 if (cpustatus & FWRDY)
307 break;
308 udelay(40);
309 } while (pollingcnt--);
310
311 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
312 ("Polling Load Firmware ready,"
313 " cpustatus(%x)\n", cpustatus));
314
315 if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) ||
316 (pollingcnt <= 0)) {
317 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
318 ("Polling Load Firmware"
319 " ready fail ! cpustatus(%x)\n", cpustatus));
320 goto status_check_fail;
321 }
322
323 /* If we get here, we can set TCR/RCR to the desired values */
324 /* and configure MAC loopback mode back to normal mode */
325 tmpu4b = rtl_read_dword(rtlpriv, TCR);
326 rtl_write_dword(rtlpriv, TCR, (tmpu4b & (~TCR_ICV)));
327
328 tmpu4b = rtl_read_dword(rtlpriv, RCR);
329 rtl_write_dword(rtlpriv, RCR, (tmpu4b | RCR_APPFCS |
330 RCR_APP_ICV | RCR_APP_MIC));
331
332 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
333 ("Current RCR settings(%#x)\n", tmpu4b));
334
335 /* Set to normal mode. */
336 rtl_write_byte(rtlpriv, LBKMD_SEL, LBK_NORMAL);
337 break;
338
339 default:
340 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
341 ("Unknown status check!\n"));
342 rtstatus = false;
343 break;
344 }
345
346status_check_fail:
347 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("loadfw_status(%d), "
348 "rtstatus(%x)\n", loadfw_status, rtstatus));
349 return rtstatus;
350}
351
352int rtl92s_download_fw(struct ieee80211_hw *hw)
353{
354 struct rtl_priv *rtlpriv = rtl_priv(hw);
355 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
356 struct rt_firmware *firmware = NULL;
357 struct fw_hdr *pfwheader;
358 struct fw_priv *pfw_priv = NULL;
359 u8 *puc_mappedfile = NULL;
360 u32 ul_filelength = 0;
361 u32 file_length = 0;
362 u8 fwhdr_size = RT_8192S_FIRMWARE_HDR_SIZE;
363 u8 fwstatus = FW_STATUS_INIT;
364 bool rtstatus = true;
365
366 if (!rtlhal->pfirmware)
367 return 1;
368
369 firmware = (struct rt_firmware *)rtlhal->pfirmware;
370 firmware->fwstatus = FW_STATUS_INIT;
371
372 puc_mappedfile = firmware->sz_fw_tmpbuffer;
373 file_length = firmware->sz_fw_tmpbufferlen;
374
375 /* 1. Retrieve FW header. */
376 firmware->pfwheader = (struct fw_hdr *) puc_mappedfile;
377 pfwheader = firmware->pfwheader;
378 firmware->firmwareversion = byte(pfwheader->version, 0);
379 firmware->pfwheader->fwpriv.hci_sel = 1;/* pcie */
380
381 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("signature:%x, version:"
382 "%x, size:%x,"
383 "imemsize:%x, sram size:%x\n", pfwheader->signature,
384 pfwheader->version, pfwheader->dmem_size,
385 pfwheader->img_imem_size, pfwheader->img_sram_size));
386
387 /* 2. Retrieve IMEM image. */
388 if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size >
389 sizeof(firmware->fw_imem))) {
390 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
391 ("memory for data image is less than IMEM required\n"));
392 goto fail;
393 } else {
394 puc_mappedfile += fwhdr_size;
395
396 memcpy(firmware->fw_imem, puc_mappedfile,
397 pfwheader->img_imem_size);
398 firmware->fw_imem_len = pfwheader->img_imem_size;
399 }
400
401 /* 3. Retrieve EMEM image. */
402 if (pfwheader->img_sram_size > sizeof(firmware->fw_emem)) {
403 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
404 ("memory for data image is less than EMEM required\n"));
405 goto fail;
406 } else {
407 puc_mappedfile += firmware->fw_imem_len;
408
409 memcpy(firmware->fw_emem, puc_mappedfile,
410 pfwheader->img_sram_size);
411 firmware->fw_emem_len = pfwheader->img_sram_size;
412 }
413
414 /* 4. download fw now */
415 fwstatus = _rtl92s_firmware_get_nextstatus(firmware->fwstatus);
416 while (fwstatus != FW_STATUS_READY) {
417 /* Image buffer redirection. */
418 switch (fwstatus) {
419 case FW_STATUS_LOAD_IMEM:
420 puc_mappedfile = firmware->fw_imem;
421 ul_filelength = firmware->fw_imem_len;
422 break;
423 case FW_STATUS_LOAD_EMEM:
424 puc_mappedfile = firmware->fw_emem;
425 ul_filelength = firmware->fw_emem_len;
426 break;
427 case FW_STATUS_LOAD_DMEM:
428 /* Partially update the private content of the header. */
429 pfwheader = firmware->pfwheader;
430 pfw_priv = &pfwheader->fwpriv;
431 _rtl92s_firmwareheader_priveupdate(hw, pfw_priv);
432 puc_mappedfile = (u8 *)(firmware->pfwheader) +
433 RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
434 ul_filelength = fwhdr_size -
435 RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
436 break;
437 default:
438 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
439 ("Unexpected Download step!!\n"));
440 goto fail;
441 break;
442 }
443
444 /* <2> Download image file */
445 rtstatus = _rtl92s_firmware_downloadcode(hw, puc_mappedfile,
446 ul_filelength);
447
448 if (!rtstatus) {
449 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("fail!\n"));
450 goto fail;
451 }
452
453 /* <3> Check whether load FW process is ready */
454 rtstatus = _rtl92s_firmware_checkready(hw, fwstatus);
455 if (!rtstatus) {
456 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("fail!\n"));
457 goto fail;
458 }
459
460 fwstatus = _rtl92s_firmware_get_nextstatus(firmware->fwstatus);
461 }
462
463 return rtstatus;
464fail:
465 return 0;
466}
467
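For reference, rtl92s_download_fw above consumes a blob laid out as an 80-byte header followed by the IMEM image and then the EMEM/SRAM image, with the image sizes taken from the header. Below is a minimal userspace sketch of one way to sanity-check that layout against the blob length (check_fw_layout is a hypothetical helper, a little-endian host is assumed, and field offsets follow the fw_hdr layout in fw.h; the driver itself instead checks the sizes against its fixed fw_imem/fw_emem buffers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_HDR_SIZE 80u	/* RT_8192S_FIRMWARE_HDR_SIZE */

/* Check that the IMEM and EMEM images declared in the header fit
 * inside the blob; img_imem_size and img_sram_size sit at byte
 * offsets 8 and 12 of the header. */
static int check_fw_layout(const uint8_t *buf, uint32_t len)
{
	uint32_t imem_size, sram_size;

	if (len < FW_HDR_SIZE)
		return -1;

	memcpy(&imem_size, buf + 8, sizeof(imem_size));
	memcpy(&sram_size, buf + 12, sizeof(sram_size));

	if (imem_size == 0 ||
	    (uint64_t)FW_HDR_SIZE + imem_size + sram_size > len)
		return -1;

	printf("IMEM %u bytes at %u, EMEM %u bytes at %u\n",
	       imem_size, FW_HDR_SIZE, sram_size, FW_HDR_SIZE + imem_size);
	return 0;
}

int main(void)
{
	uint8_t blob[FW_HDR_SIZE + 64] = { 0 };
	uint32_t imem = 40, sram = 24;

	memcpy(blob + 8, &imem, sizeof(imem));
	memcpy(blob + 12, &sram, sizeof(sram));
	return check_fw_layout(blob, sizeof(blob));
}
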
468static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
469 u32 cmd_num, u32 *pelement_id, u32 *pcmd_len,
470 u8 **pcmb_buffer, u8 *cmd_start_seq)
471{
472 u32 totallen = 0, len = 0, tx_desclen = 0;
473 u32 pre_continueoffset = 0;
474 u8 *ph2c_buffer;
475 u8 i = 0;
476
477 do {
478 /* 8-byte alignment */
479 len = H2C_TX_CMD_HDR_LEN + N_BYTE_ALIGMENT(pcmd_len[i], 8);
480
481 /* Buffer length is not enough */
482 if (h2cbufferlen < totallen + len + tx_desclen)
483 break;
484
485 /* Clear content */
486 ph2c_buffer = (u8 *)skb_put(skb, (u32)len);
487 memset((ph2c_buffer + totallen + tx_desclen), 0, len);
488
489 /* CMD len */
490 SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
491 0, 16, pcmd_len[i]);
492
493 /* CMD ID */
494 SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
495 16, 8, pelement_id[i]);
496
497 /* CMD Sequence */
498 *cmd_start_seq = *cmd_start_seq % 0x80;
499 SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
500 24, 7, *cmd_start_seq);
501 ++*cmd_start_seq;
502
503 /* Copy memory */
504 memcpy((ph2c_buffer + totallen + tx_desclen +
505 H2C_TX_CMD_HDR_LEN), pcmb_buffer[i], pcmd_len[i]);
506
507 /* CMD continue */
508 /* set the continue bit in the previous cmd. */
509 if (i < cmd_num - 1)
510 SET_BITS_TO_LE_4BYTE((ph2c_buffer + pre_continueoffset),
511 31, 1, 1);
512
513 pre_continueoffset = totallen;
514
515 totallen += len;
516 } while (++i < cmd_num);
517
518 return totallen;
519}
520
521static u32 _rtl92s_get_h2c_cmdlen(u32 h2cbufferlen, u32 cmd_num, u32 *pcmd_len)
522{
523 u32 totallen = 0, len = 0, tx_desclen = 0;
524 u8 i = 0;
525
526 do {
527 /* 8-byte alignment */
528 len = H2C_TX_CMD_HDR_LEN + N_BYTE_ALIGMENT(pcmd_len[i], 8);
529
530 /* Buffer length is not enough */
531 if (h2cbufferlen < totallen + len + tx_desclen)
532 break;
533
534 totallen += len;
535 } while (++i < cmd_num);
536
537 return totallen + tx_desclen;
538}
539
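Both H2C helpers above reserve an 8-byte command header per command and round each command body up to an 8-byte boundary via N_BYTE_ALIGMENT(pcmd_len[i], 8). A standalone sketch of that length calculation (align_up is a hypothetical stand-in for what the alignment macro is assumed to do):

#include <stdint.h>
#include <stdio.h>

#define H2C_TX_CMD_HDR_LEN 8

/* Round len up to the next multiple of a power-of-two alignment. */
static uint32_t align_up(uint32_t len, uint32_t align)
{
	return (len + align - 1) & ~(align - 1);
}

/* Total space needed for cmd_num H2C commands, stopping early when
 * the buffer cannot hold the next one (same shape as
 * _rtl92s_get_h2c_cmdlen). */
static uint32_t h2c_total_len(uint32_t buflen, uint32_t cmd_num,
			      const uint32_t *cmd_len)
{
	uint32_t total = 0, i;

	for (i = 0; i < cmd_num; i++) {
		uint32_t len = H2C_TX_CMD_HDR_LEN + align_up(cmd_len[i], 8);

		if (buflen < total + len)
			break;
		total += len;
	}
	return total;
}

int main(void)
{
	uint32_t lens[] = { 13, 8, 1 };

	printf("%u\n", h2c_total_len(4096, 3, lens));	/* 24 + 16 + 16 = 56 */
	return 0;
}
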
540static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
541 u8 *pcmd_buffer)
542{
543 struct rtl_priv *rtlpriv = rtl_priv(hw);
544 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
545 struct rtl_tcb_desc *cb_desc;
546 struct sk_buff *skb;
547 u32 element_id = 0;
548 u32 cmd_len = 0;
549 u32 len;
550
551 switch (h2c_cmd) {
552 case FW_H2C_SETPWRMODE:
553 element_id = H2C_SETPWRMODE_CMD;
554 cmd_len = sizeof(struct h2c_set_pwrmode_parm);
555 break;
556 case FW_H2C_JOINBSSRPT:
557 element_id = H2C_JOINBSSRPT_CMD;
558 cmd_len = sizeof(struct h2c_joinbss_rpt_parm);
559 break;
560 case FW_H2C_WOWLAN_UPDATE_GTK:
561 element_id = H2C_WOWLAN_UPDATE_GTK_CMD;
562 cmd_len = sizeof(struct h2c_wpa_two_way_parm);
563 break;
564 case FW_H2C_WOWLAN_UPDATE_IV:
565 element_id = H2C_WOWLAN_UPDATE_IV_CMD;
566 cmd_len = sizeof(unsigned long long);
567 break;
568 case FW_H2C_WOWLAN_OFFLOAD:
569 element_id = H2C_WOWLAN_FW_OFFLOAD;
570 cmd_len = sizeof(u8);
571 break;
572 default:
573 break;
574 }
575
576 len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
577 skb = dev_alloc_skb(len);
578 cb_desc = (struct rtl_tcb_desc *)(skb->cb);
579 cb_desc->queue_index = TXCMD_QUEUE;
580 cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
581 cb_desc->last_inipkt = false;
582
583 _rtl92s_fill_h2c_cmd(skb, MAX_TRANSMIT_BUFFER_SIZE, 1, &element_id,
584 &cmd_len, &pcmd_buffer, &rtlhal->h2c_txcmd_seq);
585 _rtl92s_cmd_send_packet(hw, skb, false);
586 rtlpriv->cfg->ops->tx_polling(hw, TXCMD_QUEUE);
587
588 return true;
589}
590
591void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
592{
593 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
594 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
595 struct h2c_set_pwrmode_parm pwrmode;
596 u16 max_wakeup_period = 0;
597
598 pwrmode.mode = mode;
599 pwrmode.flag_low_traffic_en = 0;
600 pwrmode.flag_lpnav_en = 0;
601 pwrmode.flag_rf_low_snr_en = 0;
602 pwrmode.flag_dps_en = 0;
603 pwrmode.bcn_rx_en = 0;
604 pwrmode.bcn_to = 0;
605 SET_BITS_TO_LE_2BYTE((u8 *)(&pwrmode) + 8, 0, 16,
606 mac->vif->bss_conf.beacon_int);
607 pwrmode.app_itv = 0;
608 pwrmode.awake_bcn_itvl = ppsc->reg_max_lps_awakeintvl;
609 pwrmode.smart_ps = 1;
610 pwrmode.bcn_pass_period = 10;
611
612 /* Set beacon pass count */
613 if (pwrmode.mode == FW_PS_MIN_MODE)
614 max_wakeup_period = mac->vif->bss_conf.beacon_int;
615 else if (pwrmode.mode == FW_PS_MAX_MODE)
616 max_wakeup_period = mac->vif->bss_conf.beacon_int *
617 mac->vif->bss_conf.dtim_period;
618
619 if (max_wakeup_period >= 500)
620 pwrmode.bcn_pass_cnt = 1;
621 else if ((max_wakeup_period >= 300) && (max_wakeup_period < 500))
622 pwrmode.bcn_pass_cnt = 2;
623 else if ((max_wakeup_period >= 200) && (max_wakeup_period < 300))
624 pwrmode.bcn_pass_cnt = 3;
625 else if ((max_wakeup_period >= 20) && (max_wakeup_period < 200))
626 pwrmode.bcn_pass_cnt = 5;
627 else
628 pwrmode.bcn_pass_cnt = 1;
629
630 _rtl92s_firmware_set_h2c_cmd(hw, FW_H2C_SETPWRMODE, (u8 *)&pwrmode);
631
632}
633
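The beacon-pass-count selection above maps a worst-case wakeup period (the beacon interval, or beacon interval times DTIM period in MAX mode) onto a small threshold table. The same mapping as a standalone helper (hypothetical name), which may make the ranges easier to audit:

#include <stdio.h>

/* Worst-case wakeup period -> number of beacons the firmware may let
 * pass; mirrors the threshold ladder used for pwrmode.bcn_pass_cnt. */
static unsigned int bcn_pass_cnt(unsigned int max_wakeup_period)
{
	if (max_wakeup_period >= 500)
		return 1;
	if (max_wakeup_period >= 300)
		return 2;
	if (max_wakeup_period >= 200)
		return 3;
	if (max_wakeup_period >= 20)
		return 5;
	return 1;
}

int main(void)
{
	printf("%u %u %u\n",
	       bcn_pass_cnt(100), bcn_pass_cnt(350), bcn_pass_cnt(600));
	return 0;	/* prints: 5 2 1 */
}
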
634void rtl92s_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw,
635 u8 mstatus, u8 ps_qosinfo)
636{
637 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
638 struct h2c_joinbss_rpt_parm joinbss_rpt;
639
640 joinbss_rpt.opmode = mstatus;
641 joinbss_rpt.ps_qos_info = ps_qosinfo;
642 joinbss_rpt.bssid[0] = mac->bssid[0];
643 joinbss_rpt.bssid[1] = mac->bssid[1];
644 joinbss_rpt.bssid[2] = mac->bssid[2];
645 joinbss_rpt.bssid[3] = mac->bssid[3];
646 joinbss_rpt.bssid[4] = mac->bssid[4];
647 joinbss_rpt.bssid[5] = mac->bssid[5];
648 SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 8, 0, 16,
649 mac->vif->bss_conf.beacon_int);
650 SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 10, 0, 16, mac->assoc_id);
651
652 _rtl92s_firmware_set_h2c_cmd(hw, FW_H2C_JOINBSSRPT, (u8 *)&joinbss_rpt);
653}
654
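Several of the parameter blocks in this file are filled partly by direct field assignment and partly by SET_BITS_TO_LE_2BYTE, which, as used here, appears to write a value of a given bit width at a given bit offset into a little-endian word inside the buffer. A self-contained sketch of that read-modify-write pattern under those assumed semantics (set_bits_le16 is a hypothetical name, and the memcpy shortcut only yields little-endian bytes on a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write 'value' into 'bitlen' bits starting at bit 'bitoff' of the
 * 16-bit word stored at p. */
static void set_bits_le16(uint8_t *p, unsigned int bitoff,
			  unsigned int bitlen, uint16_t value)
{
	uint16_t word, mask;

	memcpy(&word, p, sizeof(word));
	mask = (uint16_t)(((1u << bitlen) - 1) << bitoff);
	word = (uint16_t)((word & ~mask) | ((value << bitoff) & mask));
	memcpy(p, &word, sizeof(word));
}

int main(void)
{
	uint8_t parm[12] = { 0 };

	/* e.g. a 16-bit beacon interval of 100 at byte offset 8 */
	set_bits_le16(parm + 8, 0, 16, 100);
	printf("%02x %02x\n", parm[8], parm[9]);	/* 64 00 on LE hosts */
	return 0;
}
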
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
new file mode 100644
index 000000000000..74cc503efe8a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
@@ -0,0 +1,375 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __REALTEK_FIRMWARE92S_H__
30#define __REALTEK_FIRMWARE92S_H__
31
32#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000
33#define RTL8190_CPU_START_OFFSET 0x80
34/* Firmware Local buffer size. 64k */
35#define MAX_FIRMWARE_CODE_SIZE 0xFF00
36
37#define RT_8192S_FIRMWARE_HDR_SIZE 80
38#define RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE 32
39
40/* support till 64 bit bus width OS */
41#define MAX_DEV_ADDR_SIZE 8
42#define MAX_FIRMWARE_INFORMATION_SIZE 32
43#define MAX_802_11_HEADER_LENGTH (40 + \
44 MAX_FIRMWARE_INFORMATION_SIZE)
45#define ENCRYPTION_MAX_OVERHEAD 128
46#define MAX_FRAGMENT_COUNT 8
47#define MAX_TRANSMIT_BUFFER_SIZE (1600 + \
48 (MAX_802_11_HEADER_LENGTH + \
49 ENCRYPTION_MAX_OVERHEAD) *\
50 MAX_FRAGMENT_COUNT)
51
52#define H2C_TX_CMD_HDR_LEN 8
53
54/* The following DM control code are for Reg0x364, */
55#define FW_DIG_ENABLE_CTL BIT(0)
56#define FW_HIGH_PWR_ENABLE_CTL BIT(1)
57#define FW_SS_CTL BIT(2)
58#define FW_RA_INIT_CTL BIT(3)
59#define FW_RA_BG_CTL BIT(4)
60#define FW_RA_N_CTL BIT(5)
61#define FW_PWR_TRK_CTL BIT(6)
62#define FW_IQK_CTL BIT(7)
63#define FW_FA_CTL BIT(8)
64#define FW_DRIVER_CTRL_DM_CTL BIT(9)
65#define FW_PAPE_CTL_BY_SW_HW BIT(10)
66#define FW_DISABLE_ALL_DM 0
67#define FW_PWR_TRK_PARAM_CLR 0x0000ffff
68#define FW_RA_PARAM_CLR 0xffff0000
69
70enum desc_packet_type {
71 DESC_PACKET_TYPE_INIT = 0,
72 DESC_PACKET_TYPE_NORMAL = 1,
73};
74
75/* 8-bytes alignment required */
76struct fw_priv {
77 /* --- long word 0 ---- */
78 /* 0x12: CE product, 0x92: IT product */
79 u8 signature_0;
80 /* 0x87: CE product, 0x81: IT product */
81 u8 signature_1;
82 /* 0x81: PCI-AP, 01:PCIe, 02: 92S-U,
83 * 0x82: USB-AP, 0x12: 72S-U, 03:SDIO */
84 u8 hci_sel;
85 /* the same value as the register value */
86 u8 chip_version;
87 /* customer ID low byte */
88 u8 customer_id_0;
89 /* customer ID high byte */
90 u8 customer_id_1;
91 /* 0x11: 1T1R, 0x12: 1T2R,
92 * 0x92: 1T2R turbo, 0x22: 2T2R */
93 u8 rf_config;
94 /* 4: 4EP, 6: 6EP, 11: 11EP */
95 u8 usb_ep_num;
96
97 /* --- long word 1 ---- */
98 /* regulatory class bit map 0 */
99 u8 regulatory_class_0;
100 /* regulatory class bit map 1 */
101 u8 regulatory_class_1;
102 /* regulatory class bit map 2 */
103 u8 regulatory_class_2;
104 /* regulatory class bit map 3 */
105 u8 regulatory_class_3;
106 /* 0:SWSI, 1:HWSI, 2:HWPI */
107 u8 rfintfs;
108 u8 def_nettype;
109 u8 rsvd010;
110 u8 rsvd011;
111
112 /* --- long word 2 ---- */
113 /* 0x00: normal, 0x03: MACLBK, 0x01: PHYLBK */
114 u8 lbk_mode;
115 /* 1: for MP use, 0: for normal
116 * driver (to be discussed) */
117 u8 mp_mode;
118 u8 rsvd020;
119 u8 rsvd021;
120 u8 rsvd022;
121 u8 rsvd023;
122 u8 rsvd024;
123 u8 rsvd025;
124
125 /* --- long word 3 ---- */
126 /* QoS enable */
127 u8 qos_en;
128 /* 40MHz BW enable */
129 /* 4181 convert AMSDU to AMPDU, 0: disable */
130 u8 bw_40mhz_en;
131 u8 amsdu2ampdu_en;
132 /* 11n AMPDU enable */
133 u8 ampdu_en;
134 /* FW offloads, 0: driver handles */
135 u8 rate_control_offload;
136 /* FW offloads, 0: driver handles */
137 u8 aggregation_offload;
138 u8 rsvd030;
139 u8 rsvd031;
140
141 /* --- long word 4 ---- */
142 /* 1. FW offloads, 0: driver handles */
143 u8 beacon_offload;
144 /* 2. FW offloads, 0: driver handles */
145 u8 mlme_offload;
146 /* 3. FW offloads, 0: driver handles */
147 u8 hwpc_offload;
148 /* 4. FW offloads, 0: driver handles */
149 u8 tcp_checksum_offload;
150 /* 5. FW offloads, 0: driver handles */
151 u8 tcp_offload;
152 /* 6. FW offloads, 0: driver handles */
153 u8 ps_control_offload;
154 /* 7. FW offloads, 0: driver handles */
155 u8 wwlan_offload;
156 u8 rsvd040;
157
158 /* --- long word 5 ---- */
159 /* tcp tx packet length low byte */
160 u8 tcp_tx_frame_len_L;
161 /* tcp tx packet length high byte */
162 u8 tcp_tx_frame_len_H;
163 /* tcp rx packet length low byte */
164 u8 tcp_rx_frame_len_L;
165 /* tcp rx packet length high byte */
166 u8 tcp_rx_frame_len_H;
167 u8 rsvd050;
168 u8 rsvd051;
169 u8 rsvd052;
170 u8 rsvd053;
171};
172
173/* 8-byte alignment required */
174struct fw_hdr {
175
176 /* --- LONG WORD 0 ---- */
177 u16 signature;
178 /* 0x8000 ~ 0x8FFF for FPGA version,
179 * 0x0000 ~ 0x7FFF for ASIC version, */
180 u16 version;
181 /* define the size of boot loader */
182 u32 dmem_size;
183
184
185 /* --- LONG WORD 1 ---- */
186 /* define the size of FW in IMEM */
187 u32 img_imem_size;
188 /* define the size of FW in SRAM */
189 u32 img_sram_size;
190
191 /* --- LONG WORD 2 ---- */
192 /* define the size of DMEM variable */
193 u32 fw_priv_size;
194 u32 rsvd0;
195
196 /* --- LONG WORD 3 ---- */
197 u32 rsvd1;
198 u32 rsvd2;
199
200 struct fw_priv fwpriv;
201
202};
203
204enum fw_status {
205 FW_STATUS_INIT = 0,
206 FW_STATUS_LOAD_IMEM = 1,
207 FW_STATUS_LOAD_EMEM = 2,
208 FW_STATUS_LOAD_DMEM = 3,
209 FW_STATUS_READY = 4,
210};
211
212struct rt_firmware {
213 struct fw_hdr *pfwheader;
214 enum fw_status fwstatus;
215 u16 firmwareversion;
216 u8 fw_imem[RTL8190_MAX_FIRMWARE_CODE_SIZE];
217 u8 fw_emem[RTL8190_MAX_FIRMWARE_CODE_SIZE];
218 u32 fw_imem_len;
219 u32 fw_emem_len;
220 u8 sz_fw_tmpbuffer[164000];
221 u32 sz_fw_tmpbufferlen;
222 u16 cmdpacket_fragthresold;
223};
224
225struct h2c_set_pwrmode_parm {
226 u8 mode;
227 u8 flag_low_traffic_en;
228 u8 flag_lpnav_en;
229 u8 flag_rf_low_snr_en;
230 /* 1: dps, 0: 32k */
231 u8 flag_dps_en;
232 u8 bcn_rx_en;
233 u8 bcn_pass_cnt;
234 /* beacon TO (ms). "=0" means no limit. */
235 u8 bcn_to;
236 u16 bcn_itv;
237 /* only for VOIP mode. */
238 u8 app_itv;
239 u8 awake_bcn_itvl;
240 u8 smart_ps;
241 /* unit: 100 ms */
242 u8 bcn_pass_period;
243};
244
245struct h2c_joinbss_rpt_parm {
246 u8 opmode;
247 u8 ps_qos_info;
248 u8 bssid[6];
249 u16 bcnitv;
250 u16 aid;
251};
252
253struct h2c_wpa_ptk {
254 /* EAPOL-Key Key Confirmation Key (KCK) */
255 u8 kck[16];
256 /* EAPOL-Key Key Encryption Key (KEK) */
257 u8 kek[16];
258 /* Temporal Key 1 (TK1) */
259 u8 tk1[16];
260 union {
261 /* Temporal Key 2 (TK2) */
262 u8 tk2[16];
263 struct {
264 u8 tx_mic_key[8];
265 u8 rx_mic_key[8];
266 } athu;
267 } u;
268};
269
270struct h2c_wpa_two_way_parm {
271 /* algorithm TKIP or AES */
272 u8 pairwise_en_alg;
273 u8 group_en_alg;
274 struct h2c_wpa_ptk wpa_ptk_value;
275};
276
277enum h2c_cmd {
278 FW_H2C_SETPWRMODE = 0,
279 FW_H2C_JOINBSSRPT = 1,
280 FW_H2C_WOWLAN_UPDATE_GTK = 2,
281 FW_H2C_WOWLAN_UPDATE_IV = 3,
282 FW_H2C_WOWLAN_OFFLOAD = 4,
283};
284
285enum fw_h2c_cmd {
286 H2C_READ_MACREG_CMD, /*0*/
287 H2C_WRITE_MACREG_CMD,
288 H2C_READBB_CMD,
289 H2C_WRITEBB_CMD,
290 H2C_READRF_CMD,
291 H2C_WRITERF_CMD, /*5*/
292 H2C_READ_EEPROM_CMD,
293 H2C_WRITE_EEPROM_CMD,
294 H2C_READ_EFUSE_CMD,
295 H2C_WRITE_EFUSE_CMD,
296 H2C_READ_CAM_CMD, /*10*/
297 H2C_WRITE_CAM_CMD,
298 H2C_SETBCNITV_CMD,
299 H2C_SETMBIDCFG_CMD,
300 H2C_JOINBSS_CMD,
301 H2C_DISCONNECT_CMD, /*15*/
302 H2C_CREATEBSS_CMD,
303 H2C_SETOPMode_CMD,
304 H2C_SITESURVEY_CMD,
305 H2C_SETAUTH_CMD,
306 H2C_SETKEY_CMD, /*20*/
307 H2C_SETSTAKEY_CMD,
308 H2C_SETASSOCSTA_CMD,
309 H2C_DELASSOCSTA_CMD,
310 H2C_SETSTAPWRSTATE_CMD,
311 H2C_SETBASICRATE_CMD, /*25*/
312 H2C_GETBASICRATE_CMD,
313 H2C_SETDATARATE_CMD,
314 H2C_GETDATARATE_CMD,
315 H2C_SETPHYINFO_CMD,
316 H2C_GETPHYINFO_CMD, /*30*/
317 H2C_SETPHY_CMD,
318 H2C_GETPHY_CMD,
319 H2C_READRSSI_CMD,
320 H2C_READGAIN_CMD,
321 H2C_SETATIM_CMD, /*35*/
322 H2C_SETPWRMODE_CMD,
323 H2C_JOINBSSRPT_CMD,
324 H2C_SETRATABLE_CMD,
325 H2C_GETRATABLE_CMD,
326 H2C_GETCCXREPORT_CMD, /*40*/
327 H2C_GETDTMREPORT_CMD,
328 H2C_GETTXRATESTATICS_CMD,
329 H2C_SETUSBSUSPEND_CMD,
330 H2C_SETH2CLBK_CMD,
331 H2C_TMP1, /*45*/
332 H2C_WOWLAN_UPDATE_GTK_CMD,
333 H2C_WOWLAN_FW_OFFLOAD,
334 H2C_TMP2,
335 H2C_TMP3,
336 H2C_WOWLAN_UPDATE_IV_CMD, /*50*/
337 H2C_TMP4,
338 MAX_H2CCMD /*52*/
339};
340
341/* The following macros are used for FW
342 * CMD map and parameter updated. */
343#define FW_CMD_IO_CLR(rtlpriv, _Bit) \
344 do { \
345 udelay(1000); \
346 rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \
347 } while (0);
348
349#define FW_CMD_IO_UPDATE(rtlpriv, _val) \
350 rtlpriv->rtlhal.fwcmd_iomap = _val;
351
352#define FW_CMD_IO_SET(rtlpriv, _val) \
353 do { \
354 rtl_write_word(rtlpriv, LBUS_MON_ADDR, (u16)_val); \
355 FW_CMD_IO_UPDATE(rtlpriv, _val); \
356 } while (0);
357
358#define FW_CMD_PARA_SET(rtlpriv, _val) \
359 do { \
360 rtl_write_dword(rtlpriv, LBUS_ADDR_MASK, _val); \
361 rtlpriv->rtlhal.fwcmd_ioparam = _val; \
362 } while (0);
363
364#define FW_CMD_IO_QUERY(rtlpriv) \
365 (u16)(rtlpriv->rtlhal.fwcmd_iomap)
366#define FW_CMD_IO_PARA_QUERY(rtlpriv) \
367 ((u32)(rtlpriv->rtlhal.fwcmd_ioparam))
368
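One detail worth noting about the FW_CMD_* helpers above: the do { ... } while (0) wrapper is normally written without a trailing semicolon inside the macro, so that the caller's own semicolon completes it and the macro behaves as a single statement in if/else bodies; keeping the semicolon inside the macro (as here) makes an if/else caller expand to two statements. A minimal, unrelated illustration of the conventional form (hypothetical macro):

#include <stdio.h>

/* Two statements wrapped so the macro expands to one statement;
 * the caller supplies the trailing semicolon. */
#define LOG_AND_COUNT(msg, counter)		\
	do {					\
		puts(msg);			\
		(counter)++;			\
	} while (0)

int main(void)
{
	int errors = 0;

	if (1)
		LOG_AND_COUNT("something happened", errors);
	else
		puts("never reached");

	printf("errors=%d\n", errors);
	return 0;
}
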
369int rtl92s_download_fw(struct ieee80211_hw *hw);
370void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
371void rtl92s_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw,
372 u8 mstatus, u8 ps_qosinfo);
373
374#endif
375
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
new file mode 100644
index 000000000000..2e9005d0454b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -0,0 +1,2512 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../efuse.h"
32#include "../base.h"
33#include "../regd.h"
34#include "../cam.h"
35#include "../ps.h"
36#include "../pci.h"
37#include "reg.h"
38#include "def.h"
39#include "phy.h"
40#include "dm.h"
41#include "fw.h"
42#include "led.h"
43#include "hw.h"
44
45void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
46{
47 struct rtl_priv *rtlpriv = rtl_priv(hw);
48 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
49 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
50
51 switch (variable) {
52 case HW_VAR_RCR: {
53 *((u32 *) (val)) = rtlpci->receive_config;
54 break;
55 }
56 case HW_VAR_RF_STATE: {
57 *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
58 break;
59 }
60 case HW_VAR_FW_PSMODE_STATUS: {
61 *((bool *) (val)) = ppsc->fw_current_inpsmode;
62 break;
63 }
64 case HW_VAR_CORRECT_TSF: {
65 u64 tsf;
66 u32 *ptsf_low = (u32 *)&tsf;
67 u32 *ptsf_high = ((u32 *)&tsf) + 1;
68
69 *ptsf_high = rtl_read_dword(rtlpriv, (TSFR + 4));
70 *ptsf_low = rtl_read_dword(rtlpriv, TSFR);
71
72 *((u64 *) (val)) = tsf;
73
74 break;
75 }
76 case HW_VAR_MRC: {
77 *((bool *)(val)) = rtlpriv->dm.current_mrc_switch;
78 break;
79 }
80 default: {
81 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
82 ("switch case not process\n"));
83 break;
84 }
85 }
86}
87
88void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
89{
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
92 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
93 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
94 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
95 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
96
97 switch (variable) {
98 case HW_VAR_ETHER_ADDR:{
99 rtl_write_dword(rtlpriv, IDR0, ((u32 *)(val))[0]);
100 rtl_write_word(rtlpriv, IDR4, ((u16 *)(val + 4))[0]);
101 break;
102 }
103 case HW_VAR_BASIC_RATE:{
104 u16 rate_cfg = ((u16 *) val)[0];
105 u8 rate_index = 0;
106
107 if (rtlhal->version == VERSION_8192S_ACUT)
108 rate_cfg = rate_cfg & 0x150;
109 else
110 rate_cfg = rate_cfg & 0x15f;
111
112 rate_cfg |= 0x01;
113
114 rtl_write_byte(rtlpriv, RRSR, rate_cfg & 0xff);
115 rtl_write_byte(rtlpriv, RRSR + 1,
116 (rate_cfg >> 8) & 0xff);
117
118 while (rate_cfg > 0x1) {
119 rate_cfg = (rate_cfg >> 1);
120 rate_index++;
121 }
122 rtl_write_byte(rtlpriv, INIRTSMCS_SEL, rate_index);
123
124 break;
125 }
126 case HW_VAR_BSSID:{
127 rtl_write_dword(rtlpriv, BSSIDR, ((u32 *)(val))[0]);
128 rtl_write_word(rtlpriv, BSSIDR + 4,
129 ((u16 *)(val + 4))[0]);
130 break;
131 }
132 case HW_VAR_SIFS:{
133 rtl_write_byte(rtlpriv, SIFS_OFDM, val[0]);
134 rtl_write_byte(rtlpriv, SIFS_OFDM + 1, val[1]);
135 break;
136 }
137 case HW_VAR_SLOT_TIME:{
138 u8 e_aci;
139
140 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
141 ("HW_VAR_SLOT_TIME %x\n", val[0]));
142
143 rtl_write_byte(rtlpriv, SLOT_TIME, val[0]);
144
145 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
146 rtlpriv->cfg->ops->set_hw_reg(hw,
147 HW_VAR_AC_PARAM,
148 (u8 *)(&e_aci));
149 }
150 break;
151 }
152 case HW_VAR_ACK_PREAMBLE:{
153 u8 reg_tmp;
154 u8 short_preamble = (bool) (*(u8 *) val);
155 reg_tmp = (mac->cur_40_prime_sc) << 5;
156 if (short_preamble)
157 reg_tmp |= 0x80;
158
159 rtl_write_byte(rtlpriv, RRSR + 2, reg_tmp);
160 break;
161 }
162 case HW_VAR_AMPDU_MIN_SPACE:{
163 u8 min_spacing_to_set;
164 u8 sec_min_space;
165
166 min_spacing_to_set = *((u8 *)val);
167 if (min_spacing_to_set <= 7) {
168 if (rtlpriv->sec.pairwise_enc_algorithm ==
169 NO_ENCRYPTION)
170 sec_min_space = 0;
171 else
172 sec_min_space = 1;
173
174 if (min_spacing_to_set < sec_min_space)
175 min_spacing_to_set = sec_min_space;
176 if (min_spacing_to_set > 5)
177 min_spacing_to_set = 5;
178
179 mac->min_space_cfg =
180 ((mac->min_space_cfg & 0xf8) |
181 min_spacing_to_set);
182
183 *val = min_spacing_to_set;
184
185 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
186 ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
187 mac->min_space_cfg));
188
189 rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
190 mac->min_space_cfg);
191 }
192 break;
193 }
194 case HW_VAR_SHORTGI_DENSITY:{
195 u8 density_to_set;
196
197 density_to_set = *((u8 *) val);
198 mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
199 mac->min_space_cfg |= (density_to_set << 3);
200
201 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
202 ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
203 mac->min_space_cfg));
204
205 rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
206 mac->min_space_cfg);
207
208 break;
209 }
210 case HW_VAR_AMPDU_FACTOR:{
211 u8 factor_toset;
212 u8 regtoset;
213 u8 factorlevel[18] = {
214 2, 4, 4, 7, 7, 13, 13,
215 13, 2, 7, 7, 13, 13,
216 15, 15, 15, 15, 0};
217 u8 index = 0;
218
219 factor_toset = *((u8 *) val);
220 if (factor_toset <= 3) {
221 factor_toset = (1 << (factor_toset + 2));
222 if (factor_toset > 0xf)
223 factor_toset = 0xf;
224
225 for (index = 0; index < 17; index++) {
226 if (factorlevel[index] > factor_toset)
227 factorlevel[index] =
228 factor_toset;
229 }
230
231 for (index = 0; index < 8; index++) {
232 regtoset = ((factorlevel[index * 2]) |
233 (factorlevel[index *
234 2 + 1] << 4));
235 rtl_write_byte(rtlpriv,
236 AGGLEN_LMT_L + index,
237 regtoset);
238 }
239
240 regtoset = ((factorlevel[16]) |
241 (factorlevel[17] << 4));
242 rtl_write_byte(rtlpriv, AGGLEN_LMT_H, regtoset);
243
244 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
245 ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
246 factor_toset));
247 }
248 break;
249 }
250 case HW_VAR_AC_PARAM:{
251 u8 e_aci = *((u8 *) val);
252 rtl92s_dm_init_edca_turbo(hw);
253
254 if (rtlpci->acm_method != eAcmWay2_SW)
255 rtlpriv->cfg->ops->set_hw_reg(hw,
256 HW_VAR_ACM_CTRL,
257 (u8 *)(&e_aci));
258 break;
259 }
260 case HW_VAR_ACM_CTRL:{
261 u8 e_aci = *((u8 *) val);
262 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(
263 mac->ac[0].aifs));
264 u8 acm = p_aci_aifsn->f.acm;
265 u8 acm_ctrl = rtl_read_byte(rtlpriv, AcmHwCtrl);
266
267 acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ?
268 0x0 : 0x1);
269
270 if (acm) {
271 switch (e_aci) {
272 case AC0_BE:
273 acm_ctrl |= AcmHw_BeqEn;
274 break;
275 case AC2_VI:
276 acm_ctrl |= AcmHw_ViqEn;
277 break;
278 case AC3_VO:
279 acm_ctrl |= AcmHw_VoqEn;
280 break;
281 default:
282 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
283 ("HW_VAR_ACM_CTRL acm set "
284 "failed: eACI is %d\n", acm));
285 break;
286 }
287 } else {
288 switch (e_aci) {
289 case AC0_BE:
290 acm_ctrl &= (~AcmHw_BeqEn);
291 break;
292 case AC2_VI:
293 acm_ctrl &= (~AcmHw_ViqEn);
294 break;
295 case AC3_VO:
296 acm_ctrl &= (~AcmHw_VoqEn);
297 break;
298 default:
299 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
300 ("switch case not process\n"));
301 break;
302 }
303 }
304
305 RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
306 ("HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl));
307 rtl_write_byte(rtlpriv, AcmHwCtrl, acm_ctrl);
308 break;
309 }
310 case HW_VAR_RCR:{
311 rtl_write_dword(rtlpriv, RCR, ((u32 *) (val))[0]);
312 rtlpci->receive_config = ((u32 *) (val))[0];
313 break;
314 }
315 case HW_VAR_RETRY_LIMIT:{
316 u8 retry_limit = ((u8 *) (val))[0];
317
318 rtl_write_word(rtlpriv, RETRY_LIMIT,
319 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
320 retry_limit << RETRY_LIMIT_LONG_SHIFT);
321 break;
322 }
323 case HW_VAR_DUAL_TSF_RST: {
324 break;
325 }
326 case HW_VAR_EFUSE_BYTES: {
327 rtlefuse->efuse_usedbytes = *((u16 *) val);
328 break;
329 }
330 case HW_VAR_EFUSE_USAGE: {
331 rtlefuse->efuse_usedpercentage = *((u8 *) val);
332 break;
333 }
334 case HW_VAR_IO_CMD: {
335 break;
336 }
337 case HW_VAR_WPA_CONFIG: {
338 rtl_write_byte(rtlpriv, REG_SECR, *((u8 *) val));
339 break;
340 }
341 case HW_VAR_SET_RPWM:{
342 break;
343 }
344 case HW_VAR_H2C_FW_PWRMODE:{
345 break;
346 }
347 case HW_VAR_FW_PSMODE_STATUS: {
348 ppsc->fw_current_inpsmode = *((bool *) val);
349 break;
350 }
351 case HW_VAR_H2C_FW_JOINBSSRPT:{
352 break;
353 }
354 case HW_VAR_AID:{
355 break;
356 }
357 case HW_VAR_CORRECT_TSF:{
358 break;
359 }
360 case HW_VAR_MRC: {
361 bool bmrc_toset = *((bool *)val);
362 u8 u1bdata = 0;
363
364 if (bmrc_toset) {
365 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
366 MASKBYTE0, 0x33);
367 u1bdata = (u8)rtl_get_bbreg(hw,
368 ROFDM1_TRXPATHENABLE,
369 MASKBYTE0);
370 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE,
371 MASKBYTE0,
372 ((u1bdata & 0xf0) | 0x03));
373 u1bdata = (u8)rtl_get_bbreg(hw,
374 ROFDM0_TRXPATHENABLE,
375 MASKBYTE1);
376 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
377 MASKBYTE1,
378 (u1bdata | 0x04));
379
380 /* Update current settings. */
381 rtlpriv->dm.current_mrc_switch = bmrc_toset;
382 } else {
383 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
384 MASKBYTE0, 0x13);
385 u1bdata = (u8)rtl_get_bbreg(hw,
386 ROFDM1_TRXPATHENABLE,
387 MASKBYTE0);
388 rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE,
389 MASKBYTE0,
390 ((u1bdata & 0xf0) | 0x01));
391 u1bdata = (u8)rtl_get_bbreg(hw,
392 ROFDM0_TRXPATHENABLE,
393 MASKBYTE1);
394 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
395 MASKBYTE1, (u1bdata & 0xfb));
396
397 /* Update current settings. */
398 rtlpriv->dm.current_mrc_switch = bmrc_toset;
399 }
400
401 break;
402 }
403 default:
404 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
405 ("switch case not process\n"));
406 break;
407 }
408
409}
410
411void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
412{
413 struct rtl_priv *rtlpriv = rtl_priv(hw);
414 u8 sec_reg_value = 0x0;
415
416 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("PairwiseEncAlgorithm = %d "
417 "GroupEncAlgorithm = %d\n",
418 rtlpriv->sec.pairwise_enc_algorithm,
419 rtlpriv->sec.group_enc_algorithm));
420
421 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
422 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
423 ("not open hw encryption\n"));
424 return;
425 }
426
427 sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
428
429 if (rtlpriv->sec.use_defaultkey) {
430 sec_reg_value |= SCR_TXUSEDK;
431 sec_reg_value |= SCR_RXUSEDK;
432 }
433
434 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, ("The SECR-value %x\n",
435 sec_reg_value));
436
437 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
438
439}
440
441static u8 _rtl92ce_halset_sysclk(struct ieee80211_hw *hw, u8 data)
442{
443 struct rtl_priv *rtlpriv = rtl_priv(hw);
444 u8 waitcount = 100;
445 bool bresult = false;
446 u8 tmpvalue;
447
448 rtl_write_byte(rtlpriv, SYS_CLKR + 1, data);
449
450 /* Wait the MAC synchronized. */
451 udelay(400);
452
453 /* Check if it is set ready. */
454 tmpvalue = rtl_read_byte(rtlpriv, SYS_CLKR + 1);
455 bresult = ((tmpvalue & BIT(7)) == (data & BIT(7)));
456
457 if (!(data & (BIT(6) | BIT(7)))) {
458 waitcount = 100;
459 tmpvalue = 0;
460
461 while (1) {
462 waitcount--;
463
464 tmpvalue = rtl_read_byte(rtlpriv, SYS_CLKR + 1);
465 if ((tmpvalue & BIT(6)))
466 break;
467
468 printk(KERN_ERR "wait for BIT(6) return value %x\n",
469 tmpvalue);
470 if (waitcount == 0)
471 break;
472
473 udelay(10);
474 }
475
476 if (waitcount == 0)
477 bresult = false;
478 else
479 bresult = true;
480 }
481
482 return bresult;
483}
484
485void rtl8192se_gpiobit3_cfg_inputmode(struct ieee80211_hw *hw)
486{
487 struct rtl_priv *rtlpriv = rtl_priv(hw);
488 u8 u1tmp;
489
490 /* The following config GPIO function */
491 rtl_write_byte(rtlpriv, MAC_PINMUX_CFG, (GPIOMUX_EN | GPIOSEL_GPIO));
492 u1tmp = rtl_read_byte(rtlpriv, GPIO_IO_SEL);
493
494 /* config GPIO3 to input */
495 u1tmp &= HAL_8192S_HW_GPIO_OFF_MASK;
496 rtl_write_byte(rtlpriv, GPIO_IO_SEL, u1tmp);
497
498}
499
500static u8 _rtl92se_rf_onoff_detect(struct ieee80211_hw *hw)
501{
502 struct rtl_priv *rtlpriv = rtl_priv(hw);
503 u8 u1tmp;
504 u8 retval = ERFON;
505
506 /* The following config GPIO function */
507 rtl_write_byte(rtlpriv, MAC_PINMUX_CFG, (GPIOMUX_EN | GPIOSEL_GPIO));
508 u1tmp = rtl_read_byte(rtlpriv, GPIO_IO_SEL);
509
510 /* config GPIO3 to input */
511 u1tmp &= HAL_8192S_HW_GPIO_OFF_MASK;
512 rtl_write_byte(rtlpriv, GPIO_IO_SEL, u1tmp);
513
514 /* On some platforms, the driver cannot read the correct
515 * value without a delay between Write_GPIO_SEL and Read_GPIO_IN */
516 mdelay(10);
517
518 /* check GPIO3 */
519 u1tmp = rtl_read_byte(rtlpriv, GPIO_IN);
520 retval = (u1tmp & HAL_8192S_HW_GPIO_OFF_BIT) ? ERFON : ERFOFF;
521
522 return retval;
523}
524
525static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
526{
527 struct rtl_priv *rtlpriv = rtl_priv(hw);
528 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
529 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
530
531 u8 i;
532 u8 tmpu1b;
533 u16 tmpu2b;
534 u8 pollingcnt = 20;
535
536 if (rtlpci->first_init) {
537 /* Reset PCIE Digital */
538 tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
539 tmpu1b &= 0xFE;
540 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmpu1b);
541 udelay(1);
542 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmpu1b | BIT(0));
543 }
544
545 /* Switch to SW IO control */
546 tmpu1b = rtl_read_byte(rtlpriv, (SYS_CLKR + 1));
547 if (tmpu1b & BIT(7)) {
548 tmpu1b &= ~(BIT(6) | BIT(7));
549
550 /* Set failed, return to prevent hang. */
551 if (!_rtl92ce_halset_sysclk(hw, tmpu1b))
552 return;
553 }
554
555 rtl_write_byte(rtlpriv, AFE_PLL_CTRL, 0x0);
556 udelay(50);
557 rtl_write_byte(rtlpriv, LDOA15_CTRL, 0x34);
558 udelay(50);
559
560 /* Clear FW RPWM for FW control LPS.*/
561 rtl_write_byte(rtlpriv, RPWM, 0x0);
562
563 /* Reset MAC-IO and CPU and Core Digital BIT(10)/11/15 */
564 tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
565 tmpu1b &= 0x73;
566 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmpu1b);
567 /* wait for BIT 10/11/15 to pull high automatically!! */
568 mdelay(1);
569
570 rtl_write_byte(rtlpriv, CMDR, 0);
571 rtl_write_byte(rtlpriv, TCR, 0);
572
573 /* The data sheet does not define 0x562; copied from WMAC. */
574 tmpu1b = rtl_read_byte(rtlpriv, 0x562);
575 tmpu1b |= 0x08;
576 rtl_write_byte(rtlpriv, 0x562, tmpu1b);
577 tmpu1b &= ~(BIT(3));
578 rtl_write_byte(rtlpriv, 0x562, tmpu1b);
579
580 /* Enable AFE clock source */
581 tmpu1b = rtl_read_byte(rtlpriv, AFE_XTAL_CTRL);
582 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL, (tmpu1b | 0x01));
583 /* Delay 1.5ms */
584 mdelay(2);
585 tmpu1b = rtl_read_byte(rtlpriv, AFE_XTAL_CTRL + 1);
586 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL + 1, (tmpu1b & 0xfb));
587
588 /* Enable AFE Macro Block's Bandgap */
589 tmpu1b = rtl_read_byte(rtlpriv, AFE_MISC);
590 rtl_write_byte(rtlpriv, AFE_MISC, (tmpu1b | BIT(0)));
591 mdelay(1);
592
593 /* Enable AFE Mbias */
594 tmpu1b = rtl_read_byte(rtlpriv, AFE_MISC);
595 rtl_write_byte(rtlpriv, AFE_MISC, (tmpu1b | 0x02));
596 mdelay(1);
597
598 /* Enable LDOA15 block */
599 tmpu1b = rtl_read_byte(rtlpriv, LDOA15_CTRL);
600 rtl_write_byte(rtlpriv, LDOA15_CTRL, (tmpu1b | BIT(0)));
601
602 /* Set Digital Vdd to Retention isolation Path. */
603 tmpu2b = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
604 rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, (tmpu2b | BIT(11)));
605
606 /* For the warm-reboot NIC-disappear bug. */
607 tmpu2b = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
608 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | BIT(13)));
609
610 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x68);
611
612 /* Enable AFE PLL Macro Block */
613 /* We need to delay 100us before enabling the PLL. */
614 udelay(200);
615 tmpu1b = rtl_read_byte(rtlpriv, AFE_PLL_CTRL);
616 rtl_write_byte(rtlpriv, AFE_PLL_CTRL, (tmpu1b | BIT(0) | BIT(4)));
617
618 /* for divider reset */
619 udelay(100);
620 rtl_write_byte(rtlpriv, AFE_PLL_CTRL, (tmpu1b | BIT(0) |
621 BIT(4) | BIT(6)));
622 udelay(10);
623 rtl_write_byte(rtlpriv, AFE_PLL_CTRL, (tmpu1b | BIT(0) | BIT(4)));
624 udelay(10);
625
626 /* Enable MAC 80MHZ clock */
627 tmpu1b = rtl_read_byte(rtlpriv, AFE_PLL_CTRL + 1);
628 rtl_write_byte(rtlpriv, AFE_PLL_CTRL + 1, (tmpu1b | BIT(0)));
629 mdelay(1);
630
631 /* Release isolation AFE PLL & MD */
632 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xA6);
633
634 /* Enable MAC clock */
635 tmpu2b = rtl_read_word(rtlpriv, SYS_CLKR);
636 rtl_write_word(rtlpriv, SYS_CLKR, (tmpu2b | BIT(12) | BIT(11)));
637
638 /* Enable Core digital and enable IOREG R/W */
639 tmpu2b = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
640 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | BIT(11)));
641
642 tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
643 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmpu1b & ~(BIT(7)));
644
645 /* enable REG_EN */
646 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | BIT(11) | BIT(15)));
647
648 /* Switch the control path. */
649 tmpu2b = rtl_read_word(rtlpriv, SYS_CLKR);
650 rtl_write_word(rtlpriv, SYS_CLKR, (tmpu2b & (~BIT(2))));
651
652 tmpu1b = rtl_read_byte(rtlpriv, (SYS_CLKR + 1));
653 tmpu1b = ((tmpu1b | BIT(7)) & (~BIT(6)));
654 if (!_rtl92ce_halset_sysclk(hw, tmpu1b))
655 return; /* Set failed, return to prevent hang. */
656
657 rtl_write_word(rtlpriv, CMDR, 0x07FC);
658
659 /* MH: We must enable this section of code to prevent IMEM load failures. */
660 /* Load MAC registers from WMAC values temporarily; we simulate macreg.txt */
661 /* here. HW will provide the real MAC register txt later. */
662 rtl_write_byte(rtlpriv, 0x6, 0x30);
663 rtl_write_byte(rtlpriv, 0x49, 0xf0);
664
665 rtl_write_byte(rtlpriv, 0x4b, 0x81);
666
667 rtl_write_byte(rtlpriv, 0xb5, 0x21);
668
669 rtl_write_byte(rtlpriv, 0xdc, 0xff);
670 rtl_write_byte(rtlpriv, 0xdd, 0xff);
671 rtl_write_byte(rtlpriv, 0xde, 0xff);
672 rtl_write_byte(rtlpriv, 0xdf, 0xff);
673
674 rtl_write_byte(rtlpriv, 0x11a, 0x00);
675 rtl_write_byte(rtlpriv, 0x11b, 0x00);
676
677 for (i = 0; i < 32; i++)
678 rtl_write_byte(rtlpriv, INIMCS_SEL + i, 0x1b);
679
680 rtl_write_byte(rtlpriv, 0x236, 0xff);
681
682 rtl_write_byte(rtlpriv, 0x503, 0x22);
683
684 if (ppsc->support_aspm && !ppsc->support_backdoor)
685 rtl_write_byte(rtlpriv, 0x560, 0x40);
686 else
687 rtl_write_byte(rtlpriv, 0x560, 0x00);
688
689 rtl_write_byte(rtlpriv, DBG_PORT, 0x91);
690
691 /* Set RX Desc Address */
692 rtl_write_dword(rtlpriv, RDQDA, rtlpci->rx_ring[RX_MPDU_QUEUE].dma);
693 rtl_write_dword(rtlpriv, RCDA, rtlpci->rx_ring[RX_CMD_QUEUE].dma);
694
695 /* Set TX Desc Address */
696 rtl_write_dword(rtlpriv, TBKDA, rtlpci->tx_ring[BK_QUEUE].dma);
697 rtl_write_dword(rtlpriv, TBEDA, rtlpci->tx_ring[BE_QUEUE].dma);
698 rtl_write_dword(rtlpriv, TVIDA, rtlpci->tx_ring[VI_QUEUE].dma);
699 rtl_write_dword(rtlpriv, TVODA, rtlpci->tx_ring[VO_QUEUE].dma);
700 rtl_write_dword(rtlpriv, TBDA, rtlpci->tx_ring[BEACON_QUEUE].dma);
701 rtl_write_dword(rtlpriv, TCDA, rtlpci->tx_ring[TXCMD_QUEUE].dma);
702 rtl_write_dword(rtlpriv, TMDA, rtlpci->tx_ring[MGNT_QUEUE].dma);
703 rtl_write_dword(rtlpriv, THPDA, rtlpci->tx_ring[HIGH_QUEUE].dma);
704 rtl_write_dword(rtlpriv, HDA, rtlpci->tx_ring[HCCA_QUEUE].dma);
705
706 rtl_write_word(rtlpriv, CMDR, 0x37FC);
707
708 /* Make sure that TxDMA is ready to download the FW. */
709 /* We should reset TxDMA if IMEM RPT was not ready. */
710 do {
711 tmpu1b = rtl_read_byte(rtlpriv, TCR);
712 if ((tmpu1b & TXDMA_INIT_VALUE) == TXDMA_INIT_VALUE)
713 break;
714
715 udelay(5);
716 } while (pollingcnt--);
717
718 if (pollingcnt <= 0) {
719 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
720 ("Polling TXDMA_INIT_VALUE "
721 "timeout!! Current TCR(%#x)\n", tmpu1b));
722 tmpu1b = rtl_read_byte(rtlpriv, CMDR);
723 rtl_write_byte(rtlpriv, CMDR, tmpu1b & (~TXDMA_EN));
724 udelay(2);
725 /* Reset TxDMA */
726 rtl_write_byte(rtlpriv, CMDR, tmpu1b | TXDMA_EN);
727 }
728
729 /* After MACIO reset, we must refresh the LED state. */
730 if ((ppsc->rfoff_reason == RF_CHANGE_BY_IPS) ||
731 (ppsc->rfoff_reason == 0)) {
732 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
733 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
734 enum rf_pwrstate rfpwr_state_toset;
735 rfpwr_state_toset = _rtl92se_rf_onoff_detect(hw);
736
737 if (rfpwr_state_toset == ERFON)
738 rtl92se_sw_led_on(hw, pLed0);
739 }
740}
741
742static void _rtl92se_macconfig_after_fwdownload(struct ieee80211_hw *hw)
743{
744 struct rtl_priv *rtlpriv = rtl_priv(hw);
745 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
746 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
747 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
748 u8 i;
749 u16 tmpu2b;
750
751 /* 1. System Configure Register (Offset: 0x0000 - 0x003F) */
752
753 /* 2. Command Control Register (Offset: 0x0040 - 0x004F) */
754 /* Turn on 0x40 Command register */
755 rtl_write_word(rtlpriv, CMDR, (BBRSTN | BB_GLB_RSTN |
756 SCHEDULE_EN | MACRXEN | MACTXEN | DDMA_EN | FW2HW_EN |
757 RXDMA_EN | TXDMA_EN | HCI_RXDMA_EN | HCI_TXDMA_EN));
758
759 /* Set TCR TX DMA pre 2 FULL enable bit */
760 rtl_write_dword(rtlpriv, TCR, rtl_read_dword(rtlpriv, TCR) |
761 TXDMAPRE2FULL);
762
763 /* Set RCR */
764 rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
765
766 /* 3. MACID Setting Register (Offset: 0x0050 - 0x007F) */
767
768 /* 4. Timing Control Register (Offset: 0x0080 - 0x009F) */
769 /* Set CCK/OFDM SIFS */
770 /* CCK SIFS shall always be 10us. */
771 rtl_write_word(rtlpriv, SIFS_CCK, 0x0a0a);
772 rtl_write_word(rtlpriv, SIFS_OFDM, 0x1010);
773
774 /* Set AckTimeout */
775 rtl_write_byte(rtlpriv, ACK_TIMEOUT, 0x40);
776
777 /* Beacon related */
778 rtl_write_word(rtlpriv, BCN_INTERVAL, 100);
779 rtl_write_word(rtlpriv, ATIMWND, 2);
780
781 /* 5. FIFO Control Register (Offset: 0x00A0 - 0x015F) */
782 /* 5.1 Initialize Number of Reserved Pages in Firmware Queue */
783 /* Firmware allocates these now, associated with FW internal settings. */
784
785 /* 5.2 Setting TX/RX page size 0/1/2/3/4=64/128/256/512/1024 */
786 /* 5.3 Set driver info, we only accept PHY status now. */
787 /* 5.4 Set RXDMA arbitration to control RXDMA/MAC/FW R/W for RXFIFO */
788 rtl_write_byte(rtlpriv, RXDMA, rtl_read_byte(rtlpriv, RXDMA) | BIT(6));
789
790 /* 6. Adaptive Control Register (Offset: 0x0160 - 0x01CF) */
791 /* Set RRSR to all legacy rate and HT rate
792 * CCK rate is supported by default.
793 * CCK rate will be filtered out only when associated
794 * AP does not support it.
795 * Only enable ACK rate to OFDM 24M
796 * Disable RRSR for CCK rate in A-Cut */
797
798 if (rtlhal->version == VERSION_8192S_ACUT)
799 rtl_write_byte(rtlpriv, RRSR, 0xf0);
800 else if (rtlhal->version == VERSION_8192S_BCUT)
801 rtl_write_byte(rtlpriv, RRSR, 0xff);
802 rtl_write_byte(rtlpriv, RRSR + 1, 0x01);
803 rtl_write_byte(rtlpriv, RRSR + 2, 0x00);
804
805 /* A-Cut ICs do not support CCK rates. We forbid ARFR from */
806 /* falling back to CCK rates. */
807 for (i = 0; i < 8; i++) {
808 /*Disable RRSR for CCK rate in A-Cut */
809 if (rtlhal->version == VERSION_8192S_ACUT)
810 rtl_write_dword(rtlpriv, ARFR0 + i * 4, 0x1f0ff0f0);
811 }
812
813 /* Different rate use different AMPDU size */
814 /* MCS32/ MCS15_SG use max AMPDU size 15*2=30K */
815 rtl_write_byte(rtlpriv, AGGLEN_LMT_H, 0x0f);
816 /* MCS0/1/2/3 use max AMPDU size 4*2=8K */
817 rtl_write_word(rtlpriv, AGGLEN_LMT_L, 0x7442);
818 /* MCS4/5 use max AMPDU size 8*2=16K 6/7 use 10*2=20K */
819 rtl_write_word(rtlpriv, AGGLEN_LMT_L + 2, 0xddd7);
820 /* MCS8/9 use max AMPDU size 8*2=16K 10/11 use 10*2=20K */
821 rtl_write_word(rtlpriv, AGGLEN_LMT_L + 4, 0xd772);
822 /* MCS12/13/14/15 use max AMPDU size 15*2=30K */
823 rtl_write_word(rtlpriv, AGGLEN_LMT_L + 6, 0xfffd);
824
825 /* Set Data / Response auto rate fallback retry count */
826 rtl_write_dword(rtlpriv, DARFRC, 0x04010000);
827 rtl_write_dword(rtlpriv, DARFRC + 4, 0x09070605);
828 rtl_write_dword(rtlpriv, RARFRC, 0x04010000);
829 rtl_write_dword(rtlpriv, RARFRC + 4, 0x09070605);
830
831 /* 7. EDCA Setting Register (Offset: 0x01D0 - 0x01FF) */
832 /* Set all rate to support SG */
833 rtl_write_word(rtlpriv, SG_RATE, 0xFFFF);
834
835 /* 8. WMAC, BA, and CCX related Register (Offset: 0x0200 - 0x023F) */
836 /* Set NAV protection length */
837 rtl_write_word(rtlpriv, NAV_PROT_LEN, 0x0080);
838 /* CF-END Threshold */
839 rtl_write_byte(rtlpriv, CFEND_TH, 0xFF);
840 /* Set AMPDU minimum space */
841 rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE, 0x07);
842 /* Set TXOP stall control for several queues (HI/BCN/MGT) */
843 rtl_write_byte(rtlpriv, TXOP_STALL_CTRL, 0x00);
844
845 /* 9. Security Control Register (Offset: 0x0240 - 0x025F) */
846 /* 10. Power Save Control Register (Offset: 0x0260 - 0x02DF) */
847 /* 11. General Purpose Register (Offset: 0x02E0 - 0x02FF) */
848 /* 12. Host Interrupt Status Register (Offset: 0x0300 - 0x030F) */
849 /* 13. Test Mode and Debug Control Register (Offset: 0x0310 - 0x034F) */
850
851 /* 14. Set driver info, we only accept PHY status now. */
852 rtl_write_byte(rtlpriv, RXDRVINFO_SZ, 4);
853
854 /* 15. For EEPROM R/W Workaround */
855 /* 16. For EFUSE to share REG_SYS_FUNC_EN with EEPROM!!! */
856 tmpu2b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN);
857 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, tmpu2b | BIT(13));
858 tmpu2b = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
859 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, tmpu2b & (~BIT(8)));
860
861 /* 17. For EFUSE */
862 /* We may R/W EFUSE in EEPROM mode */
863 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
864 u8 tempval;
865
866 tempval = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL + 1);
867 tempval &= 0xFE;
868 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, tempval);
869
870 /* Change Program timing */
871 rtl_write_byte(rtlpriv, REG_EFUSE_CTRL + 3, 0x72);
872 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("EFUSE CONFIG OK\n"));
873 }
874
875 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
876
877}
878
879static void _rtl92se_hw_configure(struct ieee80211_hw *hw)
880{
881 struct rtl_priv *rtlpriv = rtl_priv(hw);
882 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
883 struct rtl_phy *rtlphy = &(rtlpriv->phy);
884 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
885
886 u8 reg_bw_opmode = 0;
887 u32 reg_ratr = 0, reg_rrsr = 0;
888 u8 regtmp = 0;
889
890 reg_bw_opmode = BW_OPMODE_20MHZ;
891 reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS |
892 RATE_ALL_OFDM_2SS;
893 reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
894
895 regtmp = rtl_read_byte(rtlpriv, INIRTSMCS_SEL);
896 reg_rrsr = ((reg_rrsr & 0x000fffff) << 8) | regtmp;
897 rtl_write_dword(rtlpriv, INIRTSMCS_SEL, reg_rrsr);
898 rtl_write_byte(rtlpriv, BW_OPMODE, reg_bw_opmode);
899
900 /* Set Retry Limit here */
901 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
902 (u8 *)(&rtlpci->shortretry_limit));
903
904 rtl_write_byte(rtlpriv, MLT, 0x8f);
905
906 /* For Min Spacing configuration. */
907 switch (rtlphy->rf_type) {
908 case RF_1T2R:
909 case RF_1T1R:
910 rtlhal->minspace_cfg = (MAX_MSS_DENSITY_1T << 3);
911 break;
912 case RF_2T2R:
913 case RF_2T2R_GREEN:
914 rtlhal->minspace_cfg = (MAX_MSS_DENSITY_2T << 3);
915 break;
916 }
917 rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE, rtlhal->minspace_cfg);
918}
919
920int rtl92se_hw_init(struct ieee80211_hw *hw)
921{
922 struct rtl_priv *rtlpriv = rtl_priv(hw);
923 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
924 struct rtl_phy *rtlphy = &(rtlpriv->phy);
925 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
926 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
927 u8 tmp_byte = 0;
928
929 bool rtstatus = true;
930 u8 tmp_u1b;
931 int err = 0;
932 u8 i;
933 int wdcapra_add[] = {
934 EDCAPARA_BE, EDCAPARA_BK,
935 EDCAPARA_VI, EDCAPARA_VO};
936 u8 secr_value = 0x0;
937
938 rtlpci->being_init_adapter = true;
939
940 rtlpriv->intf_ops->disable_aspm(hw);
941
942 /* 1. MAC Initialize */
943 /* Before FW download, we have to set some MAC register */
944 _rtl92se_macconfig_before_fwdownload(hw);
945
946 rtlhal->version = (enum version_8192s)((rtl_read_dword(rtlpriv,
947 PMC_FSM) >> 16) & 0xF);
948
949 rtl8192se_gpiobit3_cfg_inputmode(hw);
950
951 /* 2. download firmware */
952 rtstatus = rtl92s_download_fw(hw);
953 if (!rtstatus) {
954 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
955 ("Failed to download FW. "
956 "Init HW without FW now.., Please copy FW into"
957 "/lib/firmware/rtlwifi\n"));
958 rtlhal->fw_ready = false;
959 } else {
960 rtlhal->fw_ready = true;
961 }
962
963 /* After FW download, we have to reset MAC register */
964 _rtl92se_macconfig_after_fwdownload(hw);
965
966 /*Retrieve default FW Cmd IO map. */
967 rtlhal->fwcmd_iomap = rtl_read_word(rtlpriv, LBUS_MON_ADDR);
968 rtlhal->fwcmd_ioparam = rtl_read_dword(rtlpriv, LBUS_ADDR_MASK);
969
970 /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
971 if (!rtl92s_phy_mac_config(hw)) {
972 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("MAC Config failed\n"));
973 return rtstatus;
974 }
975
976 /* Make sure BB/RF writes are OK. We should prevent entering IPS/radio off; */
977 /* we must set this flag to avoid that during the BB/RF config period later. */
978 rtl_write_dword(rtlpriv, CMDR, 0x37FC);
979
980 /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
981 if (!rtl92s_phy_bb_config(hw)) {
982 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, ("BB Config failed\n"));
983 return rtstatus;
984 }
985
986 /* 5. Initialize RF: RADIO_A.txt, RADIO_B.txt */
987 /* Before initializing RF, we cannot use FW to do RF R/W. */
988
989 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
990
991 /* RF Power Save */
992#if 0
993 /* H/W or S/W RF OFF before sleep. */
994 if (rtlpriv->psc.rfoff_reason > RF_CHANGE_BY_PS) {
995 u32 rfoffreason = rtlpriv->psc.rfoff_reason;
996
997 rtlpriv->psc.rfoff_reason = RF_CHANGE_BY_INIT;
998 rtlpriv->psc.rfpwr_state = ERFON;
999 rtl_ps_set_rf_state(hw, ERFOFF, rfoffreason, true);
1000 } else {
1001 /* gpio radio on/off is out of adapter start */
1002 if (rtlpriv->psc.hwradiooff == false) {
1003 rtlpriv->psc.rfpwr_state = ERFON;
1004 rtlpriv->psc.rfoff_reason = 0;
1005 }
1006 }
1007#endif
1008
1009 /* Before RF R/W we must execute this IO (per Scott's suggestion). */
1010 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL + 1, 0xDB);
1011 if (rtlhal->version == VERSION_8192S_ACUT)
1012 rtl_write_byte(rtlpriv, SPS1_CTRL + 3, 0x07);
1013 else
1014 rtl_write_byte(rtlpriv, RF_CTRL, 0x07);
1015
1016 if (!rtl92s_phy_rf_config(hw)) {
1017 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("RF Config failed\n"));
1018 return rtstatus;
1019 }
1020
1021 /* After reading the predefined TXT files, we must set the BB/MAC/RF
1022 * registers as required */
1023
1024 rtlphy->rfreg_chnlval[0] = rtl92s_phy_query_rf_reg(hw,
1025 (enum radio_path)0,
1026 RF_CHNLBW,
1027 RFREG_OFFSET_MASK);
1028 rtlphy->rfreg_chnlval[1] = rtl92s_phy_query_rf_reg(hw,
1029 (enum radio_path)1,
1030 RF_CHNLBW,
1031 RFREG_OFFSET_MASK);
1032
1033 /*---- Set CCK and OFDM Block "ON"----*/
1034 rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
1035 rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
1036
1037 /*3 Set Hardware(Do nothing now) */
1038 _rtl92se_hw_configure(hw);
1039
1040 /* Read EEPROM TX power index and PHY_REG_PG.txt to capture correct */
1041 /* TX power index for different rate set. */
1042 /* Get original hw reg values */
1043 rtl92s_phy_get_hw_reg_originalvalue(hw);
1044 /* Write correct tx power index */
1045 rtl92s_phy_set_txpower(hw, rtlphy->current_channel);
1046
1047 /* We must set MAC address after firmware download. */
1048 for (i = 0; i < 6; i++)
1049 rtl_write_byte(rtlpriv, MACIDR0 + i, rtlefuse->dev_addr[i]);
1050
1051 /* EEPROM R/W workaround */
1052 tmp_u1b = rtl_read_byte(rtlpriv, MAC_PINMUX_CFG);
1053 rtl_write_byte(rtlpriv, MAC_PINMUX_CFG, tmp_u1b & (~BIT(3)));
1054
1055 rtl_write_byte(rtlpriv, 0x4d, 0x0);
1056
1057 if (hal_get_firmwareversion(rtlpriv) >= 0x49) {
1058 tmp_byte = rtl_read_byte(rtlpriv, FW_RSVD_PG_CRTL) & (~BIT(4));
1059 tmp_byte = tmp_byte | BIT(5);
1060 rtl_write_byte(rtlpriv, FW_RSVD_PG_CRTL, tmp_byte);
1061 rtl_write_dword(rtlpriv, TXDESC_MSK, 0xFFFFCFFF);
1062 }
1063
1064 /* We enable the high power and RA related mechanisms after the NIC
1065 * is initialized. */
1066 rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_INIT);
1067
1068 /* Add to prevent ASPM bug. */
1069 /* Always enable host and NIC clock requests. */
1070 rtl92s_phy_switch_ephy_parameter(hw);
1071
1072 /* Security related
1073 * 1. Clear all H/W keys.
1074 * 2. Enable H/W encryption/decryption. */
1075 rtl_cam_reset_all_entry(hw);
1076 secr_value |= SCR_TXENCENABLE;
1077 secr_value |= SCR_RXENCENABLE;
1078 secr_value |= SCR_NOSKMC;
1079 rtl_write_byte(rtlpriv, REG_SECR, secr_value);
1080
1081 for (i = 0; i < 4; i++)
1082 rtl_write_dword(rtlpriv, wdcapra_add[i], 0x5e4322);
1083
1084 if (rtlphy->rf_type == RF_1T2R) {
1085 bool mrc2set = true;
1086 /* Turn on B-Path */
1087 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MRC, (u8 *)&mrc2set);
1088 }
1089
1090 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
1091 rtl92s_dm_init(hw);
1092 rtlpci->being_init_adapter = false;
1093
1094 return err;
1095}
1096
1097void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr)
1098{
1099}
1100
1101void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1102{
1103 struct rtl_priv *rtlpriv = rtl_priv(hw);
1104 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1105 u32 reg_rcr = rtlpci->receive_config;
1106
1107 if (rtlpriv->psc.rfpwr_state != ERFON)
1108 return;
1109
1110 if (check_bssid) {
1111 reg_rcr |= (RCR_CBSSID);
1112 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1113 } else {
1114 reg_rcr &= (~RCR_CBSSID);
1115 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
1116 }
1117
1118}
1119
1120static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
1121 enum nl80211_iftype type)
1122{
1123 struct rtl_priv *rtlpriv = rtl_priv(hw);
1124 u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
1125 enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
1126 u32 temp;
1127 bt_msr &= ~MSR_LINK_MASK;
1128
1129 switch (type) {
1130 case NL80211_IFTYPE_UNSPECIFIED:
1131 bt_msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
1132 ledaction = LED_CTL_LINK;
1133 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1134 ("Set Network type to NO LINK!\n"));
1135 break;
1136 case NL80211_IFTYPE_ADHOC:
1137 bt_msr |= (MSR_LINK_ADHOC << MSR_LINK_SHIFT);
1138 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1139 ("Set Network type to Ad Hoc!\n"));
1140 break;
1141 case NL80211_IFTYPE_STATION:
1142 bt_msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
1143 ledaction = LED_CTL_LINK;
1144 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1145 ("Set Network type to STA!\n"));
1146 break;
1147 case NL80211_IFTYPE_AP:
1148 bt_msr |= (MSR_LINK_MASTER << MSR_LINK_SHIFT);
1149 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1150 ("Set Network type to AP!\n"));
1151 break;
1152 default:
1153 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1154 ("Network type %d not support!\n", type));
1155 return 1;
1156 break;
1157
1158 }
1159
1160 rtl_write_byte(rtlpriv, (MSR), bt_msr);
1161
1162 temp = rtl_read_dword(rtlpriv, TCR);
1163 rtl_write_dword(rtlpriv, TCR, temp & (~BIT(8)));
1164 rtl_write_dword(rtlpriv, TCR, temp | BIT(8));
1165
1166
1167 return 0;
1168}
1169
1170/* HW_VAR_MEDIA_STATUS & HW_VAR_CECHK_BSSID */
1171int rtl92se_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1172{
1173 struct rtl_priv *rtlpriv = rtl_priv(hw);
1174
1175 if (_rtl92se_set_media_status(hw, type))
1176 return -EOPNOTSUPP;
1177
1178 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1179 if (type != NL80211_IFTYPE_AP)
1180 rtl92se_set_check_bssid(hw, true);
1181 } else {
1182 rtl92se_set_check_bssid(hw, false);
1183 }
1184
1185 return 0;
1186}
1187
1188/* Don't set REG_EDCA_BE_PARAM here because mac80211 sends packets while scanning. */
1189void rtl92se_set_qos(struct ieee80211_hw *hw, int aci)
1190{
1191 struct rtl_priv *rtlpriv = rtl_priv(hw);
1192 rtl92s_dm_init_edca_turbo(hw);
1193
1194 switch (aci) {
1195 case AC1_BK:
1196 rtl_write_dword(rtlpriv, EDCAPARA_BK, 0xa44f);
1197 break;
1198 case AC0_BE:
1199 /* rtl_write_dword(rtlpriv, EDCAPARA_BE, u4b_ac_param); */
1200 break;
1201 case AC2_VI:
1202 rtl_write_dword(rtlpriv, EDCAPARA_VI, 0x5e4322);
1203 break;
1204 case AC3_VO:
1205 rtl_write_dword(rtlpriv, EDCAPARA_VO, 0x2f3222);
1206 break;
1207 default:
1208 RT_ASSERT(false, ("invalid aci: %d !\n", aci));
1209 break;
1210 }
1211}
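
For reference, the magic dwords written above (e.g. 0xa44f for AC_BK) pack AIFS, ECWmin/ECWmax and the TXOP limit into a single EDCA parameter register. A minimal sketch of how such a value could be composed from mac80211 queue parameters follows; the field offsets (0/8/12/16) and the direct use of the AIFSN are assumptions made only for illustration, not taken from this patch.

	/* Sketch only: pack mac80211 EDCA queue parameters into one dword.
	 * Assumed layout: AIFS in bits 0-7, ECWmin in 8-11, ECWmax in 12-15,
	 * TXOP limit in 16-31.  Needs <net/mac80211.h> and <linux/bitops.h>. */
	static u32 edca_param_sketch(const struct ieee80211_tx_queue_params *p)
	{
		return  (u32)p->aifs			|	/* AIFSN (real code converts to us) */
			((u32)fls(p->cw_min) << 8)	|	/* cw_min = 2^ECWmin - 1 */
			((u32)fls(p->cw_max) << 12)	|
			((u32)p->txop << 16);
	}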
1212
1213void rtl92se_enable_interrupt(struct ieee80211_hw *hw)
1214{
1215 struct rtl_priv *rtlpriv = rtl_priv(hw);
1216 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1217
1218 rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]);
1219 /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */
1220 rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F);
1221
1222 rtlpci->irq_enabled = true;
1223}
1224
1225void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
1226{
1227 struct rtl_priv *rtlpriv = rtl_priv(hw);
1228 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1229
1230 rtl_write_dword(rtlpriv, INTA_MASK, 0);
1231 rtl_write_dword(rtlpriv, INTA_MASK + 4, 0);
1232
1233 rtlpci->irq_enabled = false;
1234}
1235
1236
1237static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data)
1238{
1239 struct rtl_priv *rtlpriv = rtl_priv(hw);
1240 u8 waitcnt = 100;
1241 bool result = false;
1242 u8 tmp;
1243
1244 rtl_write_byte(rtlpriv, SYS_CLKR + 1, data);
1245
1246 /* Wait the MAC synchronized. */
1247 udelay(400);
1248
1249 /* Check if it is set ready. */
1250 tmp = rtl_read_byte(rtlpriv, SYS_CLKR + 1);
1251 result = ((tmp & BIT(7)) == (data & BIT(7)));
1252
1253 if ((data & (BIT(6) | BIT(7))) == 0) {
1254 waitcnt = 100;
1255 tmp = 0;
1256
1257 while (1) {
1258 waitcnt--;
1259 tmp = rtl_read_byte(rtlpriv, SYS_CLKR + 1);
1260
1261 if ((tmp & BIT(6)))
1262 break;
1263
1264 printk(KERN_ERR "wait for BIT(6) return value %x\n",
1265 tmp);
1266
1267 if (waitcnt == 0)
1268 break;
1269 udelay(10);
1270 }
1271
1272 if (waitcnt == 0)
1273 result = false;
1274 else
1275 result = true;
1276 }
1277
1278 return result;
1279}
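
The wait loop above is a bounded poll on SYS_CLKR bit 6: read the register, test the bit, back off 10 us, and give up after roughly 100 tries. Expressed as a stand-alone helper purely for illustration (hypothetical, not part of this driver):

	/* Illustrative only: poll until (read_reg() & mask) is non-zero or the
	 * retry budget runs out; mirrors the 10 us / 100-iteration loop above. */
	static bool poll_mask_set(u8 (*read_reg)(void), u8 mask, int retries)
	{
		while (retries--) {
			if (read_reg() & mask)
				return true;
			udelay(10);
		}
		return false;
	}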
1280
1281static void _rtl92s_phy_set_rfhalt(struct ieee80211_hw *hw)
1282{
1283 struct rtl_priv *rtlpriv = rtl_priv(hw);
1284 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1285 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1286 u8 u1btmp;
1287
1288 if (rtlhal->driver_going2unload)
1289 rtl_write_byte(rtlpriv, 0x560, 0x0);
1290
1291 /* Power save for BB/RF */
1292 u1btmp = rtl_read_byte(rtlpriv, LDOV12D_CTRL);
1293 u1btmp |= BIT(0);
1294 rtl_write_byte(rtlpriv, LDOV12D_CTRL, u1btmp);
1295 rtl_write_byte(rtlpriv, SPS1_CTRL, 0x0);
1296 rtl_write_byte(rtlpriv, TXPAUSE, 0xFF);
1297 rtl_write_word(rtlpriv, CMDR, 0x57FC);
1298 udelay(100);
1299 rtl_write_word(rtlpriv, CMDR, 0x77FC);
1300 rtl_write_byte(rtlpriv, PHY_CCA, 0x0);
1301 udelay(10);
1302 rtl_write_word(rtlpriv, CMDR, 0x37FC);
1303 udelay(10);
1304 rtl_write_word(rtlpriv, CMDR, 0x77FC);
1305 udelay(10);
1306 rtl_write_word(rtlpriv, CMDR, 0x57FC);
1307 rtl_write_word(rtlpriv, CMDR, 0x0000);
1308
1309 if (rtlhal->driver_going2unload) {
1310 u1btmp = rtl_read_byte(rtlpriv, (REG_SYS_FUNC_EN + 1));
1311 u1btmp &= ~(BIT(0));
1312 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1btmp);
1313 }
1314
1315 u1btmp = rtl_read_byte(rtlpriv, (SYS_CLKR + 1));
1316
1317 /* After switching the control path, registers beyond page 1
1318  * become invisible and no I/O can be done for registers > 0x40.
1319  * After resume and MAC-IO reset we need to remember the previous
1320  * register contents. */
1321 if (u1btmp & BIT(7)) {
1322 u1btmp &= ~(BIT(6) | BIT(7));
1323 if (!_rtl92s_set_sysclk(hw, u1btmp)) {
1324 printk(KERN_ERR "Switch ctrl path fail\n");
1325 return;
1326 }
1327 }
1328
1329 /* Power save for MAC */
1330 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS &&
1331 !rtlhal->driver_going2unload) {
1332 /* enable LED function */
1333 rtl_write_byte(rtlpriv, 0x03, 0xF9);
1334 /* SW/HW radio off or halt adapter!! For example S3/S4 */
1335 } else {
1336 /* Disable the LED function. Power draw is about 8 mA now. */
1337 /* If we write 0xF1, the disconnect_pci and
1338  * 'ifconfig wlan0 down' power are both high (35:70). */
1339 /* If we write 0xF9, the disconnect_pci and
1340  * 'ifconfig wlan0 down' power are both low (12:45). */
1341 rtl_write_byte(rtlpriv, 0x03, 0xF9);
1342 }
1343
1344 rtl_write_byte(rtlpriv, SYS_CLKR + 1, 0x70);
1345 rtl_write_byte(rtlpriv, AFE_PLL_CTRL + 1, 0x68);
1346 rtl_write_byte(rtlpriv, AFE_PLL_CTRL, 0x00);
1347 rtl_write_byte(rtlpriv, LDOA15_CTRL, 0x34);
1348 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL, 0x0E);
1349 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1350
1351}
1352
1353static void _rtl92se_gen_refreshledstate(struct ieee80211_hw *hw)
1354{
1355 struct rtl_priv *rtlpriv = rtl_priv(hw);
1356 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1357 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1358 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
1359
1360 if (rtlpci->up_first_time == 1)
1361 return;
1362
1363 if (rtlpriv->psc.rfoff_reason == RF_CHANGE_BY_IPS)
1364 rtl92se_sw_led_on(hw, pLed0);
1365 else
1366 rtl92se_sw_led_off(hw, pLed0);
1367}
1368
1369
1370static void _rtl92se_power_domain_init(struct ieee80211_hw *hw)
1371{
1372 struct rtl_priv *rtlpriv = rtl_priv(hw);
1373 u16 tmpu2b;
1374 u8 tmpu1b;
1375
1376 rtlpriv->psc.pwrdomain_protect = true;
1377
1378 tmpu1b = rtl_read_byte(rtlpriv, (SYS_CLKR + 1));
1379 if (tmpu1b & BIT(7)) {
1380 tmpu1b &= ~(BIT(6) | BIT(7));
1381 if (!_rtl92s_set_sysclk(hw, tmpu1b)) {
1382 rtlpriv->psc.pwrdomain_protect = false;
1383 return;
1384 }
1385 }
1386
1387 rtl_write_byte(rtlpriv, AFE_PLL_CTRL, 0x0);
1388 rtl_write_byte(rtlpriv, LDOA15_CTRL, 0x34);
1389
1390 /* Reset MAC-IO and CPU and Core Digital BIT10/11/15 */
1391 tmpu1b = rtl_read_byte(rtlpriv, SYS_FUNC_EN + 1);
1392
1393 /* For IPS we need to keep the LED on, so we do
1394  * not disable BIT 3/7 of reg 3. */
1395 if (rtlpriv->psc.rfoff_reason & (RF_CHANGE_BY_IPS | RF_CHANGE_BY_HW))
1396 tmpu1b &= 0xFB;
1397 else
1398 tmpu1b &= 0x73;
1399
1400 rtl_write_byte(rtlpriv, SYS_FUNC_EN + 1, tmpu1b);
1401 /* wait for BIT 10/11/15 to pull high automatically!! */
1402 mdelay(1);
1403
1404 rtl_write_byte(rtlpriv, CMDR, 0);
1405 rtl_write_byte(rtlpriv, TCR, 0);
1406
1407 /* The data sheet does not define 0x562; this is copied from WMAC. */
1408 tmpu1b = rtl_read_byte(rtlpriv, 0x562);
1409 tmpu1b |= 0x08;
1410 rtl_write_byte(rtlpriv, 0x562, tmpu1b);
1411 tmpu1b &= ~(BIT(3));
1412 rtl_write_byte(rtlpriv, 0x562, tmpu1b);
1413
1414 /* Enable AFE clock source */
1415 tmpu1b = rtl_read_byte(rtlpriv, AFE_XTAL_CTRL);
1416 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL, (tmpu1b | 0x01));
1417 /* Delay 1.5ms */
1418 udelay(1500);
1419 tmpu1b = rtl_read_byte(rtlpriv, AFE_XTAL_CTRL + 1);
1420 rtl_write_byte(rtlpriv, AFE_XTAL_CTRL + 1, (tmpu1b & 0xfb));
1421
1422 /* Enable AFE Macro Block's Bandgap */
1423 tmpu1b = rtl_read_byte(rtlpriv, AFE_MISC);
1424 rtl_write_byte(rtlpriv, AFE_MISC, (tmpu1b | BIT(0)));
1425 mdelay(1);
1426
1427 /* Enable AFE Mbias */
1428 tmpu1b = rtl_read_byte(rtlpriv, AFE_MISC);
1429 rtl_write_byte(rtlpriv, AFE_MISC, (tmpu1b | 0x02));
1430 mdelay(1);
1431
1432 /* Enable LDOA15 block */
1433 tmpu1b = rtl_read_byte(rtlpriv, LDOA15_CTRL);
1434 rtl_write_byte(rtlpriv, LDOA15_CTRL, (tmpu1b | BIT(0)));
1435
1436 /* Set Digital Vdd to Retention isolation Path. */
1437 tmpu2b = rtl_read_word(rtlpriv, SYS_ISO_CTRL);
1438 rtl_write_word(rtlpriv, SYS_ISO_CTRL, (tmpu2b | BIT(11)));
1439
1440
1441 /* Workaround for the NIC-disappears-after-warm-reboot bug. */
1442 tmpu2b = rtl_read_word(rtlpriv, SYS_FUNC_EN);
1443 rtl_write_word(rtlpriv, SYS_FUNC_EN, (tmpu2b | BIT(13)));
1444
1445 rtl_write_byte(rtlpriv, SYS_ISO_CTRL + 1, 0x68);
1446
1447 /* Enable AFE PLL Macro Block */
1448 tmpu1b = rtl_read_byte(rtlpriv, AFE_PLL_CTRL);
1449 rtl_write_byte(rtlpriv, AFE_PLL_CTRL, (tmpu1b | BIT(0) | BIT(4)));
1450 /* Enable MAC 80MHZ clock */
1451 tmpu1b = rtl_read_byte(rtlpriv, AFE_PLL_CTRL + 1);
1452 rtl_write_byte(rtlpriv, AFE_PLL_CTRL + 1, (tmpu1b | BIT(0)));
1453 mdelay(1);
1454
1455 /* Release isolation AFE PLL & MD */
1456 rtl_write_byte(rtlpriv, SYS_ISO_CTRL, 0xA6);
1457
1458 /* Enable MAC clock */
1459 tmpu2b = rtl_read_word(rtlpriv, SYS_CLKR);
1460 rtl_write_word(rtlpriv, SYS_CLKR, (tmpu2b | BIT(12) | BIT(11)));
1461
1462 /* Enable Core digital and enable IOREG R/W */
1463 tmpu2b = rtl_read_word(rtlpriv, SYS_FUNC_EN);
1464 rtl_write_word(rtlpriv, SYS_FUNC_EN, (tmpu2b | BIT(11)));
1465 /* enable REG_EN */
1466 rtl_write_word(rtlpriv, SYS_FUNC_EN, (tmpu2b | BIT(11) | BIT(15)));
1467
1468 /* Switch the control path. */
1469 tmpu2b = rtl_read_word(rtlpriv, SYS_CLKR);
1470 rtl_write_word(rtlpriv, SYS_CLKR, (tmpu2b & (~BIT(2))));
1471
1472 tmpu1b = rtl_read_byte(rtlpriv, (SYS_CLKR + 1));
1473 tmpu1b = ((tmpu1b | BIT(7)) & (~BIT(6)));
1474 if (!_rtl92s_set_sysclk(hw, tmpu1b)) {
1475 rtlpriv->psc.pwrdomain_protect = false;
1476 return;
1477 }
1478
1479 rtl_write_word(rtlpriv, CMDR, 0x37FC);
1480
1481 /* After MAC-IO reset, we must refresh the LED state. */
1482 _rtl92se_gen_refreshledstate(hw);
1483
1484 rtlpriv->psc.pwrdomain_protect = false;
1485}
1486
1487void rtl92se_card_disable(struct ieee80211_hw *hw)
1488{
1489 struct rtl_priv *rtlpriv = rtl_priv(hw);
1490 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1491 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1492 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1493 enum nl80211_iftype opmode;
1494 u8 wait = 30;
1495
1496 rtlpriv->intf_ops->enable_aspm(hw);
1497
1498 if (rtlpci->driver_is_goingto_unload ||
1499 ppsc->rfoff_reason > RF_CHANGE_BY_PS)
1500 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1501
1502 /* We should change the GPIO to input mode;
1503  * this drops the current draw by about 25 mA. */
1504 rtl8192se_gpiobit3_cfg_inputmode(hw);
1505
1506 /* this is very important for ips power save */
1507 while (wait-- >= 10 && rtlpriv->psc.pwrdomain_protect) {
1508 if (rtlpriv->psc.pwrdomain_protect)
1509 mdelay(20);
1510 else
1511 break;
1512 }
1513
1514 mac->link_state = MAC80211_NOLINK;
1515 opmode = NL80211_IFTYPE_UNSPECIFIED;
1516 _rtl92se_set_media_status(hw, opmode);
1517
1518 _rtl92s_phy_set_rfhalt(hw);
1519 udelay(100);
1520}
1521
1522void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
1523 u32 *p_intb)
1524{
1525 struct rtl_priv *rtlpriv = rtl_priv(hw);
1526 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1527
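	/* Read the enabled interrupt status bits and write the same bits back
	 * to acknowledge them before handing them to the caller. */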
1528 *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
1529 rtl_write_dword(rtlpriv, ISR, *p_inta);
1530
1531 *p_intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
1532 rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
1533}
1534
1535void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw)
1536{
1537 struct rtl_priv *rtlpriv = rtl_priv(hw);
1538 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1539 u16 bcntime_cfg = 0;
1540 u16 bcn_cw = 6, bcn_ifs = 0xf;
1541 u16 atim_window = 2;
1542
1543 /* ATIM Window (in unit of TU). */
1544 rtl_write_word(rtlpriv, ATIMWND, atim_window);
1545
1546 /* Beacon interval (in unit of TU). */
1547 rtl_write_word(rtlpriv, BCN_INTERVAL, mac->beacon_interval);
1548
1549 /* DrvErlyInt (in units of TU): when to send the
1550  * interrupt that notifies the driver to update the
1551  * beacon content. */
1552 rtl_write_word(rtlpriv, BCN_DRV_EARLY_INT, 10 << 4);
1553
1554 /* BcnDMATIM (in units of us): indicates the
1555  * time before TBTT at which to perform beacon queue DMA. */
1556 rtl_write_word(rtlpriv, BCN_DMATIME, 256);
1557
1558 /* Force beacon frame transmission even
1559  * after receiving a beacon frame from
1560  * another ad hoc STA. */
1561 rtl_write_byte(rtlpriv, BCN_ERR_THRESH, 100);
1562
1563 /* Beacon Time Configuration */
1564 if (mac->opmode == NL80211_IFTYPE_ADHOC)
1565 bcntime_cfg |= (bcn_cw << BCN_TCFG_CW_SHIFT);
1566
1567 /* TODO: bcn_ifs may need to be changed on the ASIC */
1568 bcntime_cfg |= bcn_ifs << BCN_TCFG_IFS;
1569
1570 /* For the beacon change */
1571 rtl92s_phy_set_beacon_hwreg(hw, mac->beacon_interval);
1572}
1573
1574void rtl92se_set_beacon_interval(struct ieee80211_hw *hw)
1575{
1576 struct rtl_priv *rtlpriv = rtl_priv(hw);
1577 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1578 u16 bcn_interval = mac->beacon_interval;
1579
1580 /* Beacon interval (in unit of TU). */
1581 rtl_write_word(rtlpriv, BCN_INTERVAL, bcn_interval);
1582 /* 2008.10.24 added by tynli for beacon changed. */
1583 rtl92s_phy_set_beacon_hwreg(hw, bcn_interval);
1584}
1585
1586void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw,
1587 u32 add_msr, u32 rm_msr)
1588{
1589 struct rtl_priv *rtlpriv = rtl_priv(hw);
1590 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1591
1592 RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
1593 ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
1594
1595 if (add_msr)
1596 rtlpci->irq_mask[0] |= add_msr;
1597
1598 if (rm_msr)
1599 rtlpci->irq_mask[0] &= (~rm_msr);
1600
1601 rtl92se_disable_interrupt(hw);
1602 rtl92se_enable_interrupt(hw);
1603}
1604
1605static void _rtl8192se_get_IC_Inferiority(struct ieee80211_hw *hw)
1606{
1607 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1608 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1609 u8 efuse_id;
1610
1611 rtlhal->ic_class = IC_INFERIORITY_A;
1612
1613 /* Only retrieving while using EFUSE. */
1614 if ((rtlefuse->epromtype == EEPROM_BOOT_EFUSE) &&
1615 !rtlefuse->autoload_failflag) {
1616 efuse_id = efuse_read_1byte(hw, EFUSE_IC_ID_OFFSET);
1617
1618 if (efuse_id == 0xfe)
1619 rtlhal->ic_class = IC_INFERIORITY_B;
1620 }
1621}
1622
1623static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1624{
1625 struct rtl_priv *rtlpriv = rtl_priv(hw);
1626 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1627 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1628 u16 i, usvalue;
1629 u16 eeprom_id;
1630 u8 tempval;
1631 u8 hwinfo[HWSET_MAX_SIZE_92S];
1632 u8 rf_path, index;
1633
1634 if (rtlefuse->epromtype == EEPROM_93C46) {
1635 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1636 ("RTL819X Not boot from eeprom, check it !!"));
1637 } else if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
1638 rtl_efuse_shadow_map_update(hw);
1639
1640 memcpy((void *)hwinfo, (void *)
1641 &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
1642 HWSET_MAX_SIZE_92S);
1643 }
1644
1645 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
1646 hwinfo, HWSET_MAX_SIZE_92S);
1647
1648 eeprom_id = *((u16 *)&hwinfo[0]);
1649 if (eeprom_id != RTL8190_EEPROM_ID) {
1650 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1651 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
1652 rtlefuse->autoload_failflag = true;
1653 } else {
1654 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
1655 rtlefuse->autoload_failflag = false;
1656 }
1657
1658 if (rtlefuse->autoload_failflag)
1659 return;
1660
1661 _rtl8192se_get_IC_Inferiority(hw);
1662
1663 /* Read IC Version && Channel Plan */
1664 /* VID, DID SE 0xA-D */
1665 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
1666 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
1667 rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
1668 rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
1669 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1670
1671 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1672 ("EEPROMId = 0x%4x\n", eeprom_id));
1673 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1674 ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
1675 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1676 ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
1677 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1678 ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
1679 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1680 ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
1681
1682 for (i = 0; i < 6; i += 2) {
1683 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
1684 *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
1685 }
1686
1687 for (i = 0; i < 6; i++)
1688 rtl_write_byte(rtlpriv, MACIDR0 + i, rtlefuse->dev_addr[i]);
1689
1690 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1691 (MAC_FMT "\n", MAC_ARG(rtlefuse->dev_addr)));
1692
1693 /* Get Tx Power Level by Channel */
1694 /* Read Tx power of Channel 1 ~ 14 from EEPROM. */
1695 /* The 92S supports RF A & B */
1696 for (rf_path = 0; rf_path < 2; rf_path++) {
1697 for (i = 0; i < 3; i++) {
1698 /* Read CCK RF A & B Tx power */
1699 rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][i] =
1700 hwinfo[EEPROM_TXPOWERBASE + rf_path * 3 + i];
1701
1702 /* Read OFDM RF A & B Tx power for 1T */
1703 rtlefuse->eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
1704 hwinfo[EEPROM_TXPOWERBASE + 6 + rf_path * 3 + i];
1705
1706 /* Read OFDM RF A & B Tx power for 2T */
1707 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]
1708 = hwinfo[EEPROM_TXPOWERBASE + 12 +
1709 rf_path * 3 + i];
1710 }
1711 }
1712
1713 for (rf_path = 0; rf_path < 2; rf_path++)
1714 for (i = 0; i < 3; i++)
1715 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1716 ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
1717 i, rtlefuse->eeprom_chnlarea_txpwr_cck
1718 [rf_path][i]));
1719 for (rf_path = 0; rf_path < 2; rf_path++)
1720 for (i = 0; i < 3; i++)
1721 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1722 ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
1723 rf_path, i,
1724 rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
1725 [rf_path][i]));
1726 for (rf_path = 0; rf_path < 2; rf_path++)
1727 for (i = 0; i < 3; i++)
1728 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1729 ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
1730 rf_path, i,
1731 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
1732 [rf_path][i]));
1733
1734 for (rf_path = 0; rf_path < 2; rf_path++) {
1735
1736 /* Assign dedicated channel tx power */
1737 for (i = 0; i < 14; i++) {
1738 /* Channels 1-3 use the same Tx power level. */
1739 if (i < 3)
1740 index = 0;
1741 /* Channel 4-8 */
1742 else if (i < 8)
1743 index = 1;
1744 /* Channel 9-14 */
1745 else
1746 index = 2;
1747
1748 /* Record A & B CCK /OFDM - 1T/2T Channel area
1749 * tx power */
1750 rtlefuse->txpwrlevel_cck[rf_path][i] =
1751 rtlefuse->eeprom_chnlarea_txpwr_cck
1752 [rf_path][index];
1753 rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
1754 rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
1755 [rf_path][index];
1756 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
1757 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif
1758 [rf_path][index];
1759 }
1760
1761 for (i = 0; i < 14; i++) {
1762 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1763 ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
1764 "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
1765 rtlefuse->txpwrlevel_cck[rf_path][i],
1766 rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
1767 rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
1768 }
1769 }
1770
1771 for (rf_path = 0; rf_path < 2; rf_path++) {
1772 for (i = 0; i < 3; i++) {
1773 /* Read Power diff limit. */
1774 rtlefuse->eeprom_pwrgroup[rf_path][i] =
1775 hwinfo[EEPROM_TXPWRGROUP + rf_path * 3 + i];
1776 }
1777 }
1778
1779 for (rf_path = 0; rf_path < 2; rf_path++) {
1780 /* Fill Pwr group */
1781 for (i = 0; i < 14; i++) {
1782 /* Channels 1-3 */
1783 if (i < 3)
1784 index = 0;
1785 /* Channel 4-8 */
1786 else if (i < 8)
1787 index = 1;
1788 /* Channels 9-14 */
1789 else
1790 index = 2;
1791
1792 rtlefuse->pwrgroup_ht20[rf_path][i] =
1793 (rtlefuse->eeprom_pwrgroup[rf_path][index] &
1794 0xf);
1795 rtlefuse->pwrgroup_ht40[rf_path][i] =
1796 ((rtlefuse->eeprom_pwrgroup[rf_path][index] &
1797 0xf0) >> 4);
1798
1799 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1800 ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
1801 rf_path, i,
1802 rtlefuse->pwrgroup_ht20[rf_path][i]));
1803 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1804 ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
1805 rf_path, i,
1806 rtlefuse->pwrgroup_ht40[rf_path][i]));
1807 }
1808 }
1809
1810 for (i = 0; i < 14; i++) {
1811 /* Read tx power difference between HT OFDM 20/40 MHZ */
1812 /* channel 1-3 */
1813 if (i < 3)
1814 index = 0;
1815 /* Channel 4-8 */
1816 else if (i < 8)
1817 index = 1;
1818 /* Channel 9-14 */
1819 else
1820 index = 2;
1821
1822 tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_HT20_DIFF +
1823 index]) & 0xff;
1824 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
1825 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
1826 ((tempval >> 4) & 0xF);
1827
1828 /* Read OFDM<->HT tx power diff */
1829 /* Channel 1-3 */
1830 if (i < 3)
1831 index = 0;
1832 /* Channel 4-8 */
1833 else if (i < 8)
1834 index = 0x11;
1835 /* Channel 9-14 */
1836 else
1837 index = 1;
1838
1839 tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index])
1840 & 0xff;
1841 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] =
1842 (tempval & 0xF);
1843 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
1844 ((tempval >> 4) & 0xF);
1845
1846 tempval = (*(u8 *)&hwinfo[TX_PWR_SAFETY_CHK]);
1847 rtlefuse->txpwr_safetyflag = (tempval & 0x01);
1848 }
1849
1850 rtlefuse->eeprom_regulatory = 0;
1851 if (rtlefuse->eeprom_version >= 2) {
1852 /* BIT(0)~2 */
1853 if (rtlefuse->eeprom_version >= 4)
1854 rtlefuse->eeprom_regulatory =
1855 (hwinfo[EEPROM_REGULATORY] & 0x7);
1856 else /* BIT(0) */
1857 rtlefuse->eeprom_regulatory =
1858 (hwinfo[EEPROM_REGULATORY] & 0x1);
1859 }
1860 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1861 ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
1862
1863 for (i = 0; i < 14; i++)
1864 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1865 ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
1866 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
1867 for (i = 0; i < 14; i++)
1868 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1869 ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
1870 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
1871 for (i = 0; i < 14; i++)
1872 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1873 ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
1874 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
1875 for (i = 0; i < 14; i++)
1876 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1877 ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
1878 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
1879
1880 RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("TxPwrSafetyFlag = %d\n",
1881 rtlefuse->txpwr_safetyflag));
1882
1883 /* Read RF-indication and Tx Power gain
1884 * index diff of legacy to HT OFDM rate. */
1885 tempval = (*(u8 *)&hwinfo[EEPROM_RFIND_POWERDIFF]) & 0xff;
1886 rtlefuse->eeprom_txpowerdiff = tempval;
1887 rtlefuse->legacy_httxpowerdiff =
1888 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
1889
1890 RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("TxPowerDiff = %#x\n",
1891 rtlefuse->eeprom_txpowerdiff));
1892
1893 /* Get TSSI value for each path. */
1894 usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A];
1895 rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8);
1896 usvalue = *(u8 *)&hwinfo[EEPROM_TSSI_B];
1897 rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
1898
1899 RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
1900 rtlefuse->eeprom_tssi[RF90_PATH_A],
1901 rtlefuse->eeprom_tssi[RF90_PATH_B]));
1902
1903 /* Read antenna tx power offset of B/C/D to A from EEPROM */
1904 /* and read ThermalMeter from EEPROM */
1905 tempval = *(u8 *)&hwinfo[EEPROM_THERMALMETER];
1906 rtlefuse->eeprom_thermalmeter = tempval;
1907 RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("thermalmeter = 0x%x\n",
1908 rtlefuse->eeprom_thermalmeter));
1909
1910 /* ThermalMeter, BIT(0)~3 for RFIC1, BIT(4)~7 for RFIC2 */
1911 rtlefuse->thermalmeter[0] = (rtlefuse->eeprom_thermalmeter & 0x1f);
1912 rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100;
1913
1914 /* Read CrystalCap from EEPROM */
1915 tempval = (*(u8 *)&hwinfo[EEPROM_CRYSTALCAP]) >> 4;
1916 rtlefuse->eeprom_crystalcap = tempval;
1917 /* CrystalCap, BIT(12)~15 */
1918 rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap;
1919
1920 /* Read IC Version && Channel Plan */
1921 /* Version ID, Channel plan */
1922 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
1923 rtlefuse->txpwr_fromeprom = true;
1924 RTPRINT(rtlpriv, FINIT, INIT_TxPower, ("EEPROM ChannelPlan = 0x%4x\n",
1925 rtlefuse->eeprom_channelplan));
1926
1927 /* Read Customer ID or Board Type!!! */
1928 tempval = *(u8 *)&hwinfo[EEPROM_BOARDTYPE];
1929 /* Change RF type definition */
1930 if (tempval == 0)
1931 rtlphy->rf_type = RF_2T2R;
1932 else if (tempval == 1)
1933 rtlphy->rf_type = RF_1T2R;
1934 else if (tempval == 2)
1935 rtlphy->rf_type = RF_1T2R;
1936 else if (tempval == 3)
1937 rtlphy->rf_type = RF_1T1R;
1938
1939 /* 1T2R but 1SS (1x1 receive combining) */
1940 rtlefuse->b1x1_recvcombine = false;
1941 if (rtlphy->rf_type == RF_1T2R) {
1942 tempval = rtl_read_byte(rtlpriv, 0x07);
1943 if (!(tempval & BIT(0))) {
1944 rtlefuse->b1x1_recvcombine = true;
1945 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1946 ("RF_TYPE=1T2R but only 1SS\n"));
1947 }
1948 }
1949 rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
1950 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMID];
1951
1952 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("EEPROM Customer ID: 0x%2x",
1953 rtlefuse->eeprom_oemid));
1954
1955 /* Set the channel plan to world-wide 13 */
1956 rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
1957}
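
The channel-to-group mapping used repeatedly above (channels 1-3 -> group 0, channels 4-8 -> group 1, channels 9-14 -> group 2) could be captured in one small helper; shown here as a sketch only, not part of the patch:

	/* Sketch: map a 0-based channel index (0..13, i.e. channels 1..14) to
	 * the EEPROM power-group index used in _rtl92se_read_adapter_info(). */
	static u8 chan_to_pwr_group(u8 chan_idx)
	{
		if (chan_idx < 3)	/* channels 1-3 */
			return 0;
		if (chan_idx < 8)	/* channels 4-8 */
			return 1;
		return 2;		/* channels 9-14 */
	}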
1958
1959void rtl92se_read_eeprom_info(struct ieee80211_hw *hw)
1960{
1961 struct rtl_priv *rtlpriv = rtl_priv(hw);
1962 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1963 u8 tmp_u1b = 0;
1964
1965 tmp_u1b = rtl_read_byte(rtlpriv, EPROM_CMD);
1966
1967 if (tmp_u1b & BIT(4)) {
1968 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
1969 rtlefuse->epromtype = EEPROM_93C46;
1970 } else {
1971 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
1972 rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
1973 }
1974
1975 if (tmp_u1b & BIT(5)) {
1976 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
1977 rtlefuse->autoload_failflag = false;
1978 _rtl92se_read_adapter_info(hw);
1979 } else {
1980 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
1981 rtlefuse->autoload_failflag = true;
1982 }
1983}
1984
1985static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
1986 struct ieee80211_sta *sta)
1987{
1988 struct rtl_priv *rtlpriv = rtl_priv(hw);
1989 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1990 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1991 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1992 u32 ratr_value;
1993 u8 ratr_index = 0;
1994 u8 nmode = mac->ht_enable;
1995 u8 mimo_ps = IEEE80211_SMPS_OFF;
1996 u16 shortgi_rate = 0;
1997 u32 tmp_ratr_value = 0;
1998 u8 curtxbw_40mhz = mac->bw_40;
1999 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
2000 1 : 0;
2001 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
2002 1 : 0;
2003 enum wireless_mode wirelessmode = mac->mode;
2004
2005 if (rtlhal->current_bandtype == BAND_ON_5G)
2006 ratr_value = sta->supp_rates[1] << 4;
2007 else
2008 ratr_value = sta->supp_rates[0];
2009 ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2010 sta->ht_cap.mcs.rx_mask[0] << 12);
2011 switch (wirelessmode) {
2012 case WIRELESS_MODE_B:
2013 ratr_value &= 0x0000000D;
2014 break;
2015 case WIRELESS_MODE_G:
2016 ratr_value &= 0x00000FF5;
2017 break;
2018 case WIRELESS_MODE_N_24G:
2019 case WIRELESS_MODE_N_5G:
2020 nmode = 1;
2021 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2022 ratr_value &= 0x0007F005;
2023 } else {
2024 u32 ratr_mask;
2025
2026 if (get_rf_type(rtlphy) == RF_1T2R ||
2027 get_rf_type(rtlphy) == RF_1T1R) {
2028 if (curtxbw_40mhz)
2029 ratr_mask = 0x000ff015;
2030 else
2031 ratr_mask = 0x000ff005;
2032 } else {
2033 if (curtxbw_40mhz)
2034 ratr_mask = 0x0f0ff015;
2035 else
2036 ratr_mask = 0x0f0ff005;
2037 }
2038
2039 ratr_value &= ratr_mask;
2040 }
2041 break;
2042 default:
2043 if (rtlphy->rf_type == RF_1T2R)
2044 ratr_value &= 0x000ff0ff;
2045 else
2046 ratr_value &= 0x0f0ff0ff;
2047
2048 break;
2049 }
2050
2051 if (rtlpriv->rtlhal.version >= VERSION_8192S_BCUT)
2052 ratr_value &= 0x0FFFFFFF;
2053 else if (rtlpriv->rtlhal.version == VERSION_8192S_ACUT)
2054 ratr_value &= 0x0FFFFFF0;
2055
2056 if (nmode && ((curtxbw_40mhz &&
2057 curshortgi_40mhz) || (!curtxbw_40mhz &&
2058 curshortgi_20mhz))) {
2059
2060 ratr_value |= 0x10000000;
2061 tmp_ratr_value = (ratr_value >> 12);
2062
2063 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
2064 if ((1 << shortgi_rate) & tmp_ratr_value)
2065 break;
2066 }
2067
2068 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
2069 (shortgi_rate << 4) | (shortgi_rate);
2070
2071 rtl_write_byte(rtlpriv, SG_RATE, shortgi_rate);
2072 }
2073
2074 rtl_write_dword(rtlpriv, ARFR0 + ratr_index * 4, ratr_value);
2075 if (ratr_value & 0xfffff000)
2076 rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_REFRESH_N);
2077 else
2078 rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_REFRESH_BG);
2079
2080 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2081 ("%x\n", rtl_read_dword(rtlpriv, ARFR0)));
2082}
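
The descending scan above just finds the highest MCS bit set in tmp_ratr_value; only bits 1-15 matter, since bit 16 comes from the 0x10000000 flag OR'd in just before the shift. For reference, the same result can be obtained with the kernel's fls() (1-based bit position); an equivalent sketch, reusing the local names from the function above:

	/* Sketch: highest set bit among bits 1..15 of the shifted rate mask,
	 * or 0 if none of them is set -- same outcome as the loop above. */
	u16 mcs_bits = tmp_ratr_value & 0xfffe;
	u8 top_rate = mcs_bits ? (fls(mcs_bits) - 1) : 0;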
2083
2084static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
2085 struct ieee80211_sta *sta,
2086 u8 rssi_level)
2087{
2088 struct rtl_priv *rtlpriv = rtl_priv(hw);
2089 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2090 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2091 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2092 struct rtl_sta_info *sta_entry = NULL;
2093 u32 ratr_bitmap;
2094 u8 ratr_index = 0;
2095 u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
2096 ? 1 : 0;
2097 u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
2098 1 : 0;
2099 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
2100 1 : 0;
2101 enum wireless_mode wirelessmode = 0;
2102 bool shortgi = false;
2103 u32 ratr_value = 0;
2104 u8 shortgi_rate = 0;
2105 u32 mask = 0;
2106 u32 band = 0;
2107 bool bmulticast = false;
2108 u8 macid = 0;
2109 u8 mimo_ps = IEEE80211_SMPS_OFF;
2110
2111 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
2112 wirelessmode = sta_entry->wireless_mode;
2113 if (mac->opmode == NL80211_IFTYPE_STATION)
2114 curtxbw_40mhz = mac->bw_40;
2115 else if (mac->opmode == NL80211_IFTYPE_AP ||
2116 mac->opmode == NL80211_IFTYPE_ADHOC)
2117 macid = sta->aid + 1;
2118
2119 if (rtlhal->current_bandtype == BAND_ON_5G)
2120 ratr_bitmap = sta->supp_rates[1] << 4;
2121 else
2122 ratr_bitmap = sta->supp_rates[0];
2123 ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
2124 sta->ht_cap.mcs.rx_mask[0] << 12);
2125 switch (wirelessmode) {
2126 case WIRELESS_MODE_B:
2127 band |= WIRELESS_11B;
2128 ratr_index = RATR_INX_WIRELESS_B;
2129 if (ratr_bitmap & 0x0000000c)
2130 ratr_bitmap &= 0x0000000d;
2131 else
2132 ratr_bitmap &= 0x0000000f;
2133 break;
2134 case WIRELESS_MODE_G:
2135 band |= (WIRELESS_11G | WIRELESS_11B);
2136 ratr_index = RATR_INX_WIRELESS_GB;
2137
2138 if (rssi_level == 1)
2139 ratr_bitmap &= 0x00000f00;
2140 else if (rssi_level == 2)
2141 ratr_bitmap &= 0x00000ff0;
2142 else
2143 ratr_bitmap &= 0x00000ff5;
2144 break;
2145 case WIRELESS_MODE_A:
2146 band |= WIRELESS_11A;
2147 ratr_index = RATR_INX_WIRELESS_A;
2148 ratr_bitmap &= 0x00000ff0;
2149 break;
2150 case WIRELESS_MODE_N_24G:
2151 case WIRELESS_MODE_N_5G:
2152 band |= (WIRELESS_11N | WIRELESS_11G | WIRELESS_11B);
2153 ratr_index = RATR_INX_WIRELESS_NGB;
2154
2155 if (mimo_ps == IEEE80211_SMPS_STATIC) {
2156 if (rssi_level == 1)
2157 ratr_bitmap &= 0x00070000;
2158 else if (rssi_level == 2)
2159 ratr_bitmap &= 0x0007f000;
2160 else
2161 ratr_bitmap &= 0x0007f005;
2162 } else {
2163 if (rtlphy->rf_type == RF_1T2R ||
2164 rtlphy->rf_type == RF_1T1R) {
2165 if (rssi_level == 1) {
2166 ratr_bitmap &= 0x000f0000;
2167 } else if (rssi_level == 3) {
2168 ratr_bitmap &= 0x000fc000;
2169 } else if (rssi_level == 5) {
2170 ratr_bitmap &= 0x000ff000;
2171 } else {
2172 if (curtxbw_40mhz)
2173 ratr_bitmap &= 0x000ff015;
2174 else
2175 ratr_bitmap &= 0x000ff005;
2176 }
2177 } else {
2178 if (rssi_level == 1) {
2179 ratr_bitmap &= 0x0f8f0000;
2180 } else if (rssi_level == 3) {
2181 ratr_bitmap &= 0x0f8fc000;
2182 } else if (rssi_level == 5) {
2183 ratr_bitmap &= 0x0f8ff000;
2184 } else {
2185 if (curtxbw_40mhz)
2186 ratr_bitmap &= 0x0f8ff015;
2187 else
2188 ratr_bitmap &= 0x0f8ff005;
2189 }
2190 }
2191 }
2192
2193 if ((curtxbw_40mhz && curshortgi_40mhz) ||
2194 (!curtxbw_40mhz && curshortgi_20mhz)) {
2195 if (macid == 0)
2196 shortgi = true;
2197 else if (macid == 1)
2198 shortgi = false;
2199 }
2200 break;
2201 default:
2202 band |= (WIRELESS_11N | WIRELESS_11G | WIRELESS_11B);
2203 ratr_index = RATR_INX_WIRELESS_NGB;
2204
2205 if (rtlphy->rf_type == RF_1T2R)
2206 ratr_bitmap &= 0x000ff0ff;
2207 else
2208 ratr_bitmap &= 0x0f8ff0ff;
2209 break;
2210 }
2211
2212 if (rtlpriv->rtlhal.version >= VERSION_8192S_BCUT)
2213 ratr_bitmap &= 0x0FFFFFFF;
2214 else if (rtlpriv->rtlhal.version == VERSION_8192S_ACUT)
2215 ratr_bitmap &= 0x0FFFFFF0;
2216
2217 if (shortgi) {
2218 ratr_bitmap |= 0x10000000;
2219 /* Get MAX MCS available. */
2220 ratr_value = (ratr_bitmap >> 12);
2221 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
2222 if ((1 << shortgi_rate) & ratr_value)
2223 break;
2224 }
2225
2226 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
2227 (shortgi_rate << 4) | (shortgi_rate);
2228 rtl_write_byte(rtlpriv, SG_RATE, shortgi_rate);
2229 }
2230
2231 mask |= (bmulticast ? 1 : 0) << 9 | (macid & 0x1f) << 4 | (band & 0xf);
2232
2233 RT_TRACE(rtlpriv, COMP_RATR, DBG_TRACE, ("mask = %x, bitmap = %x\n",
2234 mask, ratr_bitmap));
2235 rtl_write_dword(rtlpriv, 0x2c4, ratr_bitmap);
2236 rtl_write_dword(rtlpriv, WFM5, (FW_RA_UPDATE_MASK | (mask << 8)));
2237
2238 if (macid != 0)
2239 sta_entry->ratr_index = ratr_index;
2240}
2241
2242void rtl92se_update_hal_rate_tbl(struct ieee80211_hw *hw,
2243 struct ieee80211_sta *sta, u8 rssi_level)
2244{
2245 struct rtl_priv *rtlpriv = rtl_priv(hw);
2246
2247 if (rtlpriv->dm.useramask)
2248 rtl92se_update_hal_rate_mask(hw, sta, rssi_level);
2249 else
2250 rtl92se_update_hal_rate_table(hw, sta);
2251}
2252
2253void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw)
2254{
2255 struct rtl_priv *rtlpriv = rtl_priv(hw);
2256 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2257 u16 sifs_timer;
2258
2259 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2260 (u8 *)&mac->slot_time);
2261 sifs_timer = 0x0e0e;
2262 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2263
2264}
2265
2266/* This function is for RFKILL. It differs from Windows, where the UI
2267 * disables wireless when the GPIO reports radio off; here we do not
2268 * check or disable/enable ASPM the way Windows does. */
2269bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
2270{
2271 struct rtl_priv *rtlpriv = rtl_priv(hw);
2272 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2274 enum rf_pwrstate rfpwr_toset, cur_rfstate;
2275 unsigned long flag = 0;
2276 bool actuallyset = false;
2277 bool turnonbypowerdomain = false;
2278
2279 /* Only the 8191SE needs this check before the first up; 92C/92D have fixed it. */
2280 if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
2281 return false;
2282
2283 if (ppsc->swrf_processing)
2284 return false;
2285
2286 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2287 if (ppsc->rfchange_inprogress) {
2288 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2289 return false;
2290 } else {
2291 ppsc->rfchange_inprogress = true;
2292 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2293 }
2294
2295 cur_rfstate = ppsc->rfpwr_state;
2296
2297 /* Because all power is closed after _rtl92s_phy_set_rfhalt,
2298  * we must open some power for the GPIO check, or we would
2299  * always read GPIO RFOFF here. The power should be closed
2300  * again after the GPIO check. */
2301 if (RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
2302 _rtl92se_power_domain_init(hw);
2303 turnonbypowerdomain = true;
2304 }
2305
2306 rfpwr_toset = _rtl92se_rf_onoff_detect(hw);
2307
2308 if (ppsc->hwradiooff && (rfpwr_toset == ERFON)) {
2309 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2310 ("RFKILL-HW Radio ON, RF ON\n"));
2311
2312 rfpwr_toset = ERFON;
2313 ppsc->hwradiooff = false;
2314 actuallyset = true;
2315 } else if (!ppsc->hwradiooff && (rfpwr_toset == ERFOFF)) {
2316 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2317 ("RFKILL-HW Radio OFF, RF OFF\n"));
2318
2319 rfpwr_toset = ERFOFF;
2320 ppsc->hwradiooff = true;
2321 actuallyset = true;
2322 }
2323
2324 if (actuallyset) {
2325 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2326 ppsc->rfchange_inprogress = false;
2327 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2328
2329 /* This does not include the 'ifconfig wlan0 down' case. */
2330 /* } else if (rfpwr_toset == ERFOFF || cur_rfstate == ERFOFF) { */
2331 } else {
2332 /* Because power_domain_init may have run after
2333  * _rtl92s_phy_set_rfhalt, some power blocks may be open
2334  * again, increasing the current by about 40 mA for IPS,
2335  * rfoff and ifconfig-down, so we call
2336  * _rtl92s_phy_set_rfhalt again here. */
2337 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC &&
2338 turnonbypowerdomain) {
2339 _rtl92s_phy_set_rfhalt(hw);
2340 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2341 }
2342
2343 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2344 ppsc->rfchange_inprogress = false;
2345 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2346 }
2347
2348 *valid = 1;
2349 return !ppsc->hwradiooff;
2350
2351}
2352
2353/* is_wepkey is only used when WEP is both the group and pairwise key;
2354 * if the pairwise key is AES and the group key is WEP, is_wepkey == false. */
2355void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
2356 bool is_group, u8 enc_algo, bool is_wepkey, bool clear_all)
2357{
2358 struct rtl_priv *rtlpriv = rtl_priv(hw);
2359 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2360 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
2361 u8 *macaddr = p_macaddr;
2362
2363 u32 entry_id = 0;
2364 bool is_pairwise = false;
2365
2366 static u8 cam_const_addr[4][6] = {
2367 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
2368 {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
2369 {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
2370 {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
2371 };
2372 static u8 cam_const_broad[] = {
2373 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2374 };
2375
2376 if (clear_all) {
2377 u8 idx = 0;
2378 u8 cam_offset = 0;
2379 u8 clear_number = 5;
2380
2381 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
2382
2383 for (idx = 0; idx < clear_number; idx++) {
2384 rtl_cam_mark_invalid(hw, cam_offset + idx);
2385 rtl_cam_empty_entry(hw, cam_offset + idx);
2386
2387 if (idx < 5) {
2388 memset(rtlpriv->sec.key_buf[idx], 0,
2389 MAX_KEY_LEN);
2390 rtlpriv->sec.key_len[idx] = 0;
2391 }
2392 }
2393
2394 } else {
2395 switch (enc_algo) {
2396 case WEP40_ENCRYPTION:
2397 enc_algo = CAM_WEP40;
2398 break;
2399 case WEP104_ENCRYPTION:
2400 enc_algo = CAM_WEP104;
2401 break;
2402 case TKIP_ENCRYPTION:
2403 enc_algo = CAM_TKIP;
2404 break;
2405 case AESCCMP_ENCRYPTION:
2406 enc_algo = CAM_AES;
2407 break;
2408 default:
2409 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2410 ("switch case not process\n"));
2411 enc_algo = CAM_TKIP;
2412 break;
2413 }
2414
2415 if (is_wepkey || rtlpriv->sec.use_defaultkey) {
2416 macaddr = cam_const_addr[key_index];
2417 entry_id = key_index;
2418 } else {
2419 if (is_group) {
2420 macaddr = cam_const_broad;
2421 entry_id = key_index;
2422 } else {
2423 if (mac->opmode == NL80211_IFTYPE_AP) {
2424 entry_id = rtl_cam_get_free_entry(hw,
2425 p_macaddr);
2426 if (entry_id >= TOTAL_CAM_ENTRY) {
2427 RT_TRACE(rtlpriv,
2428 COMP_SEC, DBG_EMERG,
2429 ("Can not find free hw"
2430 " security cam entry\n"));
2431 return;
2432 }
2433 } else {
2434 entry_id = CAM_PAIRWISE_KEY_POSITION;
2435 }
2436
2437 key_index = PAIRWISE_KEYIDX;
2438 is_pairwise = true;
2439 }
2440 }
2441
2442 if (rtlpriv->sec.key_len[key_index] == 0) {
2443 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2444 ("delete one entry, entry_id is %d\n",
2445 entry_id));
2446 if (mac->opmode == NL80211_IFTYPE_AP)
2447 rtl_cam_del_entry(hw, p_macaddr);
2448 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
2449 } else {
2450 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
2451 ("The insert KEY length is %d\n",
2452 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
2453 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
2454 ("The insert KEY is %x %x\n",
2455 rtlpriv->sec.key_buf[0][0],
2456 rtlpriv->sec.key_buf[0][1]));
2457
2458 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2459 ("add one entry\n"));
2460 if (is_pairwise) {
2461 RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
2462 "Pairwiase Key content :",
2463 rtlpriv->sec.pairwise_key,
2464 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
2465
2466 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2467 ("set Pairwiase key\n"));
2468
2469 rtl_cam_add_one_entry(hw, macaddr, key_index,
2470 entry_id, enc_algo,
2471 CAM_CONFIG_NO_USEDK,
2472 rtlpriv->sec.key_buf[key_index]);
2473 } else {
2474 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2475 ("set group key\n"));
2476
2477 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
2478 rtl_cam_add_one_entry(hw,
2479 rtlefuse->dev_addr,
2480 PAIRWISE_KEYIDX,
2481 CAM_PAIRWISE_KEY_POSITION,
2482 enc_algo, CAM_CONFIG_NO_USEDK,
2483 rtlpriv->sec.key_buf[entry_id]);
2484 }
2485
2486 rtl_cam_add_one_entry(hw, macaddr, key_index,
2487 entry_id, enc_algo,
2488 CAM_CONFIG_NO_USEDK,
2489 rtlpriv->sec.key_buf[entry_id]);
2490 }
2491
2492 }
2493 }
2494}
2495
2496void rtl92se_suspend(struct ieee80211_hw *hw)
2497{
2498 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2499
2500 rtlpci->up_first_time = true;
2501}
2502
2503void rtl92se_resume(struct ieee80211_hw *hw)
2504{
2505 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2506 u32 val;
2507
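	/* If byte 0x41 of PCI config space came back non-zero after resume,
	 * clear it while leaving the rest of the dword at 0x40 intact. */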
2508 pci_read_config_dword(rtlpci->pdev, 0x40, &val);
2509 if ((val & 0x0000ff00) != 0)
2510 pci_write_config_dword(rtlpci->pdev, 0x40,
2511 val & 0xffff00ff);
2512}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
new file mode 100644
index 000000000000..6160a9bfe98a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -0,0 +1,79 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __REALTEK_PCI92SE_HW_H__
30#define __REALTEK_PCI92SE_HW_H__
31
32#define MSR_LINK_MANAGED 2
33#define MSR_LINK_NONE 0
34#define MSR_LINK_SHIFT 0
35#define MSR_LINK_ADHOC 1
36#define MSR_LINK_MASTER 3
37
38enum WIRELESS_NETWORK_TYPE {
39 WIRELESS_11B = 1,
40 WIRELESS_11G = 2,
41 WIRELESS_11A = 4,
42 WIRELESS_11N = 8
43};
44
45void rtl92se_get_hw_reg(struct ieee80211_hw *hw,
46 u8 variable, u8 *val);
47void rtl92se_read_eeprom_info(struct ieee80211_hw *hw);
48void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
49 u32 *inta, u32 *intb);
50int rtl92se_hw_init(struct ieee80211_hw *hw);
51void rtl92se_card_disable(struct ieee80211_hw *hw);
52void rtl92se_enable_interrupt(struct ieee80211_hw *hw);
53void rtl92se_disable_interrupt(struct ieee80211_hw *hw);
54int rtl92se_set_network_type(struct ieee80211_hw *hw,
55 enum nl80211_iftype type);
56void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
57void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr);
58void rtl92se_set_qos(struct ieee80211_hw *hw, int aci);
59void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw);
60void rtl92se_set_beacon_interval(struct ieee80211_hw *hw);
61void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw,
62 u32 add_msr, u32 rm_msr);
63void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable,
64 u8 *val);
65void rtl92se_update_hal_rate_tbl(struct ieee80211_hw *hw,
66 struct ieee80211_sta *sta, u8 rssi_level);
67void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw);
68bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw,
69 u8 *valid);
70void rtl8192se_gpiobit3_cfg_inputmode(struct ieee80211_hw *hw);
71void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw);
72void rtl92se_set_key(struct ieee80211_hw *hw,
73 u32 key_index, u8 *macaddr, bool is_group,
74 u8 enc_algo, bool is_wepkey, bool clear_all);
75void rtl92se_suspend(struct ieee80211_hw *hw);
76void rtl92se_resume(struct ieee80211_hw *hw);
77
78#endif
79
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/rtlwifi/rtl8192se/led.c
new file mode 100644
index 000000000000..6d4f66616680
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/led.c
@@ -0,0 +1,149 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "reg.h"
33#include "led.h"
34
35static void _rtl92se_init_led(struct ieee80211_hw *hw,
36 struct rtl_led *pled, enum rtl_led_pin ledpin)
37{
38 pled->hw = hw;
39 pled->ledpin = ledpin;
40 pled->ledon = false;
41}
42
43void rtl92se_init_sw_leds(struct ieee80211_hw *hw)
44{
45 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
46 _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
47 _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
48}
49
50void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
51{
52 u8 ledcfg;
53 struct rtl_priv *rtlpriv = rtl_priv(hw);
54
55 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
56 ("LedAddr:%X ledpin=%d\n", LEDCFG, pled->ledpin));
57
58 ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
59
60 switch (pled->ledpin) {
61 case LED_PIN_GPIO0:
62 break;
63 case LED_PIN_LED0:
64 rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0xf0);
65 break;
66 case LED_PIN_LED1:
67 rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0x0f);
68 break;
69 default:
70 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
71 ("switch case not process\n"));
72 break;
73 }
74 pled->ledon = true;
75}
76
77void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
78{
79 struct rtl_priv *rtlpriv = rtl_priv(hw);
80 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
81 u8 ledcfg;
82
83 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
84 ("LedAddr:%X ledpin=%d\n", LEDCFG, pled->ledpin));
85
86 ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
87
88 switch (pled->ledpin) {
89 case LED_PIN_GPIO0:
90 break;
91 case LED_PIN_LED0:
92 ledcfg &= 0xf0;
93 if (pcipriv->ledctl.led_opendrain)
94 rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(1)));
95 else
96 rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
97 break;
98 case LED_PIN_LED1:
99 ledcfg &= 0x0f;
100 rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
101 break;
102 default:
103 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
104 ("switch case not process\n"));
105 break;
106 }
107 pled->ledon = false;
108}
109
110static void _rtl92se_sw_led_control(struct ieee80211_hw *hw,
111 enum led_ctl_mode ledaction)
112{
113 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
114 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
115 switch (ledaction) {
116 case LED_CTL_POWER_ON:
117 case LED_CTL_LINK:
118 case LED_CTL_NO_LINK:
119 rtl92se_sw_led_on(hw, pLed0);
120 break;
121 case LED_CTL_POWER_OFF:
122 rtl92se_sw_led_off(hw, pLed0);
123 break;
124 default:
125 break;
126 }
127}
128
129void rtl92se_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
130{
131 struct rtl_priv *rtlpriv = rtl_priv(hw);
132 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
133
134 if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
135 (ledaction == LED_CTL_TX ||
136 ledaction == LED_CTL_RX ||
137 ledaction == LED_CTL_SITE_SURVEY ||
138 ledaction == LED_CTL_LINK ||
139 ledaction == LED_CTL_NO_LINK ||
140 ledaction == LED_CTL_START_TO_LINK ||
141 ledaction == LED_CTL_POWER_ON)) {
142 return;
143 }
144 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
145 ledaction));
146
147 _rtl92se_sw_led_control(hw, ledaction);
148}
149
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/rtlwifi/rtl8192se/led.h
new file mode 100644
index 000000000000..8cce3870af3c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/led.h
@@ -0,0 +1,37 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __REALTEK_PCI92SE_LED_H__
30#define __REALTEK_PCI92SE_LED_H__
31
32void rtl92se_init_sw_leds(struct ieee80211_hw *hw);
33void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
34void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
35void rtl92se_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
36
37#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
new file mode 100644
index 000000000000..63b45e60a95e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -0,0 +1,1740 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../ps.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "rf.h"
37#include "dm.h"
38#include "fw.h"
39#include "hw.h"
40#include "table.h"
41
42static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
43{
44 u32 i;
45
46 for (i = 0; i <= 31; i++) {
47 if (((bitmask >> i) & 0x1) == 1)
48 break;
49 }
50
51 return i;
52}
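
This helper returns the index of the least-significant set bit of the mask (and falls through to 32 for a zero mask). For any non-zero mask it is equivalent to the kernel's __ffs(); noted here only for reference:

	/* Sketch: same result as _rtl92s_phy_calculate_bit_shift() for a
	 * non-zero bitmask (__ffs() is undefined for 0). */
	bitshift = __ffs(bitmask);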
53
54u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
55{
56 struct rtl_priv *rtlpriv = rtl_priv(hw);
57 u32 returnvalue = 0, originalvalue, bitshift;
58
59 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)\n",
60 regaddr, bitmask));
61
62 originalvalue = rtl_read_dword(rtlpriv, regaddr);
63 bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
64 returnvalue = (originalvalue & bitmask) >> bitshift;
65
66 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
67 ("BBR MASK=0x%x Addr[0x%x]=0x%x\n",
68 bitmask, regaddr, originalvalue));
69
70 return returnvalue;
71
72}
73
74void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
75 u32 data)
76{
77 struct rtl_priv *rtlpriv = rtl_priv(hw);
78 u32 originalvalue, bitshift;
79
80 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
81 " data(%#x)\n", regaddr, bitmask, data));
82
83 if (bitmask != MASKDWORD) {
84 originalvalue = rtl_read_dword(rtlpriv, regaddr);
85 bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
86 data = ((originalvalue & (~bitmask)) | (data << bitshift));
87 }
88
89 rtl_write_dword(rtlpriv, regaddr, data);
90
91 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
92 " data(%#x)\n", regaddr, bitmask, data));
93
94}
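
With the read-modify-write above, a caller can update a single field of a baseband register without touching the other bits, for example (register and mask chosen purely for illustration):

	/* Write the value 0x2 into the two-bit field at bits 26-27 of
	 * RFPGA0_XA_HSSIPARAMETER2, leaving every other bit unchanged. */
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BIT(26) | BIT(27), 0x2);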
95
96static u32 _rtl92s_phy_rf_serial_read(struct ieee80211_hw *hw,
97 enum radio_path rfpath, u32 offset)
98{
99
100 struct rtl_priv *rtlpriv = rtl_priv(hw);
101 struct rtl_phy *rtlphy = &(rtlpriv->phy);
102 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
103 u32 newoffset;
104 u32 tmplong, tmplong2;
105 u8 rfpi_enable = 0;
106 u32 retvalue = 0;
107
108 offset &= 0x3f;
109 newoffset = offset;
110
111 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
112
113 if (rfpath == RF90_PATH_A)
114 tmplong2 = tmplong;
115 else
116 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
117
118 tmplong2 = (tmplong2 & (~BLSSI_READADDRESS)) | (newoffset << 23) |
119 BLSSI_READEDGE;
120
121 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
122 tmplong & (~BLSSI_READEDGE));
123
124 mdelay(1);
125
126 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
127 mdelay(1);
128
129 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, tmplong |
130 BLSSI_READEDGE);
131 mdelay(1);
132
133 if (rfpath == RF90_PATH_A)
134 rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
135 BIT(8));
136 else if (rfpath == RF90_PATH_B)
137 rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
138 BIT(8));
139
140 if (rfpi_enable)
141 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
142 BLSSI_READBACK_DATA);
143 else
144 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
145 BLSSI_READBACK_DATA);
146
147 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
148 BLSSI_READBACK_DATA);
149
150 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
151 rfpath, pphyreg->rflssi_readback, retvalue));
152
153 return retvalue;
154
155}
156
157static void _rtl92s_phy_rf_serial_write(struct ieee80211_hw *hw,
158 enum radio_path rfpath, u32 offset,
159 u32 data)
160{
161 struct rtl_priv *rtlpriv = rtl_priv(hw);
162 struct rtl_phy *rtlphy = &(rtlpriv->phy);
163 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
164 u32 data_and_addr = 0;
165 u32 newoffset;
166
167 offset &= 0x3f;
168 newoffset = offset;
169
170 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
171 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
172
173 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
174 rfpath, pphyreg->rf3wire_offset, data_and_addr));
175}
176
177
178u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
179 u32 regaddr, u32 bitmask)
180{
181 struct rtl_priv *rtlpriv = rtl_priv(hw);
182 u32 original_value, readback_value, bitshift;
183 unsigned long flags;
184
185 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
186 "bitmask(%#x)\n", regaddr, rfpath, bitmask));
187
188 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
189
190 original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);
191
192 bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
193 readback_value = (original_value & bitmask) >> bitshift;
194
195 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
196
197 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
198 "bitmask(%#x), original_value(%#x)\n", regaddr, rfpath,
199 bitmask, original_value));
200
201 return readback_value;
202}
203
204void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
205 u32 regaddr, u32 bitmask, u32 data)
206{
207 struct rtl_priv *rtlpriv = rtl_priv(hw);
208 struct rtl_phy *rtlphy = &(rtlpriv->phy);
209 u32 original_value, bitshift;
210 unsigned long flags;
211
212 if (!((rtlphy->rf_pathmap >> rfpath) & 0x1))
213 return;
214
215 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
216 " data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
217
218 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
219
220 if (bitmask != RFREG_OFFSET_MASK) {
221 original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
222 regaddr);
223 bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
224 data = ((original_value & (~bitmask)) | (data << bitshift));
225 }
226
227 _rtl92s_phy_rf_serial_write(hw, rfpath, regaddr, data);
228
229 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
230
231 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x), "
232 "data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
233
234}
235
236void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw,
237 u8 operation)
238{
239 struct rtl_priv *rtlpriv = rtl_priv(hw);
240 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
241
242 if (!is_hal_stop(rtlhal)) {
243 switch (operation) {
244 case SCAN_OPT_BACKUP:
245 rtl92s_phy_set_fw_cmd(hw, FW_CMD_PAUSE_DM_BY_SCAN);
246 break;
247 case SCAN_OPT_RESTORE:
248 rtl92s_phy_set_fw_cmd(hw, FW_CMD_RESUME_DM_BY_SCAN);
249 break;
250 default:
251 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
252 ("Unknown operation.\n"));
253 break;
254 }
255 }
256}
257
258void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
259 enum nl80211_channel_type ch_type)
260{
261 struct rtl_priv *rtlpriv = rtl_priv(hw);
262 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
263 struct rtl_phy *rtlphy = &(rtlpriv->phy);
264 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
265 u8 reg_bw_opmode;
266 u8 reg_prsr_rsc;
267
268 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("Switch to %s bandwidth\n",
269 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
270 "20MHz" : "40MHz"));
271
272 if (rtlphy->set_bwmode_inprogress)
273 return;
274 if (is_hal_stop(rtlhal))
275 return;
276
277 rtlphy->set_bwmode_inprogress = true;
278
279 reg_bw_opmode = rtl_read_byte(rtlpriv, BW_OPMODE);
280 reg_prsr_rsc = rtl_read_byte(rtlpriv, RRSR + 2);
281
282 switch (rtlphy->current_chan_bw) {
283 case HT_CHANNEL_WIDTH_20:
284 reg_bw_opmode |= BW_OPMODE_20MHZ;
285 rtl_write_byte(rtlpriv, BW_OPMODE, reg_bw_opmode);
286 break;
287 case HT_CHANNEL_WIDTH_20_40:
288 reg_bw_opmode &= ~BW_OPMODE_20MHZ;
289 rtl_write_byte(rtlpriv, BW_OPMODE, reg_bw_opmode);
290 break;
291 default:
292 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
293 ("unknown bandwidth: %#X\n",
294 rtlphy->current_chan_bw));
295 break;
296 }
297
298 switch (rtlphy->current_chan_bw) {
299 case HT_CHANNEL_WIDTH_20:
300 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
301 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
302
303 if (rtlhal->version >= VERSION_8192S_BCUT)
304 rtl_write_byte(rtlpriv, RFPGA0_ANALOGPARAMETER2, 0x58);
305 break;
306 case HT_CHANNEL_WIDTH_20_40:
307 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
308 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
309
310 rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
311 (mac->cur_40_prime_sc >> 1));
312 rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
313
314 if (rtlhal->version >= VERSION_8192S_BCUT)
315 rtl_write_byte(rtlpriv, RFPGA0_ANALOGPARAMETER2, 0x18);
316 break;
317 default:
318 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
319 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
320 break;
321 }
322
323 rtl92s_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
324 rtlphy->set_bwmode_inprogress = false;
325 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
326}
327
328static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
329 u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid,
330 u32 para1, u32 para2, u32 msdelay)
331{
332 struct swchnlcmd *pcmd;
333
334 if (cmdtable == NULL) {
335 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
336 return false;
337 }
338
339 if (cmdtableidx >= cmdtablesz)
340 return false;
341
342 pcmd = cmdtable + cmdtableidx;
343 pcmd->cmdid = cmdid;
344 pcmd->para1 = para1;
345 pcmd->para2 = para2;
346 pcmd->msdelay = msdelay;
347
348 return true;
349}
350
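/* One step of the software channel-switch state machine: stage 0 runs the
 * pre-common commands, stage 1 the RF-dependent commands and stage 2 the
 * post-common commands.  Returns true once the final stage has finished;
 * otherwise executes the current command, reports the wait time through
 * *delay and returns false. */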
351static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
352 u8 channel, u8 *stage, u8 *step, u32 *delay)
353{
354 struct rtl_priv *rtlpriv = rtl_priv(hw);
355 struct rtl_phy *rtlphy = &(rtlpriv->phy);
356 struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
357 u32 precommoncmdcnt;
358 struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
359 u32 postcommoncmdcnt;
360 struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
361 u32 rfdependcmdcnt;
362 struct swchnlcmd *currentcmd = NULL;
363 u8 rfpath;
364 u8 num_total_rfpath = rtlphy->num_total_rfpath;
365
366 precommoncmdcnt = 0;
367 _rtl92s_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
368 MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
369 _rtl92s_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
370 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
371
372 postcommoncmdcnt = 0;
373
374 _rtl92s_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
375 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
376
377 rfdependcmdcnt = 0;
378
379 RT_ASSERT((channel >= 1 && channel <= 14),
380 ("illegal channel for Zebra: %d\n", channel));
381
382 _rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
383 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
384 RF_CHNLBW, channel, 10);
385
386 _rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
387 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
388
389 do {
390 switch (*stage) {
391 case 0:
392 currentcmd = &precommoncmd[*step];
393 break;
394 case 1:
395 currentcmd = &rfdependcmd[*step];
396 break;
397 case 2:
398 currentcmd = &postcommoncmd[*step];
399 break;
400 }
401
402 if (currentcmd->cmdid == CMDID_END) {
403 if ((*stage) == 2) {
404 return true;
405 } else {
406 (*stage)++;
407 (*step) = 0;
408 continue;
409 }
410 }
411
412 switch (currentcmd->cmdid) {
413 case CMDID_SET_TXPOWEROWER_LEVEL:
414 rtl92s_phy_set_txpower(hw, channel);
415 break;
416 case CMDID_WRITEPORT_ULONG:
417 rtl_write_dword(rtlpriv, currentcmd->para1,
418 currentcmd->para2);
419 break;
420 case CMDID_WRITEPORT_USHORT:
421 rtl_write_word(rtlpriv, currentcmd->para1,
422 (u16)currentcmd->para2);
423 break;
424 case CMDID_WRITEPORT_UCHAR:
425 rtl_write_byte(rtlpriv, currentcmd->para1,
426 (u8)currentcmd->para2);
427 break;
428 case CMDID_RF_WRITEREG:
429 for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
430 rtlphy->rfreg_chnlval[rfpath] =
431 ((rtlphy->rfreg_chnlval[rfpath] &
432 0xfffffc00) | currentcmd->para2);
433 rtl_set_rfreg(hw, (enum radio_path)rfpath,
434 currentcmd->para1,
435 RFREG_OFFSET_MASK,
436 rtlphy->rfreg_chnlval[rfpath]);
437 }
438 break;
439 default:
440 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
441 ("switch case not process\n"));
442 break;
443 }
444
445 break;
446 } while (true);
447
448 (*delay) = currentcmd->msdelay;
449 (*step)++;
450 return false;
451}
452
453u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw)
454{
455 struct rtl_priv *rtlpriv = rtl_priv(hw);
456 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
457 struct rtl_phy *rtlphy = &(rtlpriv->phy);
458 u32 delay;
459 bool ret;
460
461 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
462 ("switch to channel%d\n",
463 rtlphy->current_channel));
464
465 if (rtlphy->sw_chnl_inprogress)
466 return 0;
467
468 if (rtlphy->set_bwmode_inprogress)
469 return 0;
470
471 if (is_hal_stop(rtlhal))
472 return 0;
473
474 rtlphy->sw_chnl_inprogress = true;
475 rtlphy->sw_chnl_stage = 0;
476 rtlphy->sw_chnl_step = 0;
477
478 do {
479 if (!rtlphy->sw_chnl_inprogress)
480 break;
481
482 ret = _rtl92s_phy_sw_chnl_step_by_step(hw,
483 rtlphy->current_channel,
484 &rtlphy->sw_chnl_stage,
485 &rtlphy->sw_chnl_step, &delay);
486 if (!ret) {
487 if (delay > 0)
488 mdelay(delay);
489 else
490 continue;
491 } else {
492 rtlphy->sw_chnl_inprogress = false;
493 }
494 break;
495 } while (true);
496
497 rtlphy->sw_chnl_inprogress = false;
498
499 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
500
501 return 1;
502}
503
504static void _rtl92se_phy_set_rf_sleep(struct ieee80211_hw *hw)
505{
506 struct rtl_priv *rtlpriv = rtl_priv(hw);
507 u8 u1btmp;
508
509 u1btmp = rtl_read_byte(rtlpriv, LDOV12D_CTRL);
510 u1btmp |= BIT(0);
511
512 rtl_write_byte(rtlpriv, LDOV12D_CTRL, u1btmp);
513 rtl_write_byte(rtlpriv, SPS1_CTRL, 0x0);
514 rtl_write_byte(rtlpriv, TXPAUSE, 0xFF);
515 rtl_write_word(rtlpriv, CMDR, 0x57FC);
516 udelay(100);
517
518 rtl_write_word(rtlpriv, CMDR, 0x77FC);
519 rtl_write_byte(rtlpriv, PHY_CCA, 0x0);
520 udelay(10);
521
522 rtl_write_word(rtlpriv, CMDR, 0x37FC);
523 udelay(10);
524
525 rtl_write_word(rtlpriv, CMDR, 0x77FC);
526 udelay(10);
527
528 rtl_write_word(rtlpriv, CMDR, 0x57FC);
529
530	/* We should change GPIO to input mode;
531	 * this will reduce the current draw by about 25 mA. */
532 rtl8192se_gpiobit3_cfg_inputmode(hw);
533}
534
535bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
536 enum rf_pwrstate rfpwr_state)
537{
538 struct rtl_priv *rtlpriv = rtl_priv(hw);
539 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
540 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
541 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
542 bool bresult = true;
543 u8 i, queue_id;
544 struct rtl8192_tx_ring *ring = NULL;
545
546 if (rfpwr_state == ppsc->rfpwr_state)
547 return false;
548
549 ppsc->set_rfpowerstate_inprogress = true;
550
551 switch (rfpwr_state) {
552 case ERFON:{
553 if ((ppsc->rfpwr_state == ERFOFF) &&
554 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
555
556 bool rtstatus;
557 u32 InitializeCount = 0;
558 do {
559 InitializeCount++;
560 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
561 ("IPS Set eRf nic enable\n"));
562 rtstatus = rtl_ps_enable_nic(hw);
563 } while ((rtstatus != true) &&
564 (InitializeCount < 10));
565
566 RT_CLEAR_PS_LEVEL(ppsc,
567 RT_RF_OFF_LEVL_HALT_NIC);
568 } else {
569 RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
570	 ("awake, slept:%d ms "
571 "state_inap:%x\n",
572 jiffies_to_msecs(jiffies -
573 ppsc->last_sleep_jiffies),
574 rtlpriv->psc.state_inap));
575 ppsc->last_awake_jiffies = jiffies;
576 rtl_write_word(rtlpriv, CMDR, 0x37FC);
577 rtl_write_byte(rtlpriv, TXPAUSE, 0x00);
578 rtl_write_byte(rtlpriv, PHY_CCA, 0x3);
579 }
580
581 if (mac->link_state == MAC80211_LINKED)
582 rtlpriv->cfg->ops->led_control(hw,
583 LED_CTL_LINK);
584 else
585 rtlpriv->cfg->ops->led_control(hw,
586 LED_CTL_NO_LINK);
587 break;
588 }
589 case ERFOFF:{
590 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
591 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
592 ("IPS Set eRf nic disable\n"));
593 rtl_ps_disable_nic(hw);
594 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
595 } else {
596 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
597 rtlpriv->cfg->ops->led_control(hw,
598 LED_CTL_NO_LINK);
599 else
600 rtlpriv->cfg->ops->led_control(hw,
601 LED_CTL_POWER_OFF);
602 }
603 break;
604 }
605 case ERFSLEEP:
606 if (ppsc->rfpwr_state == ERFOFF)
607 break;
608
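		/* Before dozing, poll (at most MAX_DOZE_WAITING_TIMES_9x
		 * times) until every Tx queue except the beacon queue has
		 * drained. */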
609 for (queue_id = 0, i = 0;
610 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
611 ring = &pcipriv->dev.tx_ring[queue_id];
612 if (skb_queue_len(&ring->queue) == 0 ||
613 queue_id == BEACON_QUEUE) {
614 queue_id++;
615 continue;
616 } else {
617 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
618 ("eRf Off/Sleep: "
619 "%d times TcbBusyQueue[%d] = "
620 "%d before doze!\n",
621 (i + 1), queue_id,
622 skb_queue_len(&ring->queue)));
623
624 udelay(10);
625 i++;
626 }
627
628 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
629 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
630	 ("\nERFOFF: %d times "
631 "TcbBusyQueue[%d] = %d !\n",
632 MAX_DOZE_WAITING_TIMES_9x,
633 queue_id,
634 skb_queue_len(&ring->queue)));
635 break;
636 }
637 }
638
639 RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
640	 ("Set ERFSLEEP awakened:%d ms\n",
641 jiffies_to_msecs(jiffies -
642 ppsc->last_awake_jiffies)));
643
644 RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
645	 ("sleep awakened:%d ms "
646 "state_inap:%x\n", jiffies_to_msecs(jiffies -
647 ppsc->last_awake_jiffies),
648 rtlpriv->psc.state_inap));
649 ppsc->last_sleep_jiffies = jiffies;
650 _rtl92se_phy_set_rf_sleep(hw);
651 break;
652 default:
653 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
654 ("switch case not process\n"));
655 bresult = false;
656 break;
657 }
658
659 if (bresult)
660 ppsc->rfpwr_state = rfpwr_state;
661
662 ppsc->set_rfpowerstate_inprogress = false;
663
664 return bresult;
665}
666
667static bool _rtl92s_phy_config_rfpa_bias_current(struct ieee80211_hw *hw,
668 enum radio_path rfpath)
669{
670 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
671 bool rtstatus = true;
672 u32 tmpval = 0;
673
674	/* If this is an inferiority IC, we have to increase the PA bias current */
675 if (rtlhal->ic_class != IC_INFERIORITY_A) {
676 tmpval = rtl92s_phy_query_rf_reg(hw, rfpath, RF_IPA, 0xf);
677 rtl92s_phy_set_rf_reg(hw, rfpath, RF_IPA, 0xf, tmpval + 1);
678 }
679
680 return rtstatus;
681}
682
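/* Cache the per-rate Tx power offsets parsed from the PHY_REG_PG table,
 * indexed by the current power group; pwrgroup_cnt advances after the
 * MCS15-MCS12 entry of each group has been stored. */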
683static void _rtl92s_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
684 u32 reg_addr, u32 bitmask, u32 data)
685{
686 struct rtl_priv *rtlpriv = rtl_priv(hw);
687 struct rtl_phy *rtlphy = &(rtlpriv->phy);
688
689 if (reg_addr == RTXAGC_RATE18_06)
690 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
691 data;
692 if (reg_addr == RTXAGC_RATE54_24)
693 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
694 data;
695 if (reg_addr == RTXAGC_CCK_MCS32)
696 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
697 data;
698 if (reg_addr == RTXAGC_MCS03_MCS00)
699 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
700 data;
701 if (reg_addr == RTXAGC_MCS07_MCS04)
702 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
703 data;
704 if (reg_addr == RTXAGC_MCS11_MCS08)
705 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
706 data;
707 if (reg_addr == RTXAGC_MCS15_MCS12) {
708 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
709 data;
710 rtlphy->pwrgroup_cnt++;
711 }
712}
713
714static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
715{
716 struct rtl_priv *rtlpriv = rtl_priv(hw);
717 struct rtl_phy *rtlphy = &(rtlpriv->phy);
718
719	/* RF Interface Software Control */
720 rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
721 rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
722 rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
723 rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
724
725 /* RF Interface Readback Value */
726 rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
727 rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
728 rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
729 rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
730
731 /* RF Interface Output (and Enable) */
732 rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
733 rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
734 rtlphy->phyreg_def[RF90_PATH_C].rfintfo = RFPGA0_XC_RFINTERFACEOE;
735 rtlphy->phyreg_def[RF90_PATH_D].rfintfo = RFPGA0_XD_RFINTERFACEOE;
736
737 /* RF Interface (Output and) Enable */
738 rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
739 rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
740 rtlphy->phyreg_def[RF90_PATH_C].rfintfe = RFPGA0_XC_RFINTERFACEOE;
741 rtlphy->phyreg_def[RF90_PATH_D].rfintfe = RFPGA0_XD_RFINTERFACEOE;
742
743	/* Addr of LSSI. Write RF register by driver */
744 rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
745 RFPGA0_XA_LSSIPARAMETER;
746 rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
747 RFPGA0_XB_LSSIPARAMETER;
748 rtlphy->phyreg_def[RF90_PATH_C].rf3wire_offset =
749 RFPGA0_XC_LSSIPARAMETER;
750 rtlphy->phyreg_def[RF90_PATH_D].rf3wire_offset =
751 RFPGA0_XD_LSSIPARAMETER;
752
753 /* RF parameter */
754 rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
755 rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
756 rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
757 rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;
758
759	/* Tx AGC Gain Stage (same for all paths. Should we remove this?) */
760 rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
761 rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
762 rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
763 rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
764
765	/* Transceiver A~D HSSI Parameter-1 */
766 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
767 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
768 rtlphy->phyreg_def[RF90_PATH_C].rfhssi_para1 = RFPGA0_XC_HSSIPARAMETER1;
769 rtlphy->phyreg_def[RF90_PATH_D].rfhssi_para1 = RFPGA0_XD_HSSIPARAMETER1;
770
771	/* Transceiver A~D HSSI Parameter-2 */
772 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
773 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
774 rtlphy->phyreg_def[RF90_PATH_C].rfhssi_para2 = RFPGA0_XC_HSSIPARAMETER2;
775 rtlphy->phyreg_def[RF90_PATH_D].rfhssi_para2 = RFPGA0_XD_HSSIPARAMETER2;
776
777 /* RF switch Control */
778 rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
779 RFPGA0_XAB_SWITCHCONTROL;
780 rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
781 RFPGA0_XAB_SWITCHCONTROL;
782 rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
783 RFPGA0_XCD_SWITCHCONTROL;
784 rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
785 RFPGA0_XCD_SWITCHCONTROL;
786
787 /* AGC control 1 */
788 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
789 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
790 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
791 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
792
793 /* AGC control 2 */
794 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
795 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
796 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
797 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
798
799 /* RX AFE control 1 */
800 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
801 ROFDM0_XARXIQIMBALANCE;
802 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
803 ROFDM0_XBRXIQIMBALANCE;
804 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
805 ROFDM0_XCRXIQIMBALANCE;
806 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
807 ROFDM0_XDRXIQIMBALANCE;
808
809	/* RX AFE control 2 */
810 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
811 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
812 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
813 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
814
815 /* Tx AFE control 1 */
816 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
817 ROFDM0_XATXIQIMBALANCE;
818 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
819 ROFDM0_XBTXIQIMBALANCE;
820 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
821 ROFDM0_XCTXIQIMBALANCE;
822 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
823 ROFDM0_XDTXIQIMBALANCE;
824
825 /* Tx AFE control 2 */
826 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
827 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
828 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
829 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
830
831	/* Transceiver LSSI Readback */
832 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
833 RFPGA0_XA_LSSIREADBACK;
834 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
835 RFPGA0_XB_LSSIREADBACK;
836 rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
837 RFPGA0_XC_LSSIREADBACK;
838 rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
839 RFPGA0_XD_LSSIREADBACK;
840
841	/* Transceiver LSSI Readback PI mode */
842 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
843 TRANSCEIVERA_HSPI_READBACK;
844 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
845 TRANSCEIVERB_HSPI_READBACK;
846}
847
848
849static bool _rtl92s_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
850{
851 int i;
852 u32 *phy_reg_table;
853 u32 *agc_table;
854 u16 phy_reg_len, agc_len;
855
856 agc_len = AGCTAB_ARRAYLENGTH;
857 agc_table = rtl8192seagctab_array;
858 /* Default RF_type: 2T2R */
859 phy_reg_len = PHY_REG_2T2RARRAYLENGTH;
860 phy_reg_table = rtl8192sephy_reg_2t2rarray;
861
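	/* Table entries whose address field is 0xfe..0xf9 request extra
	 * delays (50 ms, 5 ms, 1 ms, 50 us, 5 us, 1 us) before continuing. */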
862 if (configtype == BASEBAND_CONFIG_PHY_REG) {
863 for (i = 0; i < phy_reg_len; i = i + 2) {
864 if (phy_reg_table[i] == 0xfe)
865 mdelay(50);
866 else if (phy_reg_table[i] == 0xfd)
867 mdelay(5);
868 else if (phy_reg_table[i] == 0xfc)
869 mdelay(1);
870 else if (phy_reg_table[i] == 0xfb)
871 udelay(50);
872 else if (phy_reg_table[i] == 0xfa)
873 udelay(5);
874 else if (phy_reg_table[i] == 0xf9)
875 udelay(1);
876
877 /* Add delay for ECS T20 & LG malow platform, */
878 udelay(1);
879
880 rtl92s_phy_set_bb_reg(hw, phy_reg_table[i], MASKDWORD,
881 phy_reg_table[i + 1]);
882 }
883 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
884 for (i = 0; i < agc_len; i = i + 2) {
885 rtl92s_phy_set_bb_reg(hw, agc_table[i], MASKDWORD,
886 agc_table[i + 1]);
887
888 /* Add delay for ECS T20 & LG malow platform */
889 udelay(1);
890 }
891 }
892
893 return true;
894}
895
896static bool _rtl92s_phy_set_bb_to_diff_rf(struct ieee80211_hw *hw,
897 u8 configtype)
898{
899 struct rtl_priv *rtlpriv = rtl_priv(hw);
900 struct rtl_phy *rtlphy = &(rtlpriv->phy);
901 u32 *phy_regarray2xtxr_table;
902 u16 phy_regarray2xtxr_len;
903 int i;
904
905 if (rtlphy->rf_type == RF_1T1R) {
906 phy_regarray2xtxr_table = rtl8192sephy_changeto_1t1rarray;
907 phy_regarray2xtxr_len = PHY_CHANGETO_1T1RARRAYLENGTH;
908 } else if (rtlphy->rf_type == RF_1T2R) {
909 phy_regarray2xtxr_table = rtl8192sephy_changeto_1t2rarray;
910 phy_regarray2xtxr_len = PHY_CHANGETO_1T2RARRAYLENGTH;
911 } else {
912 return false;
913 }
914
915 if (configtype == BASEBAND_CONFIG_PHY_REG) {
916 for (i = 0; i < phy_regarray2xtxr_len; i = i + 3) {
917 if (phy_regarray2xtxr_table[i] == 0xfe)
918 mdelay(50);
919 else if (phy_regarray2xtxr_table[i] == 0xfd)
920 mdelay(5);
921 else if (phy_regarray2xtxr_table[i] == 0xfc)
922 mdelay(1);
923 else if (phy_regarray2xtxr_table[i] == 0xfb)
924 udelay(50);
925 else if (phy_regarray2xtxr_table[i] == 0xfa)
926 udelay(5);
927 else if (phy_regarray2xtxr_table[i] == 0xf9)
928 udelay(1);
929
930 rtl92s_phy_set_bb_reg(hw, phy_regarray2xtxr_table[i],
931 phy_regarray2xtxr_table[i + 1],
932 phy_regarray2xtxr_table[i + 2]);
933 }
934 }
935
936 return true;
937}
938
939static bool _rtl92s_phy_config_bb_with_pg(struct ieee80211_hw *hw,
940 u8 configtype)
941{
942 int i;
943 u32 *phy_table_pg;
944 u16 phy_pg_len;
945
946 phy_pg_len = PHY_REG_ARRAY_PGLENGTH;
947 phy_table_pg = rtl8192sephy_reg_array_pg;
948
949 if (configtype == BASEBAND_CONFIG_PHY_REG) {
950 for (i = 0; i < phy_pg_len; i = i + 3) {
951 if (phy_table_pg[i] == 0xfe)
952 mdelay(50);
953 else if (phy_table_pg[i] == 0xfd)
954 mdelay(5);
955 else if (phy_table_pg[i] == 0xfc)
956 mdelay(1);
957 else if (phy_table_pg[i] == 0xfb)
958 udelay(50);
959 else if (phy_table_pg[i] == 0xfa)
960 udelay(5);
961 else if (phy_table_pg[i] == 0xf9)
962 udelay(1);
963
964 _rtl92s_store_pwrindex_diffrate_offset(hw,
965 phy_table_pg[i],
966 phy_table_pg[i + 1],
967 phy_table_pg[i + 2]);
968 rtl92s_phy_set_bb_reg(hw, phy_table_pg[i],
969 phy_table_pg[i + 1],
970 phy_table_pg[i + 2]);
971 }
972 }
973
974 return true;
975}
976
977static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
978{
979 struct rtl_priv *rtlpriv = rtl_priv(hw);
980 struct rtl_phy *rtlphy = &(rtlpriv->phy);
981 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
982 bool rtstatus = true;
983
984 /* 1. Read PHY_REG.TXT BB INIT!! */
985 /* We will separate as 1T1R/1T2R/1T2R_GREEN/2T2R */
986 if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_2T2R ||
987 rtlphy->rf_type == RF_1T1R || rtlphy->rf_type == RF_2T2R_GREEN) {
988 rtstatus = _rtl92s_phy_config_bb(hw, BASEBAND_CONFIG_PHY_REG);
989
990 if (rtlphy->rf_type != RF_2T2R &&
991 rtlphy->rf_type != RF_2T2R_GREEN)
992			/* so we should reconfigure the BB registers with
993			 * the correct PHY parameters. */
994 rtstatus = _rtl92s_phy_set_bb_to_diff_rf(hw,
995 BASEBAND_CONFIG_PHY_REG);
996 } else {
997 rtstatus = false;
998 }
999
1000 if (rtstatus != true) {
1001 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
1002 ("Write BB Reg Fail!!"));
1003 goto phy_BB8190_Config_ParaFile_Fail;
1004 }
1005
1006 /* 2. If EEPROM or EFUSE autoload OK, We must config by
1007 * PHY_REG_PG.txt */
1008 if (rtlefuse->autoload_failflag == false) {
1009 rtlphy->pwrgroup_cnt = 0;
1010
1011 rtstatus = _rtl92s_phy_config_bb_with_pg(hw,
1012 BASEBAND_CONFIG_PHY_REG);
1013 }
1014 if (rtstatus != true) {
1015 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
1016 ("_rtl92s_phy_bb_config_parafile(): "
1017 "BB_PG Reg Fail!!"));
1018 goto phy_BB8190_Config_ParaFile_Fail;
1019 }
1020
1021 /* 3. BB AGC table Initialization */
1022 rtstatus = _rtl92s_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
1023
1024 if (rtstatus != true) {
1025 printk(KERN_ERR "_rtl92s_phy_bb_config_parafile(): "
1026 "AGC Table Fail\n");
1027 goto phy_BB8190_Config_ParaFile_Fail;
1028 }
1029
1030 /* Check if the CCK HighPower is turned ON. */
1031 /* This is used to calculate PWDB. */
1032 rtlphy->cck_high_power = (bool)(rtl92s_phy_query_bb_reg(hw,
1033 RFPGA0_XA_HSSIPARAMETER2, 0x200));
1034
1035phy_BB8190_Config_ParaFile_Fail:
1036 return rtstatus;
1037}
1038
1039u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
1040{
1041 struct rtl_priv *rtlpriv = rtl_priv(hw);
1042 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1043 int i;
1044 bool rtstatus = true;
1045 u32 *radio_a_table;
1046 u32 *radio_b_table;
1047 u16 radio_a_tblen, radio_b_tblen;
1048
1049 radio_a_tblen = RADIOA_1T_ARRAYLENGTH;
1050 radio_a_table = rtl8192seradioa_1t_array;
1051
1052 /* Using Green mode array table for RF_2T2R_GREEN */
1053 if (rtlphy->rf_type == RF_2T2R_GREEN) {
1054 radio_b_table = rtl8192seradiob_gm_array;
1055 radio_b_tblen = RADIOB_GM_ARRAYLENGTH;
1056 } else {
1057 radio_b_table = rtl8192seradiob_array;
1058 radio_b_tblen = RADIOB_ARRAYLENGTH;
1059 }
1060
1061 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath));
1062 rtstatus = true;
1063
1064 switch (rfpath) {
1065 case RF90_PATH_A:
1066 for (i = 0; i < radio_a_tblen; i = i + 2) {
1067 if (radio_a_table[i] == 0xfe)
1068				/* Delay for the specified number of ms.
1069				 * Only RF configuration requires a delay. */
1070 mdelay(50);
1071 else if (radio_a_table[i] == 0xfd)
1072 mdelay(5);
1073 else if (radio_a_table[i] == 0xfc)
1074 mdelay(1);
1075 else if (radio_a_table[i] == 0xfb)
1076 udelay(50);
1077 else if (radio_a_table[i] == 0xfa)
1078 udelay(5);
1079 else if (radio_a_table[i] == 0xf9)
1080 udelay(1);
1081 else
1082 rtl92s_phy_set_rf_reg(hw, rfpath,
1083 radio_a_table[i],
1084 MASK20BITS,
1085 radio_a_table[i + 1]);
1086
1087 /* Add delay for ECS T20 & LG malow platform */
1088 udelay(1);
1089 }
1090
1091 /* PA Bias current for inferiority IC */
1092 _rtl92s_phy_config_rfpa_bias_current(hw, rfpath);
1093 break;
1094 case RF90_PATH_B:
1095 for (i = 0; i < radio_b_tblen; i = i + 2) {
1096 if (radio_b_table[i] == 0xfe)
1097				/* Delay for the specified number of ms.
1098				 * Only RF configuration requires a delay. */
1099 mdelay(50);
1100 else if (radio_b_table[i] == 0xfd)
1101 mdelay(5);
1102 else if (radio_b_table[i] == 0xfc)
1103 mdelay(1);
1104 else if (radio_b_table[i] == 0xfb)
1105 udelay(50);
1106 else if (radio_b_table[i] == 0xfa)
1107 udelay(5);
1108 else if (radio_b_table[i] == 0xf9)
1109 udelay(1);
1110 else
1111 rtl92s_phy_set_rf_reg(hw, rfpath,
1112 radio_b_table[i],
1113 MASK20BITS,
1114 radio_b_table[i + 1]);
1115
1116 /* Add delay for ECS T20 & LG malow platform */
1117 udelay(1);
1118 }
1119 break;
1120 case RF90_PATH_C:
1121 ;
1122 break;
1123 case RF90_PATH_D:
1124 ;
1125 break;
1126 default:
1127 break;
1128 }
1129
1130 return rtstatus;
1131}
1132
1133
1134bool rtl92s_phy_mac_config(struct ieee80211_hw *hw)
1135{
1136 struct rtl_priv *rtlpriv = rtl_priv(hw);
1137 u32 i;
1138 u32 arraylength;
1139 u32 *ptraArray;
1140
1141 arraylength = MAC_2T_ARRAYLENGTH;
1142 ptraArray = rtl8192semac_2t_array;
1143
1144 for (i = 0; i < arraylength; i = i + 2)
1145 rtl_write_byte(rtlpriv, ptraArray[i], (u8)ptraArray[i + 1]);
1146
1147 return true;
1148}
1149
1150
1151bool rtl92s_phy_bb_config(struct ieee80211_hw *hw)
1152{
1153 struct rtl_priv *rtlpriv = rtl_priv(hw);
1154 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1155 bool rtstatus = true;
1156 u8 pathmap, index, rf_num = 0;
1157 u8 path1, path2;
1158
1159 _rtl92s_phy_init_register_definition(hw);
1160
1161 /* Config BB and AGC */
1162 rtstatus = _rtl92s_phy_bb_config_parafile(hw);
1163
1164
1165	/* Check BB/RF configuration setting. */
1166 /* We only need to configure RF which is turned on. */
1167 path1 = (u8)(rtl92s_phy_query_bb_reg(hw, RFPGA0_TXINFO, 0xf));
1168 mdelay(10);
1169 path2 = (u8)(rtl92s_phy_query_bb_reg(hw, ROFDM0_TRXPATHENABLE, 0xf));
1170 pathmap = path1 | path2;
1171
1172 rtlphy->rf_pathmap = pathmap;
1173 for (index = 0; index < 4; index++) {
1174 if ((pathmap >> index) & 0x1)
1175 rf_num++;
1176 }
1177
1178 if ((rtlphy->rf_type == RF_1T1R && rf_num != 1) ||
1179 (rtlphy->rf_type == RF_1T2R && rf_num != 2) ||
1180 (rtlphy->rf_type == RF_2T2R && rf_num != 2) ||
1181 (rtlphy->rf_type == RF_2T2R_GREEN && rf_num != 2)) {
1182 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
1183 ("RF_Type(%x) does not match "
1184 "RF_Num(%x)!!\n", rtlphy->rf_type, rf_num));
1185 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
1186 ("path1 0x%x, path2 0x%x, pathmap "
1187 "0x%x\n", path1, path2, pathmap));
1188 }
1189
1190 return rtstatus;
1191}
1192
1193bool rtl92s_phy_rf_config(struct ieee80211_hw *hw)
1194{
1195 struct rtl_priv *rtlpriv = rtl_priv(hw);
1196 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1197
1198 /* Initialize general global value */
1199 if (rtlphy->rf_type == RF_1T1R)
1200 rtlphy->num_total_rfpath = 1;
1201 else
1202 rtlphy->num_total_rfpath = 2;
1203
1204 /* Config BB and RF */
1205 return rtl92s_phy_rf6052_config(hw);
1206}
1207
1208void rtl92s_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
1209{
1210 struct rtl_priv *rtlpriv = rtl_priv(hw);
1211 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1212
1213 /* read rx initial gain */
1214 rtlphy->default_initialgain[0] = rtl_get_bbreg(hw,
1215 ROFDM0_XAAGCCORE1, MASKBYTE0);
1216 rtlphy->default_initialgain[1] = rtl_get_bbreg(hw,
1217 ROFDM0_XBAGCCORE1, MASKBYTE0);
1218 rtlphy->default_initialgain[2] = rtl_get_bbreg(hw,
1219 ROFDM0_XCAGCCORE1, MASKBYTE0);
1220 rtlphy->default_initialgain[3] = rtl_get_bbreg(hw,
1221 ROFDM0_XDAGCCORE1, MASKBYTE0);
1222 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Default initial gain "
1223 "(c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
1224 rtlphy->default_initialgain[0],
1225 rtlphy->default_initialgain[1],
1226 rtlphy->default_initialgain[2],
1227 rtlphy->default_initialgain[3]));
1228
1229 /* read framesync */
1230 rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0);
1231 rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
1232 MASKDWORD);
1233 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1234 ("Default framesync (0x%x) = 0x%x\n",
1235 ROFDM0_RXDETECTOR3, rtlphy->framesync));
1236
1237}
1238
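/* Look up the per-channel CCK and HT40 OFDM Tx power indexes for RF-A and
 * RF-B from the efuse-derived tables; the OFDM table used depends on the
 * 1T/2T RF type. */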
1239static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
1240 u8 *cckpowerlevel, u8 *ofdmpowerLevel)
1241{
1242 struct rtl_priv *rtlpriv = rtl_priv(hw);
1243 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1244 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1245 u8 index = (channel - 1);
1246
1247 /* 1. CCK */
1248 /* RF-A */
1249 cckpowerlevel[0] = rtlefuse->txpwrlevel_cck[0][index];
1250 /* RF-B */
1251 cckpowerlevel[1] = rtlefuse->txpwrlevel_cck[1][index];
1252
1253 /* 2. OFDM for 1T or 2T */
1254 if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) {
1255 /* Read HT 40 OFDM TX power */
1256 ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_1s[0][index];
1257 ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_1s[1][index];
1258 } else if (rtlphy->rf_type == RF_2T2R) {
1259 /* Read HT 40 OFDM TX power */
1260 ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index];
1261 ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index];
1262 }
1263}
1264
1265static void _rtl92s_phy_ccxpower_indexcheck(struct ieee80211_hw *hw,
1266 u8 channel, u8 *cckpowerlevel, u8 *ofdmpowerlevel)
1267{
1268 struct rtl_priv *rtlpriv = rtl_priv(hw);
1269 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1270
1271 rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
1272 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
1273}
1274
1275void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel)
1276{
1277 struct rtl_priv *rtlpriv = rtl_priv(hw);
1278 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1279 /* [0]:RF-A, [1]:RF-B */
1280 u8 cckpowerlevel[2], ofdmpowerLevel[2];
1281
1282 if (rtlefuse->txpwr_fromeprom == false)
1283 return;
1284
1285 /* Mainly we use RF-A Tx Power to write the Tx Power registers,
1286 * but the RF-B Tx Power must be calculated by the antenna diff.
1287 * So we have to rewrite Antenna gain offset register here.
1288 * Please refer to BB register 0x80c
1289 * 1. For CCK.
1290 * 2. For OFDM 1T or 2T */
1291 _rtl92s_phy_get_txpower_index(hw, channel, &cckpowerlevel[0],
1292 &ofdmpowerLevel[0]);
1293
1294 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1295 ("Channel-%d, cckPowerLevel (A / B) = "
1296 "0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
1297 channel, cckpowerlevel[0], cckpowerlevel[1],
1298 ofdmpowerLevel[0], ofdmpowerLevel[1]));
1299
1300 _rtl92s_phy_ccxpower_indexcheck(hw, channel, &cckpowerlevel[0],
1301 &ofdmpowerLevel[0]);
1302
1303 rtl92s_phy_rf6052_set_ccktxpower(hw, cckpowerlevel[0]);
1304 rtl92s_phy_rf6052_set_ofdmtxpower(hw, &ofdmpowerLevel[0], channel);
1305
1306}
1307
1308void rtl92s_phy_chk_fwcmd_iodone(struct ieee80211_hw *hw)
1309{
1310 struct rtl_priv *rtlpriv = rtl_priv(hw);
1311 u16 pollingcnt = 10000;
1312 u32 tmpvalue;
1313
1314	/* Make sure that the CMD IO has been accepted by the FW. */
1315 do {
1316 udelay(10);
1317
1318 tmpvalue = rtl_read_dword(rtlpriv, WFM5);
1319 if (tmpvalue == 0)
1320 break;
1321 } while (--pollingcnt);
1322
1323 if (pollingcnt == 0)
1324 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Set FW Cmd fail!!\n"));
1325}
1326
1327
1328static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
1329{
1330 struct rtl_priv *rtlpriv = rtl_priv(hw);
1331 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1332 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1333 u32 input, current_aid = 0;
1334
1335 if (is_hal_stop(rtlhal))
1336 return;
1337
1338 /* We re-map RA related CMD IO to combinational ones */
1339 /* if FW version is v.52 or later. */
1340 switch (rtlhal->current_fwcmd_io) {
1341 case FW_CMD_RA_REFRESH_N:
1342 rtlhal->current_fwcmd_io = FW_CMD_RA_REFRESH_N_COMB;
1343 break;
1344 case FW_CMD_RA_REFRESH_BG:
1345 rtlhal->current_fwcmd_io = FW_CMD_RA_REFRESH_BG_COMB;
1346 break;
1347 default:
1348 break;
1349 }
1350
1351 switch (rtlhal->current_fwcmd_io) {
1352 case FW_CMD_RA_RESET:
1353 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1354 ("FW_CMD_RA_RESET\n"));
1355 rtl_write_dword(rtlpriv, WFM5, FW_RA_RESET);
1356 rtl92s_phy_chk_fwcmd_iodone(hw);
1357 break;
1358 case FW_CMD_RA_ACTIVE:
1359 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1360 ("FW_CMD_RA_ACTIVE\n"));
1361 rtl_write_dword(rtlpriv, WFM5, FW_RA_ACTIVE);
1362 rtl92s_phy_chk_fwcmd_iodone(hw);
1363 break;
1364 case FW_CMD_RA_REFRESH_N:
1365 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1366 ("FW_CMD_RA_REFRESH_N\n"));
1367 input = FW_RA_REFRESH;
1368 rtl_write_dword(rtlpriv, WFM5, input);
1369 rtl92s_phy_chk_fwcmd_iodone(hw);
1370 rtl_write_dword(rtlpriv, WFM5, FW_RA_ENABLE_RSSI_MASK);
1371 rtl92s_phy_chk_fwcmd_iodone(hw);
1372 break;
1373 case FW_CMD_RA_REFRESH_BG:
1374 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1375 ("FW_CMD_RA_REFRESH_BG\n"));
1376 rtl_write_dword(rtlpriv, WFM5, FW_RA_REFRESH);
1377 rtl92s_phy_chk_fwcmd_iodone(hw);
1378 rtl_write_dword(rtlpriv, WFM5, FW_RA_DISABLE_RSSI_MASK);
1379 rtl92s_phy_chk_fwcmd_iodone(hw);
1380 break;
1381 case FW_CMD_RA_REFRESH_N_COMB:
1382 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1383 ("FW_CMD_RA_REFRESH_N_COMB\n"));
1384 input = FW_RA_IOT_N_COMB;
1385 rtl_write_dword(rtlpriv, WFM5, input);
1386 rtl92s_phy_chk_fwcmd_iodone(hw);
1387 break;
1388 case FW_CMD_RA_REFRESH_BG_COMB:
1389 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1390 ("FW_CMD_RA_REFRESH_BG_COMB\n"));
1391 input = FW_RA_IOT_BG_COMB;
1392 rtl_write_dword(rtlpriv, WFM5, input);
1393 rtl92s_phy_chk_fwcmd_iodone(hw);
1394 break;
1395 case FW_CMD_IQK_ENABLE:
1396 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1397 ("FW_CMD_IQK_ENABLE\n"));
1398 rtl_write_dword(rtlpriv, WFM5, FW_IQK_ENABLE);
1399 rtl92s_phy_chk_fwcmd_iodone(hw);
1400 break;
1401 case FW_CMD_PAUSE_DM_BY_SCAN:
1402 /* Lower initial gain */
1403 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, 0x17);
1404 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, 0x17);
1405 /* CCA threshold */
1406 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x40);
1407 break;
1408 case FW_CMD_RESUME_DM_BY_SCAN:
1409 /* CCA threshold */
1410 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
1411 rtl92s_phy_set_txpower(hw, rtlphy->current_channel);
1412 break;
1413 case FW_CMD_HIGH_PWR_DISABLE:
1414 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE)
1415 break;
1416
1417 /* Lower initial gain */
1418 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, 0x17);
1419 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, 0x17);
1420 /* CCA threshold */
1421 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x40);
1422 break;
1423 case FW_CMD_HIGH_PWR_ENABLE:
1424 if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) ||
1425 (rtlpriv->dm.dynamic_txpower_enable == true))
1426 break;
1427
1428 /* CCA threshold */
1429 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
1430 break;
1431 case FW_CMD_LPS_ENTER:
1432 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1433 ("FW_CMD_LPS_ENTER\n"));
1434 current_aid = rtlpriv->mac80211.assoc_id;
1435 rtl_write_dword(rtlpriv, WFM5, (FW_LPS_ENTER |
1436 ((current_aid | 0xc000) << 8)));
1437 rtl92s_phy_chk_fwcmd_iodone(hw);
1438		/* FW sets TXOP disable here, so disable EDCA
1439		 * turbo mode until the driver leaves LPS */
1440 break;
1441 case FW_CMD_LPS_LEAVE:
1442 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1443 ("FW_CMD_LPS_LEAVE\n"));
1444 rtl_write_dword(rtlpriv, WFM5, FW_LPS_LEAVE);
1445 rtl92s_phy_chk_fwcmd_iodone(hw);
1446 break;
1447 case FW_CMD_ADD_A2_ENTRY:
1448 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
1449 ("FW_CMD_ADD_A2_ENTRY\n"));
1450 rtl_write_dword(rtlpriv, WFM5, FW_ADD_A2_ENTRY);
1451 rtl92s_phy_chk_fwcmd_iodone(hw);
1452 break;
1453 case FW_CMD_CTRL_DM_BY_DRIVER:
1454 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1455 ("FW_CMD_CTRL_DM_BY_DRIVER\n"));
1456 rtl_write_dword(rtlpriv, WFM5, FW_CTRL_DM_BY_DRIVER);
1457 rtl92s_phy_chk_fwcmd_iodone(hw);
1458 break;
1459
1460 default:
1461 break;
1462 }
1463
1464 rtl92s_phy_chk_fwcmd_iodone(hw);
1465
1466 /* Clear FW CMD operation flag. */
1467 rtlhal->set_fwcmd_inprogress = false;
1468}
1469
1470bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1471{
1472 struct rtl_priv *rtlpriv = rtl_priv(hw);
1473 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1474 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1475 u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv);
1476 u16 fw_cmdmap = FW_CMD_IO_QUERY(rtlpriv);
1477 bool bPostProcessing = false;
1478
1479 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1480 ("Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
1481 fw_cmdio, rtlhal->set_fwcmd_inprogress));
1482
1483 do {
1484 /* We re-map to combined FW CMD ones if firmware version */
1485 /* is v.53 or later. */
1486 switch (fw_cmdio) {
1487 case FW_CMD_RA_REFRESH_N:
1488 fw_cmdio = FW_CMD_RA_REFRESH_N_COMB;
1489 break;
1490 case FW_CMD_RA_REFRESH_BG:
1491 fw_cmdio = FW_CMD_RA_REFRESH_BG_COMB;
1492 break;
1493 default:
1494 break;
1495 }
1496
1497 /* If firmware version is v.62 or later,
1498 * use FW_CMD_IO_SET for FW_CMD_CTRL_DM_BY_DRIVER */
1499 if (hal_get_firmwareversion(rtlpriv) >= 0x3E) {
1500 if (fw_cmdio == FW_CMD_CTRL_DM_BY_DRIVER)
1501 fw_cmdio = FW_CMD_CTRL_DM_BY_DRIVER_NEW;
1502 }
1503
1504
1505		/* We shall move all FW Cmd IO over to the Reg0x364
1506		 * DM map table in the future. */
1507 switch (fw_cmdio) {
1508 case FW_CMD_RA_INIT:
1509 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("RA init!!\n"));
1510 fw_cmdmap |= FW_RA_INIT_CTL;
1511 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1512 /* Clear control flag to sync with FW. */
1513 FW_CMD_IO_CLR(rtlpriv, FW_RA_INIT_CTL);
1514 break;
1515 case FW_CMD_DIG_DISABLE:
1516 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1517 ("Set DIG disable!!\n"));
1518 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1519 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1520 break;
1521 case FW_CMD_DIG_ENABLE:
1522 case FW_CMD_DIG_RESUME:
1523 if (!(rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE)) {
1524 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1525 ("Set DIG enable or resume!!\n"));
1526 fw_cmdmap |= (FW_DIG_ENABLE_CTL | FW_SS_CTL);
1527 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1528 }
1529 break;
1530 case FW_CMD_DIG_HALT:
1531 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1532 ("Set DIG halt!!\n"));
1533 fw_cmdmap &= ~(FW_DIG_ENABLE_CTL | FW_SS_CTL);
1534 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1535 break;
1536 case FW_CMD_TXPWR_TRACK_THERMAL: {
1537 u8 thermalval = 0;
1538 fw_cmdmap |= FW_PWR_TRK_CTL;
1539
1540 /* Clear FW parameter in terms of thermal parts. */
1541 fw_param &= FW_PWR_TRK_PARAM_CLR;
1542
1543 thermalval = rtlpriv->dm.thermalvalue;
1544 fw_param |= ((thermalval << 24) |
1545 (rtlefuse->thermalmeter[0] << 16));
1546
1547 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1548 ("Set TxPwr tracking!! "
1549 "FwCmdMap(%#x), FwParam(%#x)\n",
1550 fw_cmdmap, fw_param));
1551
1552 FW_CMD_PARA_SET(rtlpriv, fw_param);
1553 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1554
1555 /* Clear control flag to sync with FW. */
1556 FW_CMD_IO_CLR(rtlpriv, FW_PWR_TRK_CTL);
1557 }
1558 break;
1559		/* The following FW CMDs are only compatible with
1560		 * v.53 or later. */
1561 case FW_CMD_RA_REFRESH_N_COMB:
1562 fw_cmdmap |= FW_RA_N_CTL;
1563
1564 /* Clear RA BG mode control. */
1565 fw_cmdmap &= ~(FW_RA_BG_CTL | FW_RA_INIT_CTL);
1566
1567 /* Clear FW parameter in terms of RA parts. */
1568 fw_param &= FW_RA_PARAM_CLR;
1569
1570 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1571 ("[FW CMD] [New Version] "
1572 "Set RA/IOT Comb in n mode!! FwCmdMap(%#x), "
1573 "FwParam(%#x)\n", fw_cmdmap, fw_param));
1574
1575 FW_CMD_PARA_SET(rtlpriv, fw_param);
1576 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1577
1578 /* Clear control flag to sync with FW. */
1579 FW_CMD_IO_CLR(rtlpriv, FW_RA_N_CTL);
1580 break;
1581 case FW_CMD_RA_REFRESH_BG_COMB:
1582 fw_cmdmap |= FW_RA_BG_CTL;
1583
1584 /* Clear RA n-mode control. */
1585 fw_cmdmap &= ~(FW_RA_N_CTL | FW_RA_INIT_CTL);
1586 /* Clear FW parameter in terms of RA parts. */
1587 fw_param &= FW_RA_PARAM_CLR;
1588
1589 FW_CMD_PARA_SET(rtlpriv, fw_param);
1590 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1591
1592 /* Clear control flag to sync with FW. */
1593 FW_CMD_IO_CLR(rtlpriv, FW_RA_BG_CTL);
1594 break;
1595 case FW_CMD_IQK_ENABLE:
1596 fw_cmdmap |= FW_IQK_CTL;
1597 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1598 /* Clear control flag to sync with FW. */
1599 FW_CMD_IO_CLR(rtlpriv, FW_IQK_CTL);
1600 break;
1601		/* The following FW CMD is compatible with v.62 or later. */
1602 case FW_CMD_CTRL_DM_BY_DRIVER_NEW:
1603 fw_cmdmap |= FW_DRIVER_CTRL_DM_CTL;
1604 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1605 break;
1606		/* The following FW CMDs need post-processing later. */
1607 case FW_CMD_RESUME_DM_BY_SCAN:
1608 fw_cmdmap |= (FW_DIG_ENABLE_CTL |
1609 FW_HIGH_PWR_ENABLE_CTL |
1610 FW_SS_CTL);
1611
1612 if (rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE ||
1613 !digtable.dig_enable_flag)
1614 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1615
1616 if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) ||
1617 (rtlpriv->dm.dynamic_txpower_enable == true))
1618 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL;
1619
1620 if ((digtable.dig_ext_port_stage ==
1621 DIG_EXT_PORT_STAGE_0) ||
1622 (digtable.dig_ext_port_stage ==
1623 DIG_EXT_PORT_STAGE_1))
1624 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1625
1626 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1627 bPostProcessing = true;
1628 break;
1629 case FW_CMD_PAUSE_DM_BY_SCAN:
1630 fw_cmdmap &= ~(FW_DIG_ENABLE_CTL |
1631 FW_HIGH_PWR_ENABLE_CTL |
1632 FW_SS_CTL);
1633 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1634 bPostProcessing = true;
1635 break;
1636 case FW_CMD_HIGH_PWR_DISABLE:
1637 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL;
1638 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1639 bPostProcessing = true;
1640 break;
1641 case FW_CMD_HIGH_PWR_ENABLE:
1642 if (!(rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) &&
1643 (rtlpriv->dm.dynamic_txpower_enable != true)) {
1644 fw_cmdmap |= (FW_HIGH_PWR_ENABLE_CTL |
1645 FW_SS_CTL);
1646 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1647 bPostProcessing = true;
1648 }
1649 break;
1650 case FW_CMD_DIG_MODE_FA:
1651 fw_cmdmap |= FW_FA_CTL;
1652 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1653 break;
1654 case FW_CMD_DIG_MODE_SS:
1655 fw_cmdmap &= ~FW_FA_CTL;
1656 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1657 break;
1658 case FW_CMD_PAPE_CONTROL:
1659 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
1660 ("[FW CMD] Set PAPE Control\n"));
1661 fw_cmdmap &= ~FW_PAPE_CTL_BY_SW_HW;
1662
1663 FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
1664 break;
1665 default:
1666 /* Pass to original FW CMD processing callback
1667 * routine. */
1668 bPostProcessing = true;
1669 break;
1670 }
1671 } while (false);
1672
1673	/* We shall post-process these FW CMDs if
1674	 * bPostProcessing is set. */
1675 if (bPostProcessing && !rtlhal->set_fwcmd_inprogress) {
1676 rtlhal->set_fwcmd_inprogress = true;
1677 /* Update current FW Cmd for callback use. */
1678 rtlhal->current_fwcmd_io = fw_cmdio;
1679 } else {
1680 return false;
1681 }
1682
1683 _rtl92s_phy_set_fwcmd_io(hw);
1684 return true;
1685}
1686
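/* Poll bit 5 of register 0x554 (up to 100 iterations, 50 us apart) until
 * the EPHY parameter switch has completed. */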
1687static void _rtl92s_phy_check_ephy_switchready(struct ieee80211_hw *hw)
1688{
1689 struct rtl_priv *rtlpriv = rtl_priv(hw);
1690 u32 delay = 100;
1691 u8 regu1;
1692
1693 regu1 = rtl_read_byte(rtlpriv, 0x554);
1694 while ((regu1 & BIT(5)) && (delay > 0)) {
1695 regu1 = rtl_read_byte(rtlpriv, 0x554);
1696 delay--;
1697 /* We delay only 50us to prevent
1698 * being scheduled out. */
1699 udelay(50);
1700 }
1701}
1702
1703void rtl92s_phy_switch_ephy_parameter(struct ieee80211_hw *hw)
1704{
1705 struct rtl_priv *rtlpriv = rtl_priv(hw);
1706 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1707
1708	/* Allow switching the clock request even when the PG
1709	 * setting does not support clock request.  This is the
1710	 * backdoor way to switch the clock request before
1711	 * entering ASPM or D3. */
1712 rtl_write_dword(rtlpriv, 0x540, 0x73c11);
1713 rtl_write_dword(rtlpriv, 0x548, 0x2407c);
1714
1715 /* Switch EPHY parameter!!!! */
1716 rtl_write_word(rtlpriv, 0x550, 0x1000);
1717 rtl_write_byte(rtlpriv, 0x554, 0x20);
1718 _rtl92s_phy_check_ephy_switchready(hw);
1719
1720 rtl_write_word(rtlpriv, 0x550, 0xa0eb);
1721 rtl_write_byte(rtlpriv, 0x554, 0x3e);
1722 _rtl92s_phy_check_ephy_switchready(hw);
1723
1724 rtl_write_word(rtlpriv, 0x550, 0xff80);
1725 rtl_write_byte(rtlpriv, 0x554, 0x39);
1726 _rtl92s_phy_check_ephy_switchready(hw);
1727
1728 /* Delay L1 enter time */
1729 if (ppsc->support_aspm && !ppsc->support_backdoor)
1730 rtl_write_byte(rtlpriv, 0x560, 0x40);
1731 else
1732 rtl_write_byte(rtlpriv, 0x560, 0x00);
1733
1734}
1735
1736void rtl92s_phy_set_beacon_hwreg(struct ieee80211_hw *hw, u16 BeaconInterval)
1737{
1738 struct rtl_priv *rtlpriv = rtl_priv(hw);
1739 rtl_write_dword(rtlpriv, WFM5, 0xF1000000 | (BeaconInterval << 8));
1740}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
new file mode 100644
index 000000000000..37e504af6446
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.h
@@ -0,0 +1,101 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __RTL92S_PHY_H__
30#define __RTL92S_PHY_H__
31
32#define MAX_TXPWR_IDX_NMODE_92S 63
33#define MAX_DOZE_WAITING_TIMES_9x 64
34
35/* Channel switch: the size of the
36 * command tables for channel switching */
37#define MAX_PRECMD_CNT 16
38#define MAX_RFDEPENDCMD_CNT 16
39#define MAX_POSTCMD_CNT 16
40
41#define RF90_PATH_MAX 4
42
43enum version_8192s {
44 VERSION_8192S_ACUT,
45 VERSION_8192S_BCUT,
46 VERSION_8192S_CCUT
47};
48
49enum swchnlcmd_id {
50 CMDID_END,
51 CMDID_SET_TXPOWEROWER_LEVEL,
52 CMDID_BBREGWRITE10,
53 CMDID_WRITEPORT_ULONG,
54 CMDID_WRITEPORT_USHORT,
55 CMDID_WRITEPORT_UCHAR,
56 CMDID_RF_WRITEREG,
57};
58
59struct swchnlcmd {
60 enum swchnlcmd_id cmdid;
61 u32 para1;
62 u32 para2;
63 u32 msdelay;
64};
65
66enum baseband_config_type {
67	/* PHY register configuration */
68 BASEBAND_CONFIG_PHY_REG = 0,
69	/* AGC table configuration */
70 BASEBAND_CONFIG_AGC_TAB = 1,
71};
72
73#define hal_get_firmwareversion(rtlpriv) \
74 (((struct rt_firmware *)(rtlpriv->rtlhal.pfirmware))->firmwareversion)
75
76u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
77void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
78 u32 data);
79void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
80u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
81 u32 regaddr, u32 bitmask);
82void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
83 u32 regaddr, u32 bitmask, u32 data);
84void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
85 enum nl80211_channel_type ch_type);
86u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw);
87bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
88 enum rf_pwrstate rfpower_state);
89bool rtl92s_phy_mac_config(struct ieee80211_hw *hw);
90void rtl92s_phy_switch_ephy_parameter(struct ieee80211_hw *hw);
91bool rtl92s_phy_bb_config(struct ieee80211_hw *hw);
92bool rtl92s_phy_rf_config(struct ieee80211_hw *hw);
93void rtl92s_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
94void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel);
95bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fwcmd_io);
96void rtl92s_phy_chk_fwcmd_iodone(struct ieee80211_hw *hw);
97void rtl92s_phy_set_beacon_hwreg(struct ieee80211_hw *hw, u16 beaconinterval);
98u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath);
99
100#endif
101
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
new file mode 100644
index 000000000000..0116eaddbfac
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -0,0 +1,1188 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __REALTEK_92S_REG_H__
30#define __REALTEK_92S_REG_H__
31
32/* 1. System Configuration Registers */
33#define REG_SYS_ISO_CTRL 0x0000
34#define REG_SYS_FUNC_EN 0x0002
35#define PMC_FSM 0x0004
36#define SYS_CLKR 0x0008
37#define EPROM_CMD 0x000A
38#define EE_VPD 0x000C
39#define AFE_MISC 0x0010
40#define SPS0_CTRL 0x0011
41#define SPS1_CTRL 0x0018
42#define RF_CTRL 0x001F
43#define LDOA15_CTRL 0x0020
44#define LDOV12D_CTRL 0x0021
45#define LDOHCI12_CTRL 0x0022
46#define LDO_USB_SDIO 0x0023
47#define LPLDO_CTRL 0x0024
48#define AFE_XTAL_CTRL 0x0026
49#define AFE_PLL_CTRL 0x0028
50#define REG_EFUSE_CTRL 0x0030
51#define REG_EFUSE_TEST 0x0034
52#define PWR_DATA 0x0038
53#define DBG_PORT 0x003A
54#define DPS_TIMER 0x003C
55#define RCLK_MON 0x003E
56
57/* 2. Command Control Registers */
58#define CMDR 0x0040
59#define TXPAUSE 0x0042
60#define LBKMD_SEL 0x0043
61#define TCR 0x0044
62#define RCR 0x0048
63#define MSR 0x004C
64#define SYSF_CFG 0x004D
65#define RX_PKY_LIMIT 0x004E
66#define MBIDCTRL 0x004F
67
68/* 3. MACID Setting Registers */
69#define MACIDR 0x0050
70#define MACIDR0 0x0050
71#define MACIDR4 0x0054
72#define BSSIDR 0x0058
73#define HWVID 0x005E
74#define MAR 0x0060
75#define MBIDCAMCONTENT 0x0068
76#define MBIDCAMCFG 0x0070
77#define BUILDTIME 0x0074
78#define BUILDUSER 0x0078
79
80#define IDR0 MACIDR0
81#define IDR4 MACIDR4
82
83/* 4. Timing Control Registers */
84#define TSFR 0x0080
85#define SLOT_TIME 0x0089
86#define USTIME 0x008A
87#define SIFS_CCK 0x008C
88#define SIFS_OFDM 0x008E
89#define PIFS_TIME 0x0090
90#define ACK_TIMEOUT 0x0091
91#define EIFSTR 0x0092
92#define BCN_INTERVAL 0x0094
93#define ATIMWND 0x0096
94#define BCN_DRV_EARLY_INT 0x0098
95#define BCN_DMATIME 0x009A
96#define BCN_ERR_THRESH 0x009C
97#define MLT 0x009D
98#define RSVD_MAC_TUNE_US 0x009E
99
100/* 5. FIFO Control Registers */
101#define RQPN 0x00A0
102#define RQPN1 0x00A0
103#define RQPN2 0x00A1
104#define RQPN3 0x00A2
105#define RQPN4 0x00A3
106#define RQPN5 0x00A4
107#define RQPN6 0x00A5
108#define RQPN7 0x00A6
109#define RQPN8 0x00A7
110#define RQPN9 0x00A8
111#define RQPN10 0x00A9
112#define LD_RQPN 0x00AB
113#define RXFF_BNDY 0x00AC
114#define RXRPT_BNDY 0x00B0
115#define TXPKTBUF_PGBNDY 0x00B4
116#define PBP 0x00B5
117#define RXDRVINFO_SZ 0x00B6
118#define TXFF_STATUS 0x00B7
119#define RXFF_STATUS 0x00B8
120#define TXFF_EMPTY_TH 0x00B9
121#define SDIO_RX_BLKSZ 0x00BC
122#define RXDMA 0x00BD
123#define RXPKT_NUM 0x00BE
124#define C2HCMD_UDT_SIZE 0x00C0
125#define C2HCMD_UDT_ADDR 0x00C2
126#define FIFOPAGE1 0x00C4
127#define FIFOPAGE2 0x00C8
128#define FIFOPAGE3 0x00CC
129#define FIFOPAGE4 0x00D0
130#define FIFOPAGE5 0x00D4
131#define FW_RSVD_PG_CRTL 0x00D8
132#define RXDMA_AGG_PG_TH 0x00D9
133#define TXDESC_MSK 0x00DC
134#define TXRPTFF_RDPTR 0x00E0
135#define TXRPTFF_WTPTR 0x00E4
136#define C2HFF_RDPTR 0x00E8
137#define C2HFF_WTPTR 0x00EC
138#define RXFF0_RDPTR 0x00F0
139#define RXFF0_WTPTR 0x00F4
140#define RXFF1_RDPTR 0x00F8
141#define RXFF1_WTPTR 0x00FC
142#define RXRPT0_RDPTR 0x0100
143#define RXRPT0_WTPTR 0x0104
144#define RXRPT1_RDPTR 0x0108
145#define RXRPT1_WTPTR 0x010C
146#define RX0_UDT_SIZE 0x0110
147#define RX1PKTNUM 0x0114
148#define RXFILTERMAP 0x0116
149#define RXFILTERMAP_GP1 0x0118
150#define RXFILTERMAP_GP2 0x011A
151#define RXFILTERMAP_GP3 0x011C
152#define BCNQ_CTRL 0x0120
153#define MGTQ_CTRL 0x0124
154#define HIQ_CTRL 0x0128
155#define VOTID7_CTRL 0x012c
156#define VOTID6_CTRL 0x0130
157#define VITID5_CTRL 0x0134
158#define VITID4_CTRL 0x0138
159#define BETID3_CTRL 0x013c
160#define BETID0_CTRL 0x0140
161#define BKTID2_CTRL 0x0144
162#define BKTID1_CTRL 0x0148
163#define CMDQ_CTRL 0x014c
164#define TXPKT_NUM_CTRL 0x0150
165#define TXQ_PGADD 0x0152
166#define TXFF_PG_NUM 0x0154
167#define TRXDMA_STATUS 0x0156
168
169/* 6. Adaptive Control Registers */
170#define INIMCS_SEL 0x0160
171#define TX_RATE_REG INIMCS_SEL
172#define INIRTSMCS_SEL 0x0180
173#define RRSR 0x0181
174#define ARFR0 0x0184
175#define ARFR1 0x0188
176#define ARFR2 0x018C
177#define ARFR3 0x0190
178#define ARFR4 0x0194
179#define ARFR5 0x0198
180#define ARFR6 0x019C
181#define ARFR7 0x01A0
182#define AGGLEN_LMT_H 0x01A7
183#define AGGLEN_LMT_L 0x01A8
184#define DARFRC 0x01B0
185#define RARFRC 0x01B8
186#define MCS_TXAGC 0x01C0
187#define CCK_TXAGC 0x01C8
188
189/* 7. EDCA Setting Registers */
190#define EDCAPARA_VO 0x01D0
191#define EDCAPARA_VI 0x01D4
192#define EDCAPARA_BE 0x01D8
193#define EDCAPARA_BK 0x01DC
194#define BCNTCFG 0x01E0
195#define CWRR 0x01E2
196#define ACMAVG 0x01E4
197#define AcmHwCtrl 0x01E7
198#define VO_ADMTM 0x01E8
199#define VI_ADMTM 0x01EC
200#define BE_ADMTM 0x01F0
201#define RETRY_LIMIT 0x01F4
202#define SG_RATE 0x01F6
203
204/* 8. WMAC, BA and CCX related Register. */
205#define NAV_CTRL 0x0200
206#define BW_OPMODE 0x0203
207#define BACAMCMD 0x0204
208#define BACAMCONTENT 0x0208
209
210/* the 0x2xx register WMAC definition */
211#define LBDLY 0x0210
212#define FWDLY 0x0211
213#define HWPC_RX_CTRL 0x0218
214#define MQIR 0x0220
215#define MAIR 0x0222
216#define MSIR 0x0224
217#define CLM_RESULT 0x0227
218#define NHM_RPI_CNT 0x0228
219#define RXERR_RPT 0x0230
220#define NAV_PROT_LEN 0x0234
221#define CFEND_TH 0x0236
222#define AMPDU_MIN_SPACE 0x0237
223#define TXOP_STALL_CTRL 0x0238
224
225/* 9. Security Control Registers */
226#define REG_RWCAM 0x0240
227#define REG_WCAMI 0x0244
228#define REG_RCAMO 0x0248
229#define REG_CAMDBG 0x024C
230#define REG_SECR 0x0250
231
232/* 10. Power Save Control Registers */
233#define WOW_CTRL 0x0260
234#define PSSTATUS 0x0261
235#define PSSWITCH 0x0262
236#define MIMOPS_WAIT_PERIOD 0x0263
237#define LPNAV_CTRL 0x0264
238#define WFM0 0x0270
239#define WFM1 0x0280
240#define WFM2 0x0290
241#define WFM3 0x02A0
242#define WFM4 0x02B0
243#define WFM5 0x02C0
244#define WFCRC 0x02D0
245#define FW_RPT_REG 0x02c4
246
247/* 11. General Purpose Registers */
248#define PSTIME 0x02E0
249#define TIMER0 0x02E4
250#define TIMER1 0x02E8
251#define GPIO_CTRL 0x02EC
252#define GPIO_IN 0x02EC
253#define GPIO_OUT 0x02ED
254#define GPIO_IO_SEL 0x02EE
255#define GPIO_MOD 0x02EF
256#define GPIO_INTCTRL 0x02F0
257#define MAC_PINMUX_CFG 0x02F1
258#define LEDCFG 0x02F2
259#define PHY_REG 0x02F3
260#define PHY_REG_DATA 0x02F4
261#define REG_EFUSE_CLK 0x02F8
262
263/* 12. Host Interrupt Status Registers */
264#define INTA_MASK 0x0300
265#define ISR 0x0308
266
267/* 13. Test Mode and Debug Control Registers */
268#define DBG_PORT_SWITCH 0x003A
269#define BIST 0x0310
270#define DBS 0x0314
271#define CPUINST 0x0318
272#define CPUCAUSE 0x031C
273#define LBUS_ERR_ADDR 0x0320
274#define LBUS_ERR_CMD 0x0324
275#define LBUS_ERR_DATA_L 0x0328
276#define LBUS_ERR_DATA_H 0x032C
277#define LX_EXCEPTION_ADDR 0x0330
278#define WDG_CTRL 0x0334
279#define INTMTU 0x0338
280#define INTM 0x033A
281#define FDLOCKTURN0 0x033C
282#define FDLOCKTURN1 0x033D
283#define TRXPKTBUF_DBG_DATA 0x0340
284#define TRXPKTBUF_DBG_CTRL 0x0348
285#define DPLL 0x034A
286#define CBUS_ERR_ADDR 0x0350
287#define CBUS_ERR_CMD 0x0354
288#define CBUS_ERR_DATA_L 0x0358
289#define CBUS_ERR_DATA_H 0x035C
290#define USB_SIE_INTF_ADDR 0x0360
291#define USB_SIE_INTF_WD 0x0361
292#define USB_SIE_INTF_RD 0x0362
293#define USB_SIE_INTF_CTRL 0x0363
294#define LBUS_MON_ADDR 0x0364
295#define LBUS_ADDR_MASK 0x0368
296
297/* Boundary is 0x37F */
298
299/* 14. PCIE config register */
300#define TP_POLL 0x0500
301#define PM_CTRL 0x0502
302#define PCIF 0x0503
303
304#define THPDA 0x0514
305#define TMDA 0x0518
306#define TCDA 0x051C
307#define HDA 0x0520
308#define TVODA 0x0524
309#define TVIDA 0x0528
310#define TBEDA 0x052C
311#define TBKDA 0x0530
312#define TBDA 0x0534
313#define RCDA 0x0538
314#define RDQDA 0x053C
315#define DBI_WDATA 0x0540
316#define DBI_RDATA 0x0544
317#define DBI_CTRL 0x0548
318#define MDIO_DATA 0x0550
319#define MDIO_CTRL 0x0554
320#define PCI_RPWM 0x0561
321#define PCI_CPWM 0x0563
322
323/* Config register (Offset 0x800-) */
324#define PHY_CCA 0x803
325
326/* Min Spacing related settings. */
327#define MAX_MSS_DENSITY_2T 0x13
328#define MAX_MSS_DENSITY_1T 0x0A
329
330/* Rx DMA Control related settings */
331#define RXDMA_AGG_EN BIT(7)
332
333#define RPWM PCI_RPWM
334
335/* Register Bit and Content definition */
336
337#define ISO_MD2PP BIT(0)
338#define ISO_PA2PCIE BIT(3)
339#define ISO_PLL2MD BIT(4)
340#define ISO_PWC_DV2RP BIT(11)
341#define ISO_PWC_RV2RP BIT(12)
342
343
344#define FEN_MREGEN BIT(15)
345#define FEN_DCORE BIT(11)
346#define FEN_CPUEN BIT(10)
347
348#define PAD_HWPD_IDN BIT(22)
349
350#define SYS_CLKSEL_80M BIT(0)
351#define SYS_PS_CLKSEL BIT(1)
352#define SYS_CPU_CLKSEL BIT(2)
353#define SYS_MAC_CLK_EN BIT(11)
354#define SYS_SWHW_SEL BIT(14)
355#define SYS_FWHW_SEL BIT(15)
356
357#define CmdEEPROM_En BIT(5)
358#define CmdEERPOMSEL BIT(4)
359#define Cmd9346CR_9356SEL BIT(4)
360
361#define AFE_MBEN BIT(1)
362#define AFE_BGEN BIT(0)
363
364#define SPS1_SWEN BIT(1)
365#define SPS1_LDEN BIT(0)
366
367#define RF_EN BIT(0)
368#define RF_RSTB BIT(1)
369#define RF_SDMRSTB BIT(2)
370
371#define LDA15_EN BIT(0)
372
373#define LDV12_EN BIT(0)
374#define LDV12_SDBY BIT(1)
375
376#define XTAL_GATE_AFE BIT(10)
377
378#define APLL_EN BIT(0)
379
380#define AFR_CardBEn BIT(0)
381#define AFR_CLKRUN_SEL BIT(1)
382#define AFR_FuncRegEn BIT(2)
383
384#define APSDOFF_STATUS BIT(15)
385#define APSDOFF BIT(14)
386#define BBRSTN BIT(13)
387#define BB_GLB_RSTN BIT(12)
388#define SCHEDULE_EN BIT(10)
389#define MACRXEN BIT(9)
390#define MACTXEN BIT(8)
391#define DDMA_EN BIT(7)
392#define FW2HW_EN BIT(6)
393#define RXDMA_EN BIT(5)
394#define TXDMA_EN BIT(4)
395#define HCI_RXDMA_EN BIT(3)
396#define HCI_TXDMA_EN BIT(2)
397
398#define StopHCCA BIT(6)
399#define StopHigh BIT(5)
400#define StopMgt BIT(4)
401#define StopVO BIT(3)
402#define StopVI BIT(2)
403#define StopBE BIT(1)
404#define StopBK BIT(0)
405
406#define LBK_NORMAL 0x00
407#define LBK_MAC_LB (BIT(0) | BIT(1) | BIT(3))
408#define LBK_MAC_DLB (BIT(0) | BIT(1))
409#define LBK_DMA_LB (BIT(0) | BIT(1) | BIT(2))
410
411#define TCP_OFDL_EN BIT(25)
412#define HWPC_TX_EN BIT(24)
413#define TXDMAPRE2FULL BIT(23)
414#define DISCW BIT(20)
415#define TCRICV BIT(19)
416#define CfendForm BIT(17)
417#define TCRCRC BIT(16)
418#define FAKE_IMEM_EN BIT(15)
419#define TSFRST BIT(9)
420#define TSFEN BIT(8)
421#define FWALLRDY (BIT(0) | BIT(1) | BIT(2) | \
422 BIT(3) | BIT(4) | BIT(5) | \
423 BIT(6) | BIT(7))
424#define FWRDY BIT(7)
425#define BASECHG BIT(6)
426#define IMEM BIT(5)
427#define DMEM_CODE_DONE BIT(4)
428#define EXT_IMEM_CHK_RPT BIT(3)
429#define EXT_IMEM_CODE_DONE BIT(2)
430#define IMEM_CHK_RPT BIT(1)
431#define IMEM_CODE_DONE BIT(0)
432#define IMEM_CODE_DONE BIT(0)
433#define IMEM_CHK_RPT BIT(1)
434#define EMEM_CODE_DONE BIT(2)
435#define EMEM_CHK_RPT BIT(3)
436#define DMEM_CODE_DONE BIT(4)
437#define IMEM_RDY BIT(5)
438#define BASECHG BIT(6)
439#define FWRDY BIT(7)
440#define LOAD_FW_READY (IMEM_CODE_DONE | \
441 IMEM_CHK_RPT | \
442 EMEM_CODE_DONE | \
443 EMEM_CHK_RPT | \
444 DMEM_CODE_DONE | \
445 IMEM_RDY | \
446 BASECHG | \
447 FWRDY)
448#define TCR_TSFEN BIT(8)
449#define TCR_TSFRST BIT(9)
450#define TCR_FAKE_IMEM_EN BIT(15)
451#define TCR_CRC BIT(16)
452#define TCR_ICV BIT(19)
453#define TCR_DISCW BIT(20)
454#define TCR_HWPC_TX_EN BIT(24)
455#define TCR_TCP_OFDL_EN BIT(25)
456#define TXDMA_INIT_VALUE (IMEM_CHK_RPT | \
457 EXT_IMEM_CHK_RPT)
458
459#define RCR_APPFCS BIT(31)
460#define RCR_DIS_ENC_2BYTE BIT(30)
461#define RCR_DIS_AES_2BYTE BIT(29)
462#define RCR_HTC_LOC_CTRL BIT(28)
463#define RCR_ENMBID BIT(27)
464#define RCR_RX_TCPOFDL_EN BIT(26)
465#define RCR_APP_PHYST_RXFF BIT(25)
466#define RCR_APP_PHYST_STAFF BIT(24)
467#define RCR_CBSSID BIT(23)
468#define RCR_APWRMGT BIT(22)
469#define RCR_ADD3 BIT(21)
470#define RCR_AMF BIT(20)
471#define RCR_ACF BIT(19)
472#define RCR_ADF BIT(18)
473#define RCR_APP_MIC BIT(17)
474#define RCR_APP_ICV BIT(16)
475#define RCR_RXFTH BIT(13)
476#define RCR_AICV BIT(12)
477#define RCR_RXDESC_LK_EN BIT(11)
478#define RCR_APP_BA_SSN BIT(6)
479#define RCR_ACRC32 BIT(5)
480#define RCR_RXSHFT_EN BIT(4)
481#define RCR_AB BIT(3)
482#define RCR_AM BIT(2)
483#define RCR_APM BIT(1)
484#define RCR_AAP BIT(0)
485#define RCR_MXDMA_OFFSET 8
486#define RCR_FIFO_OFFSET 13
487
488
489#define MSR_LINK_MASK ((1 << 0) | (1 << 1))
490#define MSR_LINK_MANAGED 2
491#define MSR_LINK_NONE 0
492#define MSR_LINK_SHIFT 0
493#define MSR_LINK_ADHOC 1
494#define MSR_LINK_MASTER 3
495#define MSR_NOLINK 0x00
496#define MSR_ADHOC 0x01
497#define MSR_INFRA 0x02
498#define MSR_AP 0x03
499
500#define ENUART BIT(7)
501#define ENJTAG BIT(3)
502#define BTMODE (BIT(2) | BIT(1))
503#define ENBT BIT(0)
504
505#define ENMBID BIT(7)
506#define BCNUM (BIT(6) | BIT(5) | BIT(4))
507
508#define USTIME_EDCA 0xFF00
509#define USTIME_TSF 0x00FF
510
511#define SIFS_TRX 0xFF00
512#define SIFS_CTX 0x00FF
513
514#define ENSWBCN BIT(15)
515#define DRVERLY_TU 0x0FF0
516#define DRVERLY_US 0x000F
517#define BCN_TCFG_CW_SHIFT 8
518#define BCN_TCFG_IFS 0
519
520#define RRSR_RSC_OFFSET 21
521#define RRSR_SHORT_OFFSET 23
522#define RRSR_RSC_BW_40M 0x600000
523#define RRSR_RSC_UPSUBCHNL 0x400000
524#define RRSR_RSC_LOWSUBCHNL 0x200000
525#define RRSR_SHORT 0x800000
526#define RRSR_1M BIT(0)
527#define RRSR_2M BIT(1)
528#define RRSR_5_5M BIT(2)
529#define RRSR_11M BIT(3)
530#define RRSR_6M BIT(4)
531#define RRSR_9M BIT(5)
532#define RRSR_12M BIT(6)
533#define RRSR_18M BIT(7)
534#define RRSR_24M BIT(8)
535#define RRSR_36M BIT(9)
536#define RRSR_48M BIT(10)
537#define RRSR_54M BIT(11)
538#define RRSR_MCS0 BIT(12)
539#define RRSR_MCS1 BIT(13)
540#define RRSR_MCS2 BIT(14)
541#define RRSR_MCS3 BIT(15)
542#define RRSR_MCS4 BIT(16)
543#define RRSR_MCS5 BIT(17)
544#define RRSR_MCS6 BIT(18)
545#define RRSR_MCS7 BIT(19)
546#define BRSR_AckShortPmb BIT(23)
547
548#define RATR_1M 0x00000001
549#define RATR_2M 0x00000002
550#define RATR_55M 0x00000004
551#define RATR_11M 0x00000008
552#define RATR_6M 0x00000010
553#define RATR_9M 0x00000020
554#define RATR_12M 0x00000040
555#define RATR_18M 0x00000080
556#define RATR_24M 0x00000100
557#define RATR_36M 0x00000200
558#define RATR_48M 0x00000400
559#define RATR_54M 0x00000800
560#define RATR_MCS0 0x00001000
561#define RATR_MCS1 0x00002000
562#define RATR_MCS2 0x00004000
563#define RATR_MCS3 0x00008000
564#define RATR_MCS4 0x00010000
565#define RATR_MCS5 0x00020000
566#define RATR_MCS6 0x00040000
567#define RATR_MCS7 0x00080000
568#define RATR_MCS8 0x00100000
569#define RATR_MCS9 0x00200000
570#define RATR_MCS10 0x00400000
571#define RATR_MCS11 0x00800000
572#define RATR_MCS12 0x01000000
573#define RATR_MCS13 0x02000000
574#define RATR_MCS14 0x04000000
575#define RATR_MCS15 0x08000000
576
577#define RATE_ALL_CCK (RATR_1M | RATR_2M | \
578 RATR_55M | RATR_11M)
579#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | \
580 RATR_12M | RATR_18M | \
581 RATR_24M | RATR_36M | \
582 RATR_48M | RATR_54M)
583#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | \
584 RATR_MCS2 | RATR_MCS3 | \
585 RATR_MCS4 | RATR_MCS5 | \
586 RATR_MCS6 | RATR_MCS7)
587#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | \
588 RATR_MCS10 | RATR_MCS11 | \
589 RATR_MCS12 | RATR_MCS13 | \
590 RATR_MCS14 | RATR_MCS15)
591
592#define AC_PARAM_TXOP_LIMIT_OFFSET 16
593#define AC_PARAM_ECW_MAX_OFFSET 12
594#define AC_PARAM_ECW_MIN_OFFSET 8
595#define AC_PARAM_AIFS_OFFSET 0
596
597#define AcmHw_HwEn BIT(0)
598#define AcmHw_BeqEn BIT(1)
599#define AcmHw_ViqEn BIT(2)
600#define AcmHw_VoqEn BIT(3)
601#define AcmHw_BeqStatus BIT(4)
602#define AcmHw_ViqStatus BIT(5)
603#define AcmHw_VoqStatus BIT(6)
604
605#define RETRY_LIMIT_SHORT_SHIFT 8
606#define RETRY_LIMIT_LONG_SHIFT 0
607
608#define NAV_UPPER_EN BIT(16)
609#define NAV_UPPER 0xFF00
610#define NAV_RTSRST 0xFF
611
612#define BW_OPMODE_20MHZ BIT(2)
613#define BW_OPMODE_5G BIT(1)
614#define BW_OPMODE_11J BIT(0)
615
616#define RXERR_RPT_RST BIT(27)
617#define RXERR_OFDM_PPDU 0
618#define RXERR_OFDM_FALSE_ALARM 1
619#define RXERR_OFDM_MPDU_OK 2
620#define RXERR_OFDM_MPDU_FAIL 3
621#define RXERR_CCK_PPDU 4
622#define RXERR_CCK_FALSE_ALARM 5
623#define RXERR_CCK_MPDU_OK 6
624#define RXERR_CCK_MPDU_FAIL 7
625#define RXERR_HT_PPDU 8
626#define RXERR_HT_FALSE_ALARM 9
627#define RXERR_HT_MPDU_TOTAL 10
628#define RXERR_HT_MPDU_OK 11
629#define RXERR_HT_MPDU_FAIL 12
630#define RXERR_RX_FULL_DROP 15
631
632#define SCR_TXUSEDK BIT(0)
633#define SCR_RXUSEDK BIT(1)
634#define SCR_TXENCENABLE BIT(2)
635#define SCR_RXENCENABLE BIT(3)
636#define SCR_SKBYA2 BIT(4)
637#define SCR_NOSKMC BIT(5)
638
639#define CAM_VALID BIT(15)
640#define CAM_NOTVALID 0x0000
641#define CAM_USEDK BIT(5)
642
643#define CAM_NONE 0x0
644#define CAM_WEP40 0x01
645#define CAM_TKIP 0x02
646#define CAM_AES 0x04
647#define CAM_WEP104 0x05
648
649#define TOTAL_CAM_ENTRY 32
650#define HALF_CAM_ENTRY 16
651
652#define CAM_WRITE BIT(16)
653#define CAM_READ 0x00000000
654#define CAM_POLLINIG BIT(31)
655
656#define WOW_PMEN BIT(0)
657#define WOW_WOMEN BIT(1)
658#define WOW_MAGIC BIT(2)
659#define WOW_UWF BIT(3)
660
661#define GPIOMUX_EN BIT(3)
662#define GPIOSEL_GPIO 0
663#define GPIOSEL_PHYDBG 1
664#define GPIOSEL_BT 2
665#define GPIOSEL_WLANDBG 3
666#define GPIOSEL_GPIO_MASK (~(BIT(0)|BIT(1)))
667
668#define HST_RDBUSY BIT(0)
669#define CPU_WTBUSY BIT(1)
670
671#define IMR8190_DISABLED 0x0
672#define IMR_CPUERR BIT(5)
673#define IMR_ATIMEND BIT(4)
674#define IMR_TBDOK BIT(3)
675#define IMR_TBDER BIT(2)
676#define IMR_BCNDMAINT8 BIT(1)
677#define IMR_BCNDMAINT7 BIT(0)
678#define IMR_BCNDMAINT6 BIT(31)
679#define IMR_BCNDMAINT5 BIT(30)
680#define IMR_BCNDMAINT4 BIT(29)
681#define IMR_BCNDMAINT3 BIT(28)
682#define IMR_BCNDMAINT2 BIT(27)
683#define IMR_BCNDMAINT1 BIT(26)
684#define IMR_BCNDOK8 BIT(25)
685#define IMR_BCNDOK7 BIT(24)
686#define IMR_BCNDOK6 BIT(23)
687#define IMR_BCNDOK5 BIT(22)
688#define IMR_BCNDOK4 BIT(21)
689#define IMR_BCNDOK3 BIT(20)
690#define IMR_BCNDOK2 BIT(19)
691#define IMR_BCNDOK1 BIT(18)
692#define IMR_TIMEOUT2 BIT(17)
693#define IMR_TIMEOUT1 BIT(16)
694#define IMR_TXFOVW BIT(15)
695#define IMR_PSTIMEOUT BIT(14)
696#define IMR_BCNINT BIT(13)
697#define IMR_RXFOVW BIT(12)
698#define IMR_RDU BIT(11)
699#define IMR_RXCMDOK BIT(10)
700#define IMR_BDOK BIT(9)
701#define IMR_HIGHDOK BIT(8)
702#define IMR_COMDOK BIT(7)
703#define IMR_MGNTDOK BIT(6)
704#define IMR_HCCADOK BIT(5)
705#define IMR_BKDOK BIT(4)
706#define IMR_BEDOK BIT(3)
707#define IMR_VIDOK BIT(2)
708#define IMR_VODOK BIT(1)
709#define IMR_ROK BIT(0)
710
711#define TPPOLL_BKQ BIT(0)
712#define TPPOLL_BEQ BIT(1)
713#define TPPOLL_VIQ BIT(2)
714#define TPPOLL_VOQ BIT(3)
715#define TPPOLL_BQ BIT(4)
716#define TPPOLL_CQ BIT(5)
717#define TPPOLL_MQ BIT(6)
718#define TPPOLL_HQ BIT(7)
719#define TPPOLL_HCCAQ BIT(8)
720#define TPPOLL_STOPBK BIT(9)
721#define TPPOLL_STOPBE BIT(10)
722#define TPPOLL_STOPVI BIT(11)
723#define TPPOLL_STOPVO BIT(12)
724#define TPPOLL_STOPMGT BIT(13)
725#define TPPOLL_STOPHIGH BIT(14)
726#define TPPOLL_STOPHCCA BIT(15)
727#define TPPOLL_SHIFT 8
728
729#define CCX_CMD_CLM_ENABLE BIT(0)
730#define CCX_CMD_NHM_ENABLE BIT(1)
731#define CCX_CMD_FUNCTION_ENABLE BIT(8)
732#define CCX_CMD_IGNORE_CCA BIT(9)
733#define CCX_CMD_IGNORE_TXON BIT(10)
734#define CCX_CLM_RESULT_READY BIT(16)
735#define CCX_NHM_RESULT_READY BIT(16)
736#define CCX_CMD_RESET 0x0
737
738
739#define HWSET_MAX_SIZE_92S 128
740#define EFUSE_MAX_SECTION 16
741#define EFUSE_REAL_CONTENT_LEN 512
742
743#define RTL8190_EEPROM_ID 0x8129
744#define EEPROM_HPON 0x02
745#define EEPROM_CLK 0x06
746#define EEPROM_TESTR 0x08
747
748#define EEPROM_VID 0x0A
749#define EEPROM_DID 0x0C
750#define EEPROM_SVID 0x0E
751#define EEPROM_SMID 0x10
752
753#define EEPROM_MAC_ADDR 0x12
754#define EEPROM_NODE_ADDRESS_BYTE_0 0x12
755
756#define EEPROM_PWDIFF 0x54
757
758#define EEPROM_TXPOWERBASE 0x50
759#define EEPROM_TX_PWR_INDEX_RANGE 28
760
761#define EEPROM_TX_PWR_HT20_DIFF 0x62
762#define DEFAULT_HT20_TXPWR_DIFF 2
763#define EEPROM_TX_PWR_OFDM_DIFF 0x65
764
765#define EEPROM_TXPWRGROUP 0x67
766#define EEPROM_REGULATORY 0x6D
767
768#define TX_PWR_SAFETY_CHK 0x6D
769#define EEPROM_TXPWINDEX_CCK_24G 0x5D
770#define EEPROM_TXPWINDEX_OFDM_24G 0x6B
771#define EEPROM_HT2T_CH1_A 0x6c
772#define EEPROM_HT2T_CH7_A 0x6d
773#define EEPROM_HT2T_CH13_A 0x6e
774#define EEPROM_HT2T_CH1_B 0x6f
775#define EEPROM_HT2T_CH7_B 0x70
776#define EEPROM_HT2T_CH13_B 0x71
777
778#define EEPROM_TSSI_A 0x74
779#define EEPROM_TSSI_B 0x75
780
781#define EEPROM_RFIND_POWERDIFF 0x76
782#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
783
784#define EEPROM_THERMALMETER 0x77
785#define EEPROM_BLUETOOTH_COEXIST 0x78
786#define EEPROM_BLUETOOTH_TYPE 0x4f
787
788#define EEPROM_OPTIONAL 0x78
789#define EEPROM_WOWLAN 0x78
790
791#define EEPROM_CRYSTALCAP 0x79
792#define EEPROM_CHANNELPLAN 0x7B
793#define EEPROM_VERSION 0x7C
794#define EEPROM_CUSTOMID 0x7A
795#define EEPROM_BOARDTYPE 0x7E
796
797#define EEPROM_CHANNEL_PLAN_FCC 0x0
798#define EEPROM_CHANNEL_PLAN_IC 0x1
799#define EEPROM_CHANNEL_PLAN_ETSI 0x2
800#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
801#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
802#define EEPROM_CHANNEL_PLAN_MKK 0x5
803#define EEPROM_CHANNEL_PLAN_MKK1 0x6
804#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
805#define EEPROM_CHANNEL_PLAN_TELEC 0x8
806#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
807#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
808#define EEPROM_CHANNEL_PLAN_NCC 0xB
809#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
810
811#define FW_DIG_DISABLE 0xfd00cc00
812#define FW_DIG_ENABLE 0xfd000000
813#define FW_DIG_HALT 0xfd000001
814#define FW_DIG_RESUME 0xfd000002
815#define FW_HIGH_PWR_DISABLE 0xfd000008
816#define FW_HIGH_PWR_ENABLE 0xfd000009
817#define FW_ADD_A2_ENTRY 0xfd000016
818#define FW_TXPWR_TRACK_ENABLE 0xfd000017
819#define FW_TXPWR_TRACK_DISABLE 0xfd000018
820#define FW_TXPWR_TRACK_THERMAL 0xfd000019
821#define FW_TXANT_SWITCH_ENABLE 0xfd000023
822#define FW_TXANT_SWITCH_DISABLE 0xfd000024
823#define FW_RA_INIT 0xfd000026
824#define FW_CTRL_DM_BY_DRIVER		0xfd00002a
825#define FW_RA_IOT_BG_COMB 0xfd000030
826#define FW_RA_IOT_N_COMB 0xfd000031
827#define FW_RA_REFRESH 0xfd0000a0
828#define FW_RA_UPDATE_MASK 0xfd0000a2
829#define FW_RA_DISABLE 0xfd0000a4
830#define FW_RA_ACTIVE 0xfd0000a6
831#define FW_RA_DISABLE_RSSI_MASK 0xfd0000ac
832#define FW_RA_ENABLE_RSSI_MASK 0xfd0000ad
833#define FW_RA_RESET 0xfd0000af
834#define FW_DM_DISABLE 0xfd00aa00
835#define FW_IQK_ENABLE 0xf0000020
836#define FW_IQK_SUCCESS 0x0000dddd
837#define FW_IQK_FAIL 0x0000ffff
838#define FW_OP_FAILURE 0xffffffff
839#define FW_TX_FEEDBACK_NONE 0xfb000000
840#define FW_TX_FEEDBACK_DTM_ENABLE (FW_TX_FEEDBACK_NONE | 0x1)
841#define FW_TX_FEEDBACK_CCX_ENABL (FW_TX_FEEDBACK_NONE | 0x2)
842#define FW_BB_RESET_ENABLE 0xff00000d
843#define FW_BB_RESET_DISABLE 0xff00000e
844#define FW_CCA_CHK_ENABLE 0xff000011
845#define FW_CCK_RESET_CNT 0xff000013
846#define FW_LPS_ENTER 0xfe000010
847#define FW_LPS_LEAVE 0xfe000011
848#define FW_INDIRECT_READ 0xf2000000
849#define FW_INDIRECT_WRITE 0xf2000001
850#define FW_CHAN_SET 0xf3000001
851
852#define RFPC 0x5F
853#define RCR_9356SEL BIT(6)
854#define TCR_LRL_OFFSET 0
855#define TCR_SRL_OFFSET 8
856#define TCR_MXDMA_OFFSET 21
857#define TCR_SAT BIT(24)
858#define RCR_MXDMA_OFFSET 8
859#define RCR_FIFO_OFFSET 13
860#define RCR_OnlyErlPkt BIT(31)
861#define CWR 0xDC
862#define RETRYCTR 0xDE
863
864#define CPU_GEN_SYSTEM_RESET 0x00000001
865
866#define CCX_COMMAND_REG 0x890
867#define CLM_PERIOD_REG 0x894
868#define NHM_PERIOD_REG 0x896
869
870#define NHM_THRESHOLD0 0x898
871#define NHM_THRESHOLD1 0x899
872#define NHM_THRESHOLD2 0x89A
873#define NHM_THRESHOLD3 0x89B
874#define NHM_THRESHOLD4 0x89C
875#define NHM_THRESHOLD5 0x89D
876#define NHM_THRESHOLD6 0x89E
877#define CLM_RESULT_REG 0x8D0
878#define NHM_RESULT_REG 0x8D4
879#define NHM_RPI_COUNTER0 0x8D8
880#define NHM_RPI_COUNTER1 0x8D9
881#define NHM_RPI_COUNTER2 0x8DA
882#define NHM_RPI_COUNTER3 0x8DB
883#define NHM_RPI_COUNTER4 0x8DC
884#define NHM_RPI_COUNTER5 0x8DD
885#define NHM_RPI_COUNTER6 0x8DE
886#define NHM_RPI_COUNTER7 0x8DF
887
888#define HAL_8192S_HW_GPIO_OFF_BIT BIT(3)
889#define HAL_8192S_HW_GPIO_OFF_MASK 0xF7
890#define HAL_8192S_HW_GPIO_WPS_BIT BIT(4)
891
892#define RPMAC_RESET 0x100
893#define RPMAC_TXSTART 0x104
894#define RPMAC_TXLEGACYSIG 0x108
895#define RPMAC_TXHTSIG1 0x10c
896#define RPMAC_TXHTSIG2 0x110
897#define RPMAC_PHYDEBUG 0x114
898#define RPMAC_TXPACKETNNM 0x118
899#define RPMAC_TXIDLE 0x11c
900#define RPMAC_TXMACHEADER0 0x120
901#define RPMAC_TXMACHEADER1 0x124
902#define RPMAC_TXMACHEADER2 0x128
903#define RPMAC_TXMACHEADER3 0x12c
904#define RPMAC_TXMACHEADER4 0x130
905#define RPMAC_TXMACHEADER5 0x134
906#define RPMAC_TXDATATYPE 0x138
907#define RPMAC_TXRANDOMSEED 0x13c
908#define RPMAC_CCKPLCPPREAMBLE 0x140
909#define RPMAC_CCKPLCPHEADER 0x144
910#define RPMAC_CCKCRC16 0x148
911#define RPMAC_OFDMRXCRC32OK 0x170
912#define RPMAC_OFDMRXCRC32ER 0x174
913#define RPMAC_OFDMRXPARITYER 0x178
914#define RPMAC_OFDMRXCRC8ER 0x17c
915#define RPMAC_CCKCRXRC16ER 0x180
916#define RPMAC_CCKCRXRC32ER 0x184
917#define RPMAC_CCKCRXRC32OK 0x188
918#define RPMAC_TXSTATUS 0x18c
919
920#define RF_BB_CMD_ADDR 0x02c0
921#define RF_BB_CMD_DATA 0x02c4
922
923#define RFPGA0_RFMOD 0x800
924
925#define RFPGA0_TXINFO 0x804
926#define RFPGA0_PSDFUNCTION 0x808
927
928#define RFPGA0_TXGAINSTAGE 0x80c
929
930#define RFPGA0_RFTIMING1 0x810
931#define RFPGA0_RFTIMING2 0x814
932#define RFPGA0_XA_HSSIPARAMETER1 0x820
933#define RFPGA0_XA_HSSIPARAMETER2 0x824
934#define RFPGA0_XB_HSSIPARAMETER1 0x828
935#define RFPGA0_XB_HSSIPARAMETER2 0x82c
936#define RFPGA0_XC_HSSIPARAMETER1 0x830
937#define RFPGA0_XC_HSSIPARAMETER2 0x834
938#define RFPGA0_XD_HSSIPARAMETER1 0x838
939#define RFPGA0_XD_HSSIPARAMETER2 0x83c
940#define RFPGA0_XA_LSSIPARAMETER 0x840
941#define RFPGA0_XB_LSSIPARAMETER 0x844
942#define RFPGA0_XC_LSSIPARAMETER 0x848
943#define RFPGA0_XD_LSSIPARAMETER 0x84c
944
945#define RFPGA0_RFWAKEUP_PARAMETER 0x850
946#define RFPGA0_RFSLEEPUP_PARAMETER 0x854
947
948#define RFPGA0_XAB_SWITCHCONTROL 0x858
949#define RFPGA0_XCD_SWITCHCONTROL 0x85c
950
951#define RFPGA0_XA_RFINTERFACEOE 0x860
952#define RFPGA0_XB_RFINTERFACEOE 0x864
953#define RFPGA0_XC_RFINTERFACEOE 0x868
954#define RFPGA0_XD_RFINTERFACEOE 0x86c
955
956#define RFPGA0_XAB_RFINTERFACESW 0x870
957#define RFPGA0_XCD_RFINTERFACESW 0x874
958
959#define RFPGA0_XAB_RFPARAMETER 0x878
960#define RFPGA0_XCD_RFPARAMETER 0x87c
961
962#define RFPGA0_ANALOGPARAMETER1 0x880
963#define RFPGA0_ANALOGPARAMETER2 0x884
964#define RFPGA0_ANALOGPARAMETER3 0x888
965#define RFPGA0_ANALOGPARAMETER4 0x88c
966
967#define RFPGA0_XA_LSSIREADBACK 0x8a0
968#define RFPGA0_XB_LSSIREADBACK 0x8a4
969#define RFPGA0_XC_LSSIREADBACK 0x8a8
970#define RFPGA0_XD_LSSIREADBACK 0x8ac
971
972#define RFPGA0_PSDREPORT 0x8b4
973#define TRANSCEIVERA_HSPI_READBACK 0x8b8
974#define TRANSCEIVERB_HSPI_READBACK 0x8bc
975#define RFPGA0_XAB_RFINTERFACERB 0x8e0
976#define RFPGA0_XCD_RFINTERFACERB 0x8e4
977#define RFPGA1_RFMOD 0x900
978
979#define RFPGA1_TXBLOCK 0x904
980#define RFPGA1_DEBUGSELECT 0x908
981#define RFPGA1_TXINFO 0x90c
982
983#define RCCK0_SYSTEM 0xa00
984
985#define RCCK0_AFESETTING 0xa04
986#define RCCK0_CCA 0xa08
987
988#define RCCK0_RXAGC1 0xa0c
989#define RCCK0_RXAGC2 0xa10
990
991#define RCCK0_RXHP 0xa14
992
993#define RCCK0_DSPPARAMETER1 0xa18
994#define RCCK0_DSPPARAMETER2 0xa1c
995
996#define RCCK0_TXFILTER1 0xa20
997#define RCCK0_TXFILTER2 0xa24
998#define RCCK0_DEBUGPORT 0xa28
999#define RCCK0_FALSEALARMREPORT 0xa2c
1000#define RCCK0_TRSSIREPORT 0xa50
1001#define RCCK0_RXREPORT 0xa54
1002#define RCCK0_FACOUNTERLOWER 0xa5c
1003#define RCCK0_FACOUNTERUPPER 0xa58
1004
1005#define ROFDM0_LSTF 0xc00
1006
1007#define ROFDM0_TRXPATHENABLE 0xc04
1008#define ROFDM0_TRMUXPAR 0xc08
1009#define ROFDM0_TRSWISOLATION 0xc0c
1010
1011#define ROFDM0_XARXAFE 0xc10
1012#define ROFDM0_XARXIQIMBALANCE 0xc14
1013#define ROFDM0_XBRXAFE 0xc18
1014#define ROFDM0_XBRXIQIMBALANCE 0xc1c
1015#define ROFDM0_XCRXAFE 0xc20
1016#define ROFDM0_XCRXIQIMBALANCE 0xc24
1017#define ROFDM0_XDRXAFE 0xc28
1018#define ROFDM0_XDRXIQIMBALANCE 0xc2c
1019
1020#define ROFDM0_RXDETECTOR1 0xc30
1021#define ROFDM0_RXDETECTOR2 0xc34
1022#define ROFDM0_RXDETECTOR3 0xc38
1023#define ROFDM0_RXDETECTOR4 0xc3c
1024
1025#define ROFDM0_RXDSP 0xc40
1026#define ROFDM0_CFO_AND_DAGC 0xc44
1027#define ROFDM0_CCADROP_THRESHOLD 0xc48
1028#define ROFDM0_ECCA_THRESHOLD 0xc4c
1029
1030#define ROFDM0_XAAGCCORE1 0xc50
1031#define ROFDM0_XAAGCCORE2 0xc54
1032#define ROFDM0_XBAGCCORE1 0xc58
1033#define ROFDM0_XBAGCCORE2 0xc5c
1034#define ROFDM0_XCAGCCORE1 0xc60
1035#define ROFDM0_XCAGCCORE2 0xc64
1036#define ROFDM0_XDAGCCORE1 0xc68
1037#define ROFDM0_XDAGCCORE2 0xc6c
1038
1039#define ROFDM0_AGCPARAMETER1 0xc70
1040#define ROFDM0_AGCPARAMETER2 0xc74
1041#define ROFDM0_AGCRSSITABLE 0xc78
1042#define ROFDM0_HTSTFAGC 0xc7c
1043
1044#define ROFDM0_XATXIQIMBALANCE 0xc80
1045#define ROFDM0_XATXAFE 0xc84
1046#define ROFDM0_XBTXIQIMBALANCE 0xc88
1047#define ROFDM0_XBTXAFE 0xc8c
1048#define ROFDM0_XCTXIQIMBALANCE 0xc90
1049#define ROFDM0_XCTXAFE 0xc94
1050#define ROFDM0_XDTXIQIMBALANCE 0xc98
1051#define ROFDM0_XDTXAFE 0xc9c
1052
1053#define ROFDM0_RXHP_PARAMETER 0xce0
1054#define ROFDM0_TXPSEUDO_NOISE_WGT 0xce4
1055#define ROFDM0_FRAME_SYNC 0xcf0
1056#define ROFDM0_DFSREPORT 0xcf4
1057#define ROFDM0_TXCOEFF1 0xca4
1058#define ROFDM0_TXCOEFF2 0xca8
1059#define ROFDM0_TXCOEFF3 0xcac
1060#define ROFDM0_TXCOEFF4 0xcb0
1061#define ROFDM0_TXCOEFF5 0xcb4
1062#define ROFDM0_TXCOEFF6 0xcb8
1063
1064
1065#define ROFDM1_LSTF 0xd00
1066#define ROFDM1_TRXPATHENABLE 0xd04
1067
1068#define ROFDM1_CFO 0xd08
1069#define ROFDM1_CSI1 0xd10
1070#define ROFDM1_SBD 0xd14
1071#define ROFDM1_CSI2 0xd18
1072#define ROFDM1_CFOTRACKING 0xd2c
1073#define ROFDM1_TRXMESAURE1 0xd34
1074#define ROFDM1_INTF_DET 0xd3c
1075#define ROFDM1_PSEUDO_NOISESTATEAB 0xd50
1076#define ROFDM1_PSEUDO_NOISESTATECD 0xd54
1077#define ROFDM1_RX_PSEUDO_NOISE_WGT 0xd58
1078
1079#define ROFDM_PHYCOUNTER1 0xda0
1080#define ROFDM_PHYCOUNTER2 0xda4
1081#define ROFDM_PHYCOUNTER3 0xda8
1082
1083#define ROFDM_SHORT_CFOAB 0xdac
1084#define ROFDM_SHORT_CFOCD 0xdb0
1085#define ROFDM_LONG_CFOAB 0xdb4
1086#define ROFDM_LONG_CFOCD 0xdb8
1087#define ROFDM_TAIL_CFOAB 0xdbc
1088#define ROFDM_TAIL_CFOCD 0xdc0
1089#define ROFDM_PW_MEASURE1 0xdc4
1090#define ROFDM_PW_MEASURE2 0xdc8
1091#define ROFDM_BW_REPORT 0xdcc
1092#define ROFDM_AGC_REPORT 0xdd0
1093#define ROFDM_RXSNR 0xdd4
1094#define ROFDM_RXEVMCSI 0xdd8
1095#define ROFDM_SIG_REPORT 0xddc
1096
1097
1098#define RTXAGC_RATE18_06 0xe00
1099#define RTXAGC_RATE54_24 0xe04
1100#define RTXAGC_CCK_MCS32 0xe08
1101#define RTXAGC_MCS03_MCS00 0xe10
1102#define RTXAGC_MCS07_MCS04 0xe14
1103#define RTXAGC_MCS11_MCS08 0xe18
1104#define RTXAGC_MCS15_MCS12 0xe1c
1105
1106
1107#define RF_AC 0x00
1108#define RF_IQADJ_G1 0x01
1109#define RF_IQADJ_G2 0x02
1110#define RF_POW_TRSW 0x05
1111#define RF_GAIN_RX 0x06
1112#define RF_GAIN_TX 0x07
1113#define RF_TXM_IDAC 0x08
1114#define RF_BS_IQGEN 0x0F
1115
1116#define RF_MODE1 0x10
1117#define RF_MODE2 0x11
1118#define RF_RX_AGC_HP 0x12
1119#define RF_TX_AGC 0x13
1120#define RF_BIAS 0x14
1121#define RF_IPA 0x15
1122#define RF_POW_ABILITY 0x17
1123#define RF_MODE_AG 0x18
1124#define RF_CHANNEL 0x18
1125#define RF_CHNLBW 0x18
1126#define RF_TOP 0x19
1127#define RF_RX_G1 0x1A
1128#define RF_RX_G2 0x1B
1129#define RF_RX_BB2 0x1C
1130#define RF_RX_BB1 0x1D
1131#define RF_RCK1 0x1E
1132#define RF_RCK2 0x1F
1133
1134#define RF_TX_G1 0x20
1135#define RF_TX_G2 0x21
1136#define RF_TX_G3 0x22
1137#define RF_TX_BB1 0x23
1138#define RF_T_METER 0x24
1139#define RF_SYN_G1 0x25
1140#define RF_SYN_G2 0x26
1141#define RF_SYN_G3 0x27
1142#define RF_SYN_G4 0x28
1143#define RF_SYN_G5 0x29
1144#define RF_SYN_G6 0x2A
1145#define RF_SYN_G7 0x2B
1146#define RF_SYN_G8 0x2C
1147
1148#define RF_RCK_OS 0x30
1149#define RF_TXPA_G1 0x31
1150#define RF_TXPA_G2 0x32
1151#define RF_TXPA_G3 0x33
1152
1153#define BRFMOD 0x1
1154#define BCCKEN 0x1000000
1155#define BOFDMEN 0x2000000
1156
1157#define BXBTXAGC 0xf00
1158#define BXCTXAGC 0xf000
1159#define BXDTXAGC 0xf0000
1160
1161#define B3WIRE_DATALENGTH 0x800
1162#define B3WIRE_ADDRESSLENGTH 0x400
1163
1164#define BRFSI_RFENV 0x10
1165
1166#define BLSSI_READADDRESS 0x7f800000
1167#define BLSSI_READEDGE 0x80000000
1168#define BLSSI_READBACK_DATA 0xfffff
1169
1170#define BADCLKPHASE 0x4000000
1171
1172#define BCCK_SIDEBAND 0x10
1173
1174#define BTX_AGCRATECCK 0x7f00
1175
1176#define MASKBYTE0 0xff
1177#define MASKBYTE1 0xff00
1178#define MASKBYTE2 0xff0000
1179#define MASKBYTE3 0xff000000
1180#define MASKHWORD 0xffff0000
1181#define MASKLWORD 0x0000ffff
1182#define MASKDWORD 0xffffffff
1183
1184#define MAKS12BITS 0xfffff
1185#define MASK20BITS 0xfffff
1186#define RFREG_OFFSET_MASK 0xfffff
1187
1188#endif
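
The RCR_* receive-control flags and the multi-bit fields anchored at RCR_MXDMA_OFFSET and RCR_FIFO_OFFSET are OR-ed together into a single 32-bit receive-configuration word; rtl92s_init_sw_vars() in sw.c below builds rtlpci->receive_config this way with an early-RX FIFO threshold of 7. A minimal sketch of that composition, assuming only the definitions above (the helper name is illustrative, not part of the driver):

static u32 example_build_rcr(u16 early_rx_threshold)
{
	u32 rcr = RCR_APPFCS | RCR_APWRMGT | RCR_AMF | RCR_ADF |
		  RCR_APP_MIC | RCR_APP_ICV;

	rcr |= RCR_AICV | RCR_ACRC32;		/* accept ICV-error / CRC32-error frames */
	rcr |= RCR_AB | RCR_AM;			/* accept broadcast and multicast */
	rcr |= RCR_APM;				/* accept physical-address match */
	rcr |= RCR_APP_PHYST_STAFF | RCR_APP_PHYST_RXFF;	/* append PHY status */

	/* Multi-bit field: the early-RX FIFO threshold starts at bit RCR_FIFO_OFFSET (13). */
	rcr |= (u32)early_rx_threshold << RCR_FIFO_OFFSET;

	return rcr;
}

Called with early_rx_threshold = 7 this yields the same word that rtl92s_init_sw_vars() assigns to rtlpci->receive_config.
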
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
new file mode 100644
index 000000000000..1d3a48330399
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -0,0 +1,546 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "reg.h"
32#include "def.h"
33#include "phy.h"
34#include "rf.h"
35#include "dm.h"
36
37
38static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
39 u8 chnl, u32 *ofdmbase, u32 *mcsbase,
40 u8 *p_final_pwridx)
41{
42 struct rtl_priv *rtlpriv = rtl_priv(hw);
43 struct rtl_phy *rtlphy = &(rtlpriv->phy);
44 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
45 u32 pwrbase0, pwrbase1;
46 u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
47 u8 i, pwrlevel[4];
48
49 for (i = 0; i < 2; i++)
50 pwrlevel[i] = p_pwrlevel[i];
51
52	/* Only path A matters for legacy rates. */
53 if (rtlefuse->eeprom_version < 2) {
54 pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf);
55 } else if (rtlefuse->eeprom_version >= 2) {
56 legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff
57 [RF90_PATH_A][chnl - 1];
58
59		/* For legacy OFDM, tx pwr is always > HT OFDM pwr.
60		 * We do not care about the path B legacy OFDM pwr
61		 * diff; there is no BB register to notify the HW
62		 * about it. */
63 pwrbase0 = pwrlevel[0] + legacy_pwrdiff;
64 }
65
66 pwrbase0 = (pwrbase0 << 24) | (pwrbase0 << 16) | (pwrbase0 << 8) |
67 pwrbase0;
68 *ofdmbase = pwrbase0;
69
70 /* MCS rates */
71 if (rtlefuse->eeprom_version >= 2) {
72 /* Check HT20 to HT40 diff */
73 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
74 for (i = 0; i < 2; i++) {
75 /* rf-A, rf-B */
76 /* HT 20<->40 pwr diff */
77 ht20_pwrdiff = rtlefuse->txpwr_ht20diff
78 [i][chnl - 1];
79
80 if (ht20_pwrdiff < 8) /* 0~+7 */
81 pwrlevel[i] += ht20_pwrdiff;
82 else /* index8-15=-8~-1 */
83 pwrlevel[i] -= (16 - ht20_pwrdiff);
84 }
85 }
86 }
87
88 /* use index of rf-A */
89 pwrbase1 = pwrlevel[0];
90 pwrbase1 = (pwrbase1 << 24) | (pwrbase1 << 16) | (pwrbase1 << 8) |
91 pwrbase1;
92 *mcsbase = pwrbase1;
93
94 /* The following is for Antenna
95 * diff from Ant-B to Ant-A */
96 p_final_pwridx[0] = pwrlevel[0];
97 p_final_pwridx[1] = pwrlevel[1];
98
99 switch (rtlefuse->eeprom_regulatory) {
100 case 3:
101 /* The following is for calculation
102 * of the power diff for Ant-B to Ant-A. */
103 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
104 p_final_pwridx[0] += rtlefuse->pwrgroup_ht40
105 [RF90_PATH_A][
106 chnl - 1];
107 p_final_pwridx[1] += rtlefuse->pwrgroup_ht40
108 [RF90_PATH_B][
109 chnl - 1];
110 } else {
111 p_final_pwridx[0] += rtlefuse->pwrgroup_ht20
112 [RF90_PATH_A][
113 chnl - 1];
114 p_final_pwridx[1] += rtlefuse->pwrgroup_ht20
115 [RF90_PATH_B][
116 chnl - 1];
117 }
118 break;
119 default:
120 break;
121 }
122
123 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
124 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("40MHz finalpwr_idx "
125 "(A / B) = 0x%x / 0x%x\n", p_final_pwridx[0],
126 p_final_pwridx[1]));
127 } else {
128 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("20MHz finalpwr_idx "
129 "(A / B) = 0x%x / 0x%x\n", p_final_pwridx[0],
130 p_final_pwridx[1]));
131 }
132}
133
134static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
135 u8 *p_final_pwridx)
136{
137 struct rtl_priv *rtlpriv = rtl_priv(hw);
138 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
139 struct rtl_phy *rtlphy = &(rtlpriv->phy);
140 char ant_pwr_diff = 0;
141 u32 u4reg_val = 0;
142
143 if (rtlphy->rf_type == RF_2T2R) {
144 ant_pwr_diff = p_final_pwridx[1] - p_final_pwridx[0];
145
146 /* range is from 7~-8,
147 * index = 0x0~0xf */
148 if (ant_pwr_diff > 7)
149 ant_pwr_diff = 7;
150 if (ant_pwr_diff < -8)
151 ant_pwr_diff = -8;
152
153 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
154 ("Antenna Diff from RF-B "
155 "to RF-A = %d (0x%x)\n", ant_pwr_diff,
156 ant_pwr_diff & 0xf));
157
158 ant_pwr_diff &= 0xf;
159 }
160
161 /* Antenna TX power difference */
162 rtlefuse->antenna_txpwdiff[2] = 0;/* RF-D, don't care */
163 rtlefuse->antenna_txpwdiff[1] = 0;/* RF-C, don't care */
164 rtlefuse->antenna_txpwdiff[0] = (u8)(ant_pwr_diff); /* RF-B */
165
166 u4reg_val = rtlefuse->antenna_txpwdiff[2] << 8 |
167 rtlefuse->antenna_txpwdiff[1] << 4 |
168 rtlefuse->antenna_txpwdiff[0];
169
170 rtl_set_bbreg(hw, RFPGA0_TXGAINSTAGE, (BXBTXAGC | BXCTXAGC | BXDTXAGC),
171 u4reg_val);
172
173 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
174 ("Write BCD-Diff(0x%x) = 0x%x\n",
175 RFPGA0_TXGAINSTAGE, u4reg_val));
176}
177
178static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
179 u8 chnl, u8 index,
180 u32 pwrbase0,
181 u32 pwrbase1,
182 u32 *p_outwrite_val)
183{
184 struct rtl_priv *rtlpriv = rtl_priv(hw);
185 struct rtl_phy *rtlphy = &(rtlpriv->phy);
186 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
187 u8 i, chnlgroup, pwrdiff_limit[4];
188 u32 writeval, customer_limit;
189
190 /* Index 0 & 1= legacy OFDM, 2-5=HT_MCS rate */
191 switch (rtlefuse->eeprom_regulatory) {
192 case 0:
193		/* Realtek better-performance mode: increase the power
194		 * diff defined by Realtek for larger output power. */
195 chnlgroup = 0;
196
197 writeval = rtlphy->mcs_txpwrlevel_origoffset
198 [chnlgroup][index] +
199 ((index < 2) ? pwrbase0 : pwrbase1);
200
201 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
202 ("RTK better performance, "
203 "writeval = 0x%x\n", writeval));
204 break;
205 case 1:
206		/* Realtek regulatory mode: increase the power diff
207		 * defined by Realtek for regulatory compliance. */
208 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
209 writeval = ((index < 2) ? pwrbase0 : pwrbase1);
210
211 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
212 ("Realtek regulatory, "
213 "40MHz, writeval = 0x%x\n", writeval));
214 } else {
215 if (rtlphy->pwrgroup_cnt == 1)
216 chnlgroup = 0;
217
218 if (rtlphy->pwrgroup_cnt >= 3) {
219 if (chnl <= 3)
220 chnlgroup = 0;
221 else if (chnl >= 4 && chnl <= 8)
222 chnlgroup = 1;
223 else if (chnl > 8)
224 chnlgroup = 2;
225 if (rtlphy->pwrgroup_cnt == 4)
226 chnlgroup++;
227 }
228
229 writeval = rtlphy->mcs_txpwrlevel_origoffset
230 [chnlgroup][index]
231 + ((index < 2) ?
232 pwrbase0 : pwrbase1);
233
234 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
235 ("Realtek regulatory, "
236 "20MHz, writeval = 0x%x\n", writeval));
237 }
238 break;
239 case 2:
240 /* Better regulatory don't increase any power diff */
241 writeval = ((index < 2) ? pwrbase0 : pwrbase1);
242 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
243 ("Better regulatory, "
244 "writeval = 0x%x\n", writeval));
245 break;
246 case 3:
247 /* Customer defined power diff. increase power diff
248 defined by customer. */
249 chnlgroup = 0;
250
251 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
252 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
253 ("customer's limit, 40MHz = 0x%x\n",
254 rtlefuse->pwrgroup_ht40
255 [RF90_PATH_A][chnl - 1]));
256 } else {
257 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
258 ("customer's limit, 20MHz = 0x%x\n",
259 rtlefuse->pwrgroup_ht20
260 [RF90_PATH_A][chnl - 1]));
261 }
262
263 for (i = 0; i < 4; i++) {
264 pwrdiff_limit[i] =
265 (u8)((rtlphy->mcs_txpwrlevel_origoffset
266 [chnlgroup][index] & (0x7f << (i * 8)))
267 >> (i * 8));
268
269 if (rtlphy->current_chan_bw ==
270 HT_CHANNEL_WIDTH_20_40) {
271 if (pwrdiff_limit[i] >
272 rtlefuse->pwrgroup_ht40
273 [RF90_PATH_A][chnl - 1]) {
274 pwrdiff_limit[i] =
275 rtlefuse->pwrgroup_ht20
276 [RF90_PATH_A][chnl - 1];
277 }
278 } else {
279 if (pwrdiff_limit[i] >
280 rtlefuse->pwrgroup_ht20
281 [RF90_PATH_A][chnl - 1]) {
282 pwrdiff_limit[i] =
283 rtlefuse->pwrgroup_ht20
284 [RF90_PATH_A][chnl - 1];
285 }
286 }
287 }
288
289 customer_limit = (pwrdiff_limit[3] << 24) |
290 (pwrdiff_limit[2] << 16) |
291 (pwrdiff_limit[1] << 8) |
292 (pwrdiff_limit[0]);
293 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
294 ("Customer's limit = 0x%x\n",
295 customer_limit));
296
297 writeval = customer_limit + ((index < 2) ?
298 pwrbase0 : pwrbase1);
299 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
300 ("Customer, writeval = "
301 "0x%x\n", writeval));
302 break;
303 default:
304 chnlgroup = 0;
305 writeval = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index] +
306 ((index < 2) ? pwrbase0 : pwrbase1);
307 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
308 ("RTK better performance, "
309 "writeval = 0x%x\n", writeval));
310 break;
311 }
312
313 if (rtlpriv->dm.dynamic_txhighpower_lvl == TX_HIGH_PWR_LEVEL_LEVEL1)
314 writeval = 0x10101010;
315 else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
316 TX_HIGH_PWR_LEVEL_LEVEL2)
317 writeval = 0x0;
318
319 *p_outwrite_val = writeval;
320
321}
322
323static void _rtl92s_write_ofdm_powerreg(struct ieee80211_hw *hw,
324 u8 index, u32 val)
325{
326 struct rtl_priv *rtlpriv = rtl_priv(hw);
327 struct rtl_phy *rtlphy = &(rtlpriv->phy);
328 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
329 u16 regoffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c};
330 u8 i, rfa_pwr[4];
331 u8 rfa_lower_bound = 0, rfa_upper_bound = 0, rf_pwr_diff = 0;
332 u32 writeval = val;
333
334	/* If path A and path B coexist, path A tx power must be limited
335	 * so that path B power neither overflows nor underflows; compute
336	 * the upper and lower bounds of path A tx power. */
337 if (rtlphy->rf_type == RF_2T2R) {
338 rf_pwr_diff = rtlefuse->antenna_txpwdiff[0];
339
340 /* Diff=-8~-1 */
341 if (rf_pwr_diff >= 8) {
342 /* Prevent underflow!! */
343 rfa_lower_bound = 0x10 - rf_pwr_diff;
344 /* if (rf_pwr_diff >= 0) Diff = 0-7 */
345 } else {
346 rfa_upper_bound = RF6052_MAX_TX_PWR - rf_pwr_diff;
347 }
348 }
349
350 for (i = 0; i < 4; i++) {
351 rfa_pwr[i] = (u8)((writeval & (0x7f << (i * 8))) >> (i * 8));
352 if (rfa_pwr[i] > RF6052_MAX_TX_PWR)
353 rfa_pwr[i] = RF6052_MAX_TX_PWR;
354
355		/* If path A and path B coexist, path A tx power must be limited
356		 * so that path B power neither overflows nor underflows; compute
357		 * the upper and lower bounds of path A tx power. */
358 if (rtlphy->rf_type == RF_2T2R) {
359 /* Diff=-8~-1 */
360 if (rf_pwr_diff >= 8) {
361 /* Prevent underflow!! */
362 if (rfa_pwr[i] < rfa_lower_bound)
363 rfa_pwr[i] = rfa_lower_bound;
364 /* Diff = 0-7 */
365 } else if (rf_pwr_diff >= 1) {
366 /* Prevent overflow */
367 if (rfa_pwr[i] > rfa_upper_bound)
368 rfa_pwr[i] = rfa_upper_bound;
369 }
370 }
371
372 }
373
374 writeval = (rfa_pwr[3] << 24) | (rfa_pwr[2] << 16) | (rfa_pwr[1] << 8) |
375 rfa_pwr[0];
376
377 rtl_set_bbreg(hw, regoffset[index], 0x7f7f7f7f, writeval);
378}
379
380void rtl92s_phy_rf6052_set_ofdmtxpower(struct ieee80211_hw *hw,
381 u8 *p_pwrlevel, u8 chnl)
382{
383 u32 writeval, pwrbase0, pwrbase1;
384 u8 index = 0;
385 u8 finalpwr_idx[4];
386
387 _rtl92s_get_powerbase(hw, p_pwrlevel, chnl, &pwrbase0, &pwrbase1,
388 &finalpwr_idx[0]);
389 _rtl92s_set_antennadiff(hw, &finalpwr_idx[0]);
390
391 for (index = 0; index < 6; index++) {
392 _rtl92s_get_txpower_writeval_byregulatory(hw, chnl, index,
393 pwrbase0, pwrbase1, &writeval);
394
395 _rtl92s_write_ofdm_powerreg(hw, index, writeval);
396 }
397}
398
399void rtl92s_phy_rf6052_set_ccktxpower(struct ieee80211_hw *hw, u8 pwrlevel)
400{
401 struct rtl_priv *rtlpriv = rtl_priv(hw);
402 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
403 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
404 u32 txagc = 0;
405 bool dont_inc_cck_or_turboscanoff = false;
406
407 if (((rtlefuse->eeprom_version >= 2) &&
408 (rtlefuse->txpwr_safetyflag == 1)) ||
409 ((rtlefuse->eeprom_version >= 2) &&
410 (rtlefuse->eeprom_regulatory != 0)))
411 dont_inc_cck_or_turboscanoff = true;
412
413 if (mac->act_scanning == true) {
414 txagc = 0x3f;
415 if (dont_inc_cck_or_turboscanoff)
416 txagc = pwrlevel;
417 } else {
418 txagc = pwrlevel;
419
420 if (rtlpriv->dm.dynamic_txhighpower_lvl ==
421 TX_HIGH_PWR_LEVEL_LEVEL1)
422 txagc = 0x10;
423 else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
424 TX_HIGH_PWR_LEVEL_LEVEL2)
425 txagc = 0x0;
426 }
427
428 if (txagc > RF6052_MAX_TX_PWR)
429 txagc = RF6052_MAX_TX_PWR;
430
431 rtl_set_bbreg(hw, RTXAGC_CCK_MCS32, BTX_AGCRATECCK, txagc);
432
433}
434
435bool rtl92s_phy_rf6052_config(struct ieee80211_hw *hw)
436{
437 struct rtl_priv *rtlpriv = rtl_priv(hw);
438 struct rtl_phy *rtlphy = &(rtlpriv->phy);
439 u32 u4reg_val = 0;
440 u8 rfpath;
441 bool rtstatus = true;
442 struct bb_reg_def *pphyreg;
443
444 /* Initialize RF */
445 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
446
447 pphyreg = &rtlphy->phyreg_def[rfpath];
448
449 /* Store original RFENV control type */
450 switch (rfpath) {
451 case RF90_PATH_A:
452 case RF90_PATH_C:
453 u4reg_val = rtl92s_phy_query_bb_reg(hw,
454 pphyreg->rfintfs,
455 BRFSI_RFENV);
456 break;
457 case RF90_PATH_B:
458 case RF90_PATH_D:
459 u4reg_val = rtl92s_phy_query_bb_reg(hw,
460 pphyreg->rfintfs,
461 BRFSI_RFENV << 16);
462 break;
463 }
464
465 /* Set RF_ENV enable */
466 rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfe,
467 BRFSI_RFENV << 16, 0x1);
468
469 /* Set RF_ENV output high */
470 rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
471
472 /* Set bit number of Address and Data for RF register */
473 rtl92s_phy_set_bb_reg(hw, pphyreg->rfhssi_para2,
474 B3WIRE_ADDRESSLENGTH, 0x0);
475 rtl92s_phy_set_bb_reg(hw, pphyreg->rfhssi_para2,
476 B3WIRE_DATALENGTH, 0x0);
477
478		/* Initialize RF from the configuration file */
479 switch (rfpath) {
480 case RF90_PATH_A:
481 rtstatus = rtl92s_phy_config_rf(hw,
482 (enum radio_path)rfpath);
483 break;
484 case RF90_PATH_B:
485 rtstatus = rtl92s_phy_config_rf(hw,
486 (enum radio_path)rfpath);
487 break;
488 case RF90_PATH_C:
489 break;
490 case RF90_PATH_D:
491 break;
492 }
493
494 /* Restore RFENV control type */
495 switch (rfpath) {
496 case RF90_PATH_A:
497 case RF90_PATH_C:
498 rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfs, BRFSI_RFENV,
499 u4reg_val);
500 break;
501 case RF90_PATH_B:
502 case RF90_PATH_D:
503 rtl92s_phy_set_bb_reg(hw, pphyreg->rfintfs,
504 BRFSI_RFENV << 16,
505 u4reg_val);
506 break;
507 }
508
509 if (rtstatus != true) {
510 printk(KERN_ERR "Radio[%d] Fail!!", rfpath);
511 goto fail;
512 }
513
514 }
515
516 return rtstatus;
517
518fail:
519 return rtstatus;
520}
521
522void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
523{
524 struct rtl_priv *rtlpriv = rtl_priv(hw);
525 struct rtl_phy *rtlphy = &(rtlpriv->phy);
526
527 switch (bandwidth) {
528 case HT_CHANNEL_WIDTH_20:
529 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
530 0xfffff3ff) | 0x0400);
531 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
532 rtlphy->rfreg_chnlval[0]);
533 break;
534 case HT_CHANNEL_WIDTH_20_40:
535 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
536 0xfffff3ff));
537 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
538 rtlphy->rfreg_chnlval[0]);
539 break;
540 default:
541 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
542 ("unknown bandwidth: %#X\n",
543 bandwidth));
544 break;
545 }
546}
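
In _rtl92s_get_powerbase() above, the per-channel HT20-to-HT40 power difference read from the eFuse is a 4-bit index: values 0-7 add that many power steps, values 8-15 wrap around to -8..-1 (a 4-bit two's-complement field), which is why the code either adds ht20_pwrdiff or subtracts (16 - ht20_pwrdiff). A minimal standalone sketch of that decode, with a hypothetical helper name:

/* Decode the 4-bit HT20 power-diff index used by _rtl92s_get_powerbase():
 * 0..7 map to +0..+7, 8..15 map to -8..-1. */
static int example_decode_ht20_diff(u8 ht20_pwrdiff)
{
	if (ht20_pwrdiff < 8)		/* 0 ~ +7 */
		return ht20_pwrdiff;
	return -(16 - ht20_pwrdiff);	/* index 8..15 => -8 ~ -1 */
}

For example, a stored index of 0xF decodes to -1, so the HT20 power level ends up one step below the stored base.
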
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/rtlwifi/rtl8192se/rf.h
new file mode 100644
index 000000000000..3843baa1a874
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.h
@@ -0,0 +1,43 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __INC_RTL92S_RF_H
30#define __INC_RTL92S_RF_H
31
32#define RF6052_MAX_TX_PWR 0x3F
33
34void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
35 u8 bandwidth);
36bool rtl92s_phy_rf6052_config(struct ieee80211_hw *hw);
37void rtl92s_phy_rf6052_set_ccktxpower(struct ieee80211_hw *hw,
38 u8 powerlevel);
39void rtl92s_phy_rf6052_set_ofdmtxpower(struct ieee80211_hw *hw,
40 u8 *p_pwrlevel, u8 chnl);
41
42#endif
43
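
As declared above, rtl92s_phy_rf6052_set_ofdmtxpower() ends up in _rtl92s_write_ofdm_powerreg(), which packs four 7-bit per-rate power indices into each of the six TXAGC registers listed in regoffset[] (RTXAGC_RATE18_06, RTXAGC_RATE54_24, RTXAGC_MCS03_MCS00 .. RTXAGC_MCS15_MCS12) and clamps every field to RF6052_MAX_TX_PWR before writing it with rtl_set_bbreg(). A minimal sketch of that clamp-and-repack step only, leaving out the extra 2T2R path-A bound adjustment; the helper name is hypothetical:

/* Clamp the four 7-bit power indices packed in one TXAGC word to
 * RF6052_MAX_TX_PWR (0x3F) and repack them, as _rtl92s_write_ofdm_powerreg()
 * does before calling rtl_set_bbreg(hw, regoffset[index], 0x7f7f7f7f, ...). */
static u32 example_clamp_txagc_word(u32 writeval)
{
	u8 pwr[4];
	int i;

	for (i = 0; i < 4; i++) {
		pwr[i] = (u8)((writeval >> (i * 8)) & 0x7f);
		if (pwr[i] > RF6052_MAX_TX_PWR)
			pwr[i] = RF6052_MAX_TX_PWR;
	}

	return ((u32)pwr[3] << 24) | ((u32)pwr[2] << 16) |
	       ((u32)pwr[1] << 8) | pwr[0];
}
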
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
new file mode 100644
index 000000000000..1c6cb1d7d660
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -0,0 +1,423 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include <linux/vmalloc.h>
31
32#include "../wifi.h"
33#include "../core.h"
34#include "../pci.h"
35#include "reg.h"
36#include "def.h"
37#include "phy.h"
38#include "dm.h"
39#include "fw.h"
40#include "hw.h"
41#include "sw.h"
42#include "trx.h"
43#include "led.h"
44
45static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
46{
47 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
48
49	/* Disable ASPM on AMD platforms by default */
50 rtlpci->const_amdpci_aspm = 0;
51
52 /*
53 * ASPM PS mode.
54 * 0 - Disable ASPM,
55 * 1 - Enable ASPM without Clock Req,
56 * 2 - Enable ASPM with Clock Req,
57	 * 3 - Always Enable ASPM with Clock Req,
58 * 4 - Always Enable ASPM without Clock Req.
59	 * set default to RTL8192CE:3 RTL8192SE:2
60 * */
61 rtlpci->const_pci_aspm = 2;
62
63 /*Setting for PCI-E device */
64 rtlpci->const_devicepci_aspm_setting = 0x03;
65
66 /*Setting for PCI-E bridge */
67 rtlpci->const_hostpci_aspm_setting = 0x02;
68
69 /*
70 * In Hw/Sw Radio Off situation.
71 * 0 - Default,
72 * 1 - From ASPM setting without low Mac Pwr,
73 * 2 - From ASPM setting with low Mac Pwr,
74 * 3 - Bus D3
75 * set default to RTL8192CE:0 RTL8192SE:2
76 */
77 rtlpci->const_hwsw_rfoff_d3 = 2;
78
79 /*
80	 * This setting works for devices with a
81	 * backdoor ASPM setting, such as an EPHY setting.
82 * 0 - Not support ASPM,
83 * 1 - Support ASPM,
84 * 2 - According to chipset.
85 */
86 rtlpci->const_support_pciaspm = 2;
87}
88
89static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
90{
91 struct rtl_priv *rtlpriv = rtl_priv(hw);
92 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
93 const struct firmware *firmware;
94 struct rt_firmware *pfirmware = NULL;
95 int err = 0;
96 u16 earlyrxthreshold = 7;
97
98 rtlpriv->dm.dm_initialgain_enable = 1;
99 rtlpriv->dm.dm_flag = 0;
100 rtlpriv->dm.disable_framebursting = 0;
101 rtlpriv->dm.thermalvalue = 0;
102 rtlpriv->dm.useramask = true;
103
104	/* The 8192SE/8191SE is 2.4 GHz band only, single MAC single PHY */
105 rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
106 rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
107 rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
108
109 rtlpci->transmit_config = 0;
110
111 rtlpci->receive_config =
112 RCR_APPFCS |
113 RCR_APWRMGT |
114 /*RCR_ADD3 |*/
115 RCR_AMF |
116 RCR_ADF |
117 RCR_APP_MIC |
118 RCR_APP_ICV |
119 RCR_AICV |
120 /* Accept ICV error, CRC32 Error */
121 RCR_ACRC32 |
122 RCR_AB |
123 /* Accept Broadcast, Multicast */
124 RCR_AM |
125 /* Accept Physical match */
126 RCR_APM |
127 /* Accept Destination Address packets */
128 /*RCR_AAP |*/
129 RCR_APP_PHYST_STAFF |
130 /* Accept PHY status */
131 RCR_APP_PHYST_RXFF |
132 (earlyrxthreshold << RCR_FIFO_OFFSET);
133
134 rtlpci->irq_mask[0] = (u32)
135 (IMR_ROK |
136 IMR_VODOK |
137 IMR_VIDOK |
138 IMR_BEDOK |
139 IMR_BKDOK |
140 IMR_HCCADOK |
141 IMR_MGNTDOK |
142 IMR_COMDOK |
143 IMR_HIGHDOK |
144 IMR_BDOK |
145 IMR_RXCMDOK |
146 /*IMR_TIMEOUT0 |*/
147 IMR_RDU |
148 IMR_RXFOVW |
149 IMR_BCNINT
150 /*| IMR_TXFOVW*/
151 /*| IMR_TBDOK |
152 IMR_TBDER*/);
153
154 rtlpci->irq_mask[1] = (u32) 0;
155
156 rtlpci->shortretry_limit = 0x30;
157 rtlpci->longretry_limit = 0x30;
158
159 rtlpci->first_init = true;
160
161 /* for LPS & IPS */
162 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
163 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
164 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
165 rtlpriv->psc.reg_fwctrl_lps = 3;
166 rtlpriv->psc.reg_max_lps_awakeintvl = 5;
167	/* For ASPM: ASPM can be disabled by setting
168	 * const_support_pciaspm = 0 */
169 rtl92s_init_aspm_vars(hw);
170
171 if (rtlpriv->psc.reg_fwctrl_lps == 1)
172 rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
173 else if (rtlpriv->psc.reg_fwctrl_lps == 2)
174 rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
175 else if (rtlpriv->psc.reg_fwctrl_lps == 3)
176 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
177
178 /* for firmware buf */
179 rtlpriv->rtlhal.pfirmware = vzalloc(sizeof(struct rt_firmware));
180 if (!rtlpriv->rtlhal.pfirmware) {
181 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
182 ("Can't alloc buffer for fw.\n"));
183 return 1;
184 }
185
186 printk(KERN_INFO "rtl8192se: Driver for Realtek RTL8192SE/RTL8191SE\n"
187 " Loading firmware %s\n", rtlpriv->cfg->fw_name);
188 /* request fw */
189 err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
190 rtlpriv->io.dev);
191 if (err) {
192 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
193 ("Failed to request firmware!\n"));
194 return 1;
195 }
196 if (firmware->size > sizeof(struct rt_firmware)) {
197 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
198 ("Firmware is too big!\n"));
199 release_firmware(firmware);
200 return 1;
201 }
202
203 pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
204 memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
205 pfirmware->sz_fw_tmpbufferlen = firmware->size;
206 release_firmware(firmware);
207
208 return err;
209}
210
211static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw)
212{
213 struct rtl_priv *rtlpriv = rtl_priv(hw);
214
215 if (rtlpriv->rtlhal.pfirmware) {
216 vfree(rtlpriv->rtlhal.pfirmware);
217 rtlpriv->rtlhal.pfirmware = NULL;
218 }
219}
220
221static struct rtl_hal_ops rtl8192se_hal_ops = {
222 .init_sw_vars = rtl92s_init_sw_vars,
223 .deinit_sw_vars = rtl92s_deinit_sw_vars,
224 .read_eeprom_info = rtl92se_read_eeprom_info,
225 .interrupt_recognized = rtl92se_interrupt_recognized,
226 .hw_init = rtl92se_hw_init,
227 .hw_disable = rtl92se_card_disable,
228 .hw_suspend = rtl92se_suspend,
229 .hw_resume = rtl92se_resume,
230 .enable_interrupt = rtl92se_enable_interrupt,
231 .disable_interrupt = rtl92se_disable_interrupt,
232 .set_network_type = rtl92se_set_network_type,
233 .set_chk_bssid = rtl92se_set_check_bssid,
234 .set_qos = rtl92se_set_qos,
235 .set_bcn_reg = rtl92se_set_beacon_related_registers,
236 .set_bcn_intv = rtl92se_set_beacon_interval,
237 .update_interrupt_mask = rtl92se_update_interrupt_mask,
238 .get_hw_reg = rtl92se_get_hw_reg,
239 .set_hw_reg = rtl92se_set_hw_reg,
240 .update_rate_tbl = rtl92se_update_hal_rate_tbl,
241 .fill_tx_desc = rtl92se_tx_fill_desc,
242 .fill_tx_cmddesc = rtl92se_tx_fill_cmddesc,
243 .query_rx_desc = rtl92se_rx_query_desc,
244 .set_channel_access = rtl92se_update_channel_access_setting,
245 .radio_onoff_checking = rtl92se_gpio_radio_on_off_checking,
246 .set_bw_mode = rtl92s_phy_set_bw_mode,
247 .switch_channel = rtl92s_phy_sw_chnl,
248 .dm_watchdog = rtl92s_dm_watchdog,
249 .scan_operation_backup = rtl92s_phy_scan_operation_backup,
250 .set_rf_power_state = rtl92s_phy_set_rf_power_state,
251 .led_control = rtl92se_led_control,
252 .set_desc = rtl92se_set_desc,
253 .get_desc = rtl92se_get_desc,
254 .tx_polling = rtl92se_tx_polling,
255 .enable_hw_sec = rtl92se_enable_hw_security_config,
256 .set_key = rtl92se_set_key,
257 .init_sw_leds = rtl92se_init_sw_leds,
258 .get_bbreg = rtl92s_phy_query_bb_reg,
259 .set_bbreg = rtl92s_phy_set_bb_reg,
260 .get_rfreg = rtl92s_phy_query_rf_reg,
261 .set_rfreg = rtl92s_phy_set_rf_reg,
262};
263
264static struct rtl_mod_params rtl92se_mod_params = {
265 .sw_crypto = false,
266 .inactiveps = true,
267 .swctrl_lps = true,
268 .fwctrl_lps = false,
269};
270
271/* Memory R/W bursting can cause a system hang/crash on the 92se,
272 * so we don't read back after every write. */
273static struct rtl_hal_cfg rtl92se_hal_cfg = {
274 .bar_id = 1,
275 .write_readback = false,
276 .name = "rtl92s_pci",
277 .fw_name = "rtlwifi/rtl8192sefw.bin",
278 .ops = &rtl8192se_hal_ops,
279 .mod_params = &rtl92se_mod_params,
280
281 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
282 .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
283 .maps[SYS_CLK] = SYS_CLKR,
284 .maps[MAC_RCR_AM] = RCR_AM,
285 .maps[MAC_RCR_AB] = RCR_AB,
286 .maps[MAC_RCR_ACRC32] = RCR_ACRC32,
287 .maps[MAC_RCR_ACF] = RCR_ACF,
288 .maps[MAC_RCR_AAP] = RCR_AAP,
289
290 .maps[EFUSE_TEST] = REG_EFUSE_TEST,
291 .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
292 .maps[EFUSE_CLK] = REG_EFUSE_CLK,
293 .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
294 .maps[EFUSE_PWC_EV12V] = 0, /* not used on 8192se */
295 .maps[EFUSE_FEN_ELDR] = 0, /* not used on 8192se */
296 .maps[EFUSE_LOADER_CLK_EN] = 0, /* not used on 8192se */
297 .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
298 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE_92S,
299 .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
300 .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
301
302 .maps[RWCAM] = REG_RWCAM,
303 .maps[WCAMI] = REG_WCAMI,
304 .maps[RCAMO] = REG_RCAMO,
305 .maps[CAMDBG] = REG_CAMDBG,
306 .maps[SECR] = REG_SECR,
307 .maps[SEC_CAM_NONE] = CAM_NONE,
308 .maps[SEC_CAM_WEP40] = CAM_WEP40,
309 .maps[SEC_CAM_TKIP] = CAM_TKIP,
310 .maps[SEC_CAM_AES] = CAM_AES,
311 .maps[SEC_CAM_WEP104] = CAM_WEP104,
312
313 .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
314 .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
315 .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
316 .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
317 .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
318 .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
319 .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
320 .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
321 .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
322 .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
323 .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
324 .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
325 .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
326 .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
327 .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
328 .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
329
330 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
331 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
332 .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
333 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
334 .maps[RTL_IMR_RDU] = IMR_RDU,
335 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
336 .maps[RTL_IMR_BDOK] = IMR_BDOK,
337 .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
338 .maps[RTL_IMR_TBDER] = IMR_TBDER,
339 .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
340 .maps[RTL_IMR_COMDOK] = IMR_COMDOK,
341 .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
342 .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
343 .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
344 .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
345 .maps[RTL_IMR_VODOK] = IMR_VODOK,
346 .maps[RTL_IMR_ROK] = IMR_ROK,
347 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
348
349 .maps[RTL_RC_CCK_RATE1M] = DESC92S_RATE1M,
350 .maps[RTL_RC_CCK_RATE2M] = DESC92S_RATE2M,
351 .maps[RTL_RC_CCK_RATE5_5M] = DESC92S_RATE5_5M,
352 .maps[RTL_RC_CCK_RATE11M] = DESC92S_RATE11M,
353 .maps[RTL_RC_OFDM_RATE6M] = DESC92S_RATE6M,
354 .maps[RTL_RC_OFDM_RATE9M] = DESC92S_RATE9M,
355 .maps[RTL_RC_OFDM_RATE12M] = DESC92S_RATE12M,
356 .maps[RTL_RC_OFDM_RATE18M] = DESC92S_RATE18M,
357 .maps[RTL_RC_OFDM_RATE24M] = DESC92S_RATE24M,
358 .maps[RTL_RC_OFDM_RATE36M] = DESC92S_RATE36M,
359 .maps[RTL_RC_OFDM_RATE48M] = DESC92S_RATE48M,
360 .maps[RTL_RC_OFDM_RATE54M] = DESC92S_RATE54M,
361
362 .maps[RTL_RC_HT_RATEMCS7] = DESC92S_RATEMCS7,
363 .maps[RTL_RC_HT_RATEMCS15] = DESC92S_RATEMCS15,
364};
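As context for the .write_readback = false setting above: the bursting comment explains why the 92SE skips read-back verification after register writes. The snippet below is a minimal, purely illustrative sketch of how such a flag can gate a verification read; the helper name is hypothetical and the code is not part of this patch (only rtl_write_byte(), rtl_read_byte() and the write_readback field are taken from the driver).

/* Illustrative sketch only, not part of this patch: how a write_readback
 * flag can gate a verification read after a register write.  With
 * .write_readback = false, as configured above for the 92SE, the extra
 * read is skipped to avoid the bursting hang described in the comment. */
static void example_checked_write8(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
	rtl_write_byte(rtlpriv, addr, val);

	if (rtlpriv->cfg->write_readback)
		rtl_read_byte(rtlpriv, addr);
}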
365
366static struct pci_device_id rtl92se_pci_ids[] __devinitdata = {
367 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8192, rtl92se_hal_cfg)},
368 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8171, rtl92se_hal_cfg)},
369 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8172, rtl92se_hal_cfg)},
370 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8173, rtl92se_hal_cfg)},
371 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8174, rtl92se_hal_cfg)},
372 {},
373};
374
375MODULE_DEVICE_TABLE(pci, rtl92se_pci_ids);
376
377MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
378MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
379MODULE_LICENSE("GPL");
380MODULE_DESCRIPTION("Realtek 8192S/8191S 802.11n PCI wireless");
381MODULE_FIRMWARE("rtlwifi/rtl8192sefw.bin");
382
383module_param_named(swenc, rtl92se_mod_params.sw_crypto, bool, 0444);
384module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444);
385module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
386module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
387 MODULE_PARM_DESC(swenc, "Set to 1 to use software crypto (default 0: hardware crypto)\n");
388 MODULE_PARM_DESC(ips, "Set to 0 to disable inactive power save (default 1: enabled)\n");
389 MODULE_PARM_DESC(swlps, "Set to 0 to disable sw-controlled link power save "
390 "(default 1: enabled)\n");
391
392
393static struct pci_driver rtl92se_driver = {
394 .name = KBUILD_MODNAME,
395 .id_table = rtl92se_pci_ids,
396 .probe = rtl_pci_probe,
397 .remove = rtl_pci_disconnect,
398
399#ifdef CONFIG_PM
400 .suspend = rtl_pci_suspend,
401 .resume = rtl_pci_resume,
402#endif
403
404};
405
406static int __init rtl92se_module_init(void)
407{
408 int ret = 0;
409
410 ret = pci_register_driver(&rtl92se_driver);
411 if (ret)
412 RT_ASSERT(false, (": No device found\n"));
413
414 return ret;
415}
416
417static void __exit rtl92se_module_exit(void)
418{
419 pci_unregister_driver(&rtl92se_driver);
420}
421
422module_init(rtl92se_module_init);
423module_exit(rtl92se_module_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/rtlwifi/rtl8192se/sw.h
index 96f323dc5dd6..fc4eb285a0ac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2009-2010 Realtek Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -19,15 +19,18 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com> 22 * wlanfae <wlanfae@realtek.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
24 * 25 *
25 *****************************************************************************/ 26 *****************************************************************************/
27#ifndef __REALTEK_PCI92SE_SW_H__
28#define __REALTEK_PCI92SE_SW_H__
26 29
27#ifndef __iwl_agn_led_h__ 30#define EFUSE_MAX_SECTION 16
28#define __iwl_agn_led_h__
29 31
30extern const struct iwl_led_ops iwlagn_led_ops; 32int rtl92se_init_sw(struct ieee80211_hw *hw);
31void iwlagn_led_enable(struct iwl_priv *priv); 33void rtl92se_deinit_sw(struct ieee80211_hw *hw);
34void rtl92se_init_var_map(struct ieee80211_hw *hw);
32 35
33#endif /* __iwl_agn_led_h__ */ 36#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/rtlwifi/rtl8192se/table.c
new file mode 100644
index 000000000000..154185b3969d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/table.c
@@ -0,0 +1,634 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 * Created on 2010/ 5/18, 1:41
29 *****************************************************************************/
30
31#include "table.h"
32
33u32 rtl8192sephy_reg_2t2rarray[PHY_REG_2T2RARRAYLENGTH] = {
34 0x01c, 0x07000000,
35 0x800, 0x00040000,
36 0x804, 0x00008003,
37 0x808, 0x0000fc00,
38 0x80c, 0x0000000a,
39 0x810, 0x10005088,
40 0x814, 0x020c3d10,
41 0x818, 0x00200185,
42 0x81c, 0x00000000,
43 0x820, 0x01000000,
44 0x824, 0x00390004,
45 0x828, 0x01000000,
46 0x82c, 0x00390004,
47 0x830, 0x00000004,
48 0x834, 0x00690200,
49 0x838, 0x00000004,
50 0x83c, 0x00690200,
51 0x840, 0x00010000,
52 0x844, 0x00010000,
53 0x848, 0x00000000,
54 0x84c, 0x00000000,
55 0x850, 0x00000000,
56 0x854, 0x00000000,
57 0x858, 0x48484848,
58 0x85c, 0x65a965a9,
59 0x860, 0x0f7f0130,
60 0x864, 0x0f7f0130,
61 0x868, 0x0f7f0130,
62 0x86c, 0x0f7f0130,
63 0x870, 0x03000700,
64 0x874, 0x03000300,
65 0x878, 0x00020002,
66 0x87c, 0x004f0201,
67 0x880, 0xa8300ac1,
68 0x884, 0x00000058,
69 0x888, 0x00000008,
70 0x88c, 0x00000004,
71 0x890, 0x00000000,
72 0x894, 0xfffffffe,
73 0x898, 0x40302010,
74 0x89c, 0x00706050,
75 0x8b0, 0x00000000,
76 0x8e0, 0x00000000,
77 0x8e4, 0x00000000,
78 0xe00, 0x30333333,
79 0xe04, 0x2a2d2e2f,
80 0xe08, 0x00003232,
81 0xe10, 0x30333333,
82 0xe14, 0x2a2d2e2f,
83 0xe18, 0x30333333,
84 0xe1c, 0x2a2d2e2f,
85 0xe30, 0x01007c00,
86 0xe34, 0x01004800,
87 0xe38, 0x1000dc1f,
88 0xe3c, 0x10008c1f,
89 0xe40, 0x021400a0,
90 0xe44, 0x281600a0,
91 0xe48, 0xf8000001,
92 0xe4c, 0x00002910,
93 0xe50, 0x01007c00,
94 0xe54, 0x01004800,
95 0xe58, 0x1000dc1f,
96 0xe5c, 0x10008c1f,
97 0xe60, 0x021400a0,
98 0xe64, 0x281600a0,
99 0xe6c, 0x00002910,
100 0xe70, 0x31ed92fb,
101 0xe74, 0x361536fb,
102 0xe78, 0x361536fb,
103 0xe7c, 0x361536fb,
104 0xe80, 0x361536fb,
105 0xe84, 0x000d92fb,
106 0xe88, 0x000d92fb,
107 0xe8c, 0x31ed92fb,
108 0xed0, 0x31ed92fb,
109 0xed4, 0x31ed92fb,
110 0xed8, 0x000d92fb,
111 0xedc, 0x000d92fb,
112 0xee0, 0x000d92fb,
113 0xee4, 0x015e5448,
114 0xee8, 0x21555448,
115 0x900, 0x00000000,
116 0x904, 0x00000023,
117 0x908, 0x00000000,
118 0x90c, 0x01121313,
119 0xa00, 0x00d047c8,
120 0xa04, 0x80ff0008,
121 0xa08, 0x8ccd8300,
122 0xa0c, 0x2e62120f,
123 0xa10, 0x9500bb78,
124 0xa14, 0x11144028,
125 0xa18, 0x00881117,
126 0xa1c, 0x89140f00,
127 0xa20, 0x1a1b0000,
128 0xa24, 0x090e1317,
129 0xa28, 0x00000204,
130 0xa2c, 0x10d30000,
131 0xc00, 0x40071d40,
132 0xc04, 0x00a05633,
133 0xc08, 0x000000e4,
134 0xc0c, 0x6c6c6c6c,
135 0xc10, 0x08800000,
136 0xc14, 0x40000100,
137 0xc18, 0x08000000,
138 0xc1c, 0x40000100,
139 0xc20, 0x08000000,
140 0xc24, 0x40000100,
141 0xc28, 0x08000000,
142 0xc2c, 0x40000100,
143 0xc30, 0x6de9ac44,
144 0xc34, 0x469652cf,
145 0xc38, 0x49795994,
146 0xc3c, 0x0a979764,
147 0xc40, 0x1f7c403f,
148 0xc44, 0x000100b7,
149 0xc48, 0xec020000,
150 0xc4c, 0x007f037f,
151 0xc50, 0x69543420,
152 0xc54, 0x433c0094,
153 0xc58, 0x69543420,
154 0xc5c, 0x433c0094,
155 0xc60, 0x69543420,
156 0xc64, 0x433c0094,
157 0xc68, 0x69543420,
158 0xc6c, 0x433c0094,
159 0xc70, 0x2c7f000d,
160 0xc74, 0x0186155b,
161 0xc78, 0x0000001f,
162 0xc7c, 0x00b91612,
163 0xc80, 0x40000100,
164 0xc84, 0x20f60000,
165 0xc88, 0x20000080,
166 0xc8c, 0x20200000,
167 0xc90, 0x40000100,
168 0xc94, 0x00000000,
169 0xc98, 0x40000100,
170 0xc9c, 0x00000000,
171 0xca0, 0x00492492,
172 0xca4, 0x00000000,
173 0xca8, 0x00000000,
174 0xcac, 0x00000000,
175 0xcb0, 0x00000000,
176 0xcb4, 0x00000000,
177 0xcb8, 0x00000000,
178 0xcbc, 0x28000000,
179 0xcc0, 0x00000000,
180 0xcc4, 0x00000000,
181 0xcc8, 0x00000000,
182 0xccc, 0x00000000,
183 0xcd0, 0x00000000,
184 0xcd4, 0x00000000,
185 0xcd8, 0x64b22427,
186 0xcdc, 0x00766932,
187 0xce0, 0x00222222,
188 0xce4, 0x00000000,
189 0xce8, 0x37644302,
190 0xcec, 0x2f97d40c,
191 0xd00, 0x00000750,
192 0xd04, 0x00000403,
193 0xd08, 0x0000907f,
194 0xd0c, 0x00000001,
195 0xd10, 0xa0633333,
196 0xd14, 0x33333c63,
197 0xd18, 0x6a8f5b6b,
198 0xd1c, 0x00000000,
199 0xd20, 0x00000000,
200 0xd24, 0x00000000,
201 0xd28, 0x00000000,
202 0xd2c, 0xcc979975,
203 0xd30, 0x00000000,
204 0xd34, 0x00000000,
205 0xd38, 0x00000000,
206 0xd3c, 0x00027293,
207 0xd40, 0x00000000,
208 0xd44, 0x00000000,
209 0xd48, 0x00000000,
210 0xd50, 0x6437140a,
211 0xd54, 0x024dbd02,
212 0xd58, 0x00000000,
213 0xd5c, 0x30032064,
214 0xd60, 0x4653de68,
215 0xd64, 0x00518a3c,
216 0xd68, 0x00002101,
217 0xf14, 0x00000003,
218 0xf4c, 0x00000000,
219 0xf00, 0x00000300,
220};
221
222u32 rtl8192sephy_changeto_1t1rarray[PHY_CHANGETO_1T1RARRAYLENGTH] = {
223 0x844, 0xffffffff, 0x00010000,
224 0x804, 0x0000000f, 0x00000001,
225 0x824, 0x00f0000f, 0x00300004,
226 0x82c, 0x00f0000f, 0x00100002,
227 0x870, 0x04000000, 0x00000001,
228 0x864, 0x00000400, 0x00000000,
229 0x878, 0x000f000f, 0x00000002,
230 0xe74, 0x0f000000, 0x00000002,
231 0xe78, 0x0f000000, 0x00000002,
232 0xe7c, 0x0f000000, 0x00000002,
233 0xe80, 0x0f000000, 0x00000002,
234 0x90c, 0x000000ff, 0x00000011,
235 0xc04, 0x000000ff, 0x00000011,
236 0xd04, 0x0000000f, 0x00000001,
237 0x1f4, 0xffff0000, 0x00007777,
238 0x234, 0xf8000000, 0x0000000a,
239};
240
241u32 rtl8192sephy_changeto_1t2rarray[PHY_CHANGETO_1T2RARRAYLENGTH] = {
242 0x804, 0x0000000f, 0x00000003,
243 0x824, 0x00f0000f, 0x00300004,
244 0x82c, 0x00f0000f, 0x00300002,
245 0x870, 0x04000000, 0x00000001,
246 0x864, 0x00000400, 0x00000000,
247 0x878, 0x000f000f, 0x00000002,
248 0xe74, 0x0f000000, 0x00000002,
249 0xe78, 0x0f000000, 0x00000002,
250 0xe7c, 0x0f000000, 0x00000002,
251 0xe80, 0x0f000000, 0x00000002,
252 0x90c, 0x000000ff, 0x00000011,
253 0xc04, 0x000000ff, 0x00000033,
254 0xd04, 0x0000000f, 0x00000003,
255 0x1f4, 0xffff0000, 0x00007777,
256 0x234, 0xf8000000, 0x0000000a,
257};
258
259u32 rtl8192sephy_reg_array_pg[PHY_REG_ARRAY_PGLENGTH] = {
260 0xe00, 0xffffffff, 0x06090909,
261 0xe04, 0xffffffff, 0x00030406,
262 0xe08, 0x0000ff00, 0x00000000,
263 0xe10, 0xffffffff, 0x0a0c0d0e,
264 0xe14, 0xffffffff, 0x04070809,
265 0xe18, 0xffffffff, 0x0a0c0d0e,
266 0xe1c, 0xffffffff, 0x04070809,
267 0xe00, 0xffffffff, 0x04040404,
268 0xe04, 0xffffffff, 0x00020204,
269 0xe08, 0x0000ff00, 0x00000000,
270 0xe10, 0xffffffff, 0x02040404,
271 0xe14, 0xffffffff, 0x00000002,
272 0xe18, 0xffffffff, 0x02040404,
273 0xe1c, 0xffffffff, 0x00000002,
274 0xe00, 0xffffffff, 0x04040404,
275 0xe04, 0xffffffff, 0x00020204,
276 0xe08, 0x0000ff00, 0x00000000,
277 0xe10, 0xffffffff, 0x02040404,
278 0xe14, 0xffffffff, 0x00000002,
279 0xe18, 0xffffffff, 0x02040404,
280 0xe1c, 0xffffffff, 0x00000002,
281 0xe00, 0xffffffff, 0x02020202,
282 0xe04, 0xffffffff, 0x00020202,
283 0xe08, 0x0000ff00, 0x00000000,
284 0xe10, 0xffffffff, 0x02020202,
285 0xe14, 0xffffffff, 0x00000002,
286 0xe18, 0xffffffff, 0x02020202,
287 0xe1c, 0xffffffff, 0x00000002,
288};
289
290u32 rtl8192seradioa_1t_array[RADIOA_1T_ARRAYLENGTH] = {
291 0x000, 0x00030159,
292 0x001, 0x00030250,
293 0x002, 0x00010000,
294 0x010, 0x0008000f,
295 0x011, 0x000231fc,
296 0x010, 0x000c000f,
297 0x011, 0x0003f9f8,
298 0x010, 0x0002000f,
299 0x011, 0x00020101,
300 0x014, 0x0001093e,
301 0x014, 0x0009093e,
302 0x015, 0x0000f8f4,
303 0x017, 0x000f6500,
304 0x01a, 0x00013056,
305 0x01b, 0x00060000,
306 0x01c, 0x00000300,
307 0x01e, 0x00031059,
308 0x021, 0x00054000,
309 0x022, 0x0000083c,
310 0x023, 0x00001558,
311 0x024, 0x00000060,
312 0x025, 0x00022583,
313 0x026, 0x0000f200,
314 0x027, 0x000eacf1,
315 0x028, 0x0009bd54,
316 0x029, 0x00004582,
317 0x02a, 0x00000001,
318 0x02b, 0x00021334,
319 0x02a, 0x00000000,
320 0x02b, 0x0000000a,
321 0x02a, 0x00000001,
322 0x02b, 0x00000808,
323 0x02b, 0x00053333,
324 0x02c, 0x0000000c,
325 0x02a, 0x00000002,
326 0x02b, 0x00000808,
327 0x02b, 0x0005b333,
328 0x02c, 0x0000000d,
329 0x02a, 0x00000003,
330 0x02b, 0x00000808,
331 0x02b, 0x00063333,
332 0x02c, 0x0000000d,
333 0x02a, 0x00000004,
334 0x02b, 0x00000808,
335 0x02b, 0x0006b333,
336 0x02c, 0x0000000d,
337 0x02a, 0x00000005,
338 0x02b, 0x00000709,
339 0x02b, 0x00053333,
340 0x02c, 0x0000000d,
341 0x02a, 0x00000006,
342 0x02b, 0x00000709,
343 0x02b, 0x0005b333,
344 0x02c, 0x0000000d,
345 0x02a, 0x00000007,
346 0x02b, 0x00000709,
347 0x02b, 0x00063333,
348 0x02c, 0x0000000d,
349 0x02a, 0x00000008,
350 0x02b, 0x00000709,
351 0x02b, 0x0006b333,
352 0x02c, 0x0000000d,
353 0x02a, 0x00000009,
354 0x02b, 0x0000060a,
355 0x02b, 0x00053333,
356 0x02c, 0x0000000d,
357 0x02a, 0x0000000a,
358 0x02b, 0x0000060a,
359 0x02b, 0x0005b333,
360 0x02c, 0x0000000d,
361 0x02a, 0x0000000b,
362 0x02b, 0x0000060a,
363 0x02b, 0x00063333,
364 0x02c, 0x0000000d,
365 0x02a, 0x0000000c,
366 0x02b, 0x0000060a,
367 0x02b, 0x0006b333,
368 0x02c, 0x0000000d,
369 0x02a, 0x0000000d,
370 0x02b, 0x0000050b,
371 0x02b, 0x00053333,
372 0x02c, 0x0000000d,
373 0x02a, 0x0000000e,
374 0x02b, 0x0000050b,
375 0x02b, 0x00066623,
376 0x02c, 0x0000001a,
377 0x02a, 0x000e4000,
378 0x030, 0x00020000,
379 0x031, 0x000b9631,
380 0x032, 0x0000130d,
381 0x033, 0x00000187,
382 0x013, 0x00019e6c,
383 0x013, 0x00015e94,
384 0x000, 0x00010159,
385 0x018, 0x0000f401,
386 0x0fe, 0x00000000,
387 0x01e, 0x0003105b,
388 0x0fe, 0x00000000,
389 0x000, 0x00030159,
390 0x010, 0x0004000f,
391 0x011, 0x000203f9,
392};
393
394u32 rtl8192seradiob_array[RADIOB_ARRAYLENGTH] = {
395 0x000, 0x00030159,
396 0x001, 0x00001041,
397 0x002, 0x00011000,
398 0x005, 0x00080fc0,
399 0x007, 0x000fc803,
400 0x013, 0x00017cb0,
401 0x013, 0x00011cc0,
402 0x013, 0x0000dc60,
403 0x013, 0x00008c60,
404 0x013, 0x00004450,
405 0x013, 0x00000020,
406};
407
408u32 rtl8192seradiob_gm_array[RADIOB_GM_ARRAYLENGTH] = {
409 0x000, 0x00030159,
410 0x001, 0x00001041,
411 0x002, 0x00011000,
412 0x005, 0x00080fc0,
413 0x007, 0x000fc803,
414};
415
416u32 rtl8192semac_2t_array[MAC_2T_ARRAYLENGTH] = {
417 0x020, 0x00000035,
418 0x048, 0x0000000e,
419 0x049, 0x000000f0,
420 0x04a, 0x00000077,
421 0x04b, 0x00000083,
422 0x0b5, 0x00000021,
423 0x0dc, 0x000000ff,
424 0x0dd, 0x000000ff,
425 0x0de, 0x000000ff,
426 0x0df, 0x000000ff,
427 0x116, 0x00000000,
428 0x117, 0x00000000,
429 0x118, 0x00000000,
430 0x119, 0x00000000,
431 0x11a, 0x00000000,
432 0x11b, 0x00000000,
433 0x11c, 0x00000000,
434 0x11d, 0x00000000,
435 0x160, 0x0000000b,
436 0x161, 0x0000000b,
437 0x162, 0x0000000b,
438 0x163, 0x0000000b,
439 0x164, 0x0000000b,
440 0x165, 0x0000000b,
441 0x166, 0x0000000b,
442 0x167, 0x0000000b,
443 0x168, 0x0000000b,
444 0x169, 0x0000000b,
445 0x16a, 0x0000000b,
446 0x16b, 0x0000000b,
447 0x16c, 0x0000000b,
448 0x16d, 0x0000000b,
449 0x16e, 0x0000000b,
450 0x16f, 0x0000000b,
451 0x170, 0x0000000b,
452 0x171, 0x0000000b,
453 0x172, 0x0000000b,
454 0x173, 0x0000000b,
455 0x174, 0x0000000b,
456 0x175, 0x0000000b,
457 0x176, 0x0000000b,
458 0x177, 0x0000000b,
459 0x178, 0x0000000b,
460 0x179, 0x0000000b,
461 0x17a, 0x0000000b,
462 0x17b, 0x0000000b,
463 0x17c, 0x0000000b,
464 0x17d, 0x0000000b,
465 0x17e, 0x0000000b,
466 0x17f, 0x0000000b,
467 0x236, 0x0000000c,
468 0x503, 0x00000022,
469 0x560, 0x00000000,
470};
471
472u32 rtl8192seagctab_array[AGCTAB_ARRAYLENGTH] = {
473 0xc78, 0x7f000001,
474 0xc78, 0x7f010001,
475 0xc78, 0x7e020001,
476 0xc78, 0x7d030001,
477 0xc78, 0x7c040001,
478 0xc78, 0x7b050001,
479 0xc78, 0x7a060001,
480 0xc78, 0x79070001,
481 0xc78, 0x78080001,
482 0xc78, 0x77090001,
483 0xc78, 0x760a0001,
484 0xc78, 0x750b0001,
485 0xc78, 0x740c0001,
486 0xc78, 0x730d0001,
487 0xc78, 0x720e0001,
488 0xc78, 0x710f0001,
489 0xc78, 0x70100001,
490 0xc78, 0x6f110001,
491 0xc78, 0x6f120001,
492 0xc78, 0x6e130001,
493 0xc78, 0x6d140001,
494 0xc78, 0x6d150001,
495 0xc78, 0x6c160001,
496 0xc78, 0x6b170001,
497 0xc78, 0x6a180001,
498 0xc78, 0x6a190001,
499 0xc78, 0x691a0001,
500 0xc78, 0x681b0001,
501 0xc78, 0x671c0001,
502 0xc78, 0x661d0001,
503 0xc78, 0x651e0001,
504 0xc78, 0x641f0001,
505 0xc78, 0x63200001,
506 0xc78, 0x4c210001,
507 0xc78, 0x4b220001,
508 0xc78, 0x4a230001,
509 0xc78, 0x49240001,
510 0xc78, 0x48250001,
511 0xc78, 0x47260001,
512 0xc78, 0x46270001,
513 0xc78, 0x45280001,
514 0xc78, 0x44290001,
515 0xc78, 0x2c2a0001,
516 0xc78, 0x2b2b0001,
517 0xc78, 0x2a2c0001,
518 0xc78, 0x292d0001,
519 0xc78, 0x282e0001,
520 0xc78, 0x272f0001,
521 0xc78, 0x26300001,
522 0xc78, 0x25310001,
523 0xc78, 0x24320001,
524 0xc78, 0x23330001,
525 0xc78, 0x22340001,
526 0xc78, 0x09350001,
527 0xc78, 0x08360001,
528 0xc78, 0x07370001,
529 0xc78, 0x06380001,
530 0xc78, 0x05390001,
531 0xc78, 0x043a0001,
532 0xc78, 0x033b0001,
533 0xc78, 0x023c0001,
534 0xc78, 0x013d0001,
535 0xc78, 0x003e0001,
536 0xc78, 0x003f0001,
537 0xc78, 0x7f400001,
538 0xc78, 0x7f410001,
539 0xc78, 0x7e420001,
540 0xc78, 0x7d430001,
541 0xc78, 0x7c440001,
542 0xc78, 0x7b450001,
543 0xc78, 0x7a460001,
544 0xc78, 0x79470001,
545 0xc78, 0x78480001,
546 0xc78, 0x77490001,
547 0xc78, 0x764a0001,
548 0xc78, 0x754b0001,
549 0xc78, 0x744c0001,
550 0xc78, 0x734d0001,
551 0xc78, 0x724e0001,
552 0xc78, 0x714f0001,
553 0xc78, 0x70500001,
554 0xc78, 0x6f510001,
555 0xc78, 0x6f520001,
556 0xc78, 0x6e530001,
557 0xc78, 0x6d540001,
558 0xc78, 0x6d550001,
559 0xc78, 0x6c560001,
560 0xc78, 0x6b570001,
561 0xc78, 0x6a580001,
562 0xc78, 0x6a590001,
563 0xc78, 0x695a0001,
564 0xc78, 0x685b0001,
565 0xc78, 0x675c0001,
566 0xc78, 0x665d0001,
567 0xc78, 0x655e0001,
568 0xc78, 0x645f0001,
569 0xc78, 0x63600001,
570 0xc78, 0x4c610001,
571 0xc78, 0x4b620001,
572 0xc78, 0x4a630001,
573 0xc78, 0x49640001,
574 0xc78, 0x48650001,
575 0xc78, 0x47660001,
576 0xc78, 0x46670001,
577 0xc78, 0x45680001,
578 0xc78, 0x44690001,
579 0xc78, 0x2c6a0001,
580 0xc78, 0x2b6b0001,
581 0xc78, 0x2a6c0001,
582 0xc78, 0x296d0001,
583 0xc78, 0x286e0001,
584 0xc78, 0x276f0001,
585 0xc78, 0x26700001,
586 0xc78, 0x25710001,
587 0xc78, 0x24720001,
588 0xc78, 0x23730001,
589 0xc78, 0x22740001,
590 0xc78, 0x09750001,
591 0xc78, 0x08760001,
592 0xc78, 0x07770001,
593 0xc78, 0x06780001,
594 0xc78, 0x05790001,
595 0xc78, 0x047a0001,
596 0xc78, 0x037b0001,
597 0xc78, 0x027c0001,
598 0xc78, 0x017d0001,
599 0xc78, 0x007e0001,
600 0xc78, 0x007f0001,
601 0xc78, 0x3000001e,
602 0xc78, 0x3001001e,
603 0xc78, 0x3002001e,
604 0xc78, 0x3003001e,
605 0xc78, 0x3004001e,
606 0xc78, 0x3405001e,
607 0xc78, 0x3806001e,
608 0xc78, 0x3e07001e,
609 0xc78, 0x3e08001e,
610 0xc78, 0x4409001e,
611 0xc78, 0x460a001e,
612 0xc78, 0x480b001e,
613 0xc78, 0x480c001e,
614 0xc78, 0x4e0d001e,
615 0xc78, 0x560e001e,
616 0xc78, 0x5a0f001e,
617 0xc78, 0x5e10001e,
618 0xc78, 0x6211001e,
619 0xc78, 0x6c12001e,
620 0xc78, 0x7213001e,
621 0xc78, 0x7214001e,
622 0xc78, 0x7215001e,
623 0xc78, 0x7216001e,
624 0xc78, 0x7217001e,
625 0xc78, 0x7218001e,
626 0xc78, 0x7219001e,
627 0xc78, 0x721a001e,
628 0xc78, 0x721b001e,
629 0xc78, 0x721c001e,
630 0xc78, 0x721d001e,
631 0xc78, 0x721e001e,
632 0xc78, 0x721f001e,
633};
634
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/rtlwifi/rtl8192se/table.h
new file mode 100644
index 000000000000..b4ed6d951ebb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/table.h
@@ -0,0 +1,49 @@
1/******************************************************************************
2 * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
3 *
4 * This program is distributed in the hope that it will be useful, but WITHOUT
5 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7 * more details.
8 *
9 * You should have received a copy of the GNU General Public License along with
10 * this program; if not, write to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
12 *
13 * The full GNU General Public License is included in this distribution in the
14 * file called LICENSE.
15 *
16 * Contact Information:
17 * wlanfae <wlanfae@realtek.com>
18 *
19 * Larry Finger <Larry.Finger@lwfinger.net>
20 *
21 ******************************************************************************/
22#ifndef __INC_HAL8192SE_FW_IMG_H
23#define __INC_HAL8192SE_FW_IMG_H
24
25#include <linux/types.h>
26
27/*Created on 2010/ 4/12, 5:56*/
28
29#define PHY_REG_2T2RARRAYLENGTH 372
30extern u32 rtl8192sephy_reg_2t2rarray[PHY_REG_2T2RARRAYLENGTH];
31#define PHY_CHANGETO_1T1RARRAYLENGTH 48
32extern u32 rtl8192sephy_changeto_1t1rarray[PHY_CHANGETO_1T1RARRAYLENGTH];
33#define PHY_CHANGETO_1T2RARRAYLENGTH 45
34extern u32 rtl8192sephy_changeto_1t2rarray[PHY_CHANGETO_1T2RARRAYLENGTH];
35#define PHY_REG_ARRAY_PGLENGTH 84
36extern u32 rtl8192sephy_reg_array_pg[PHY_REG_ARRAY_PGLENGTH];
37#define RADIOA_1T_ARRAYLENGTH 202
38extern u32 rtl8192seradioa_1t_array[RADIOA_1T_ARRAYLENGTH];
39#define RADIOB_ARRAYLENGTH 22
40extern u32 rtl8192seradiob_array[RADIOB_ARRAYLENGTH];
41#define RADIOB_GM_ARRAYLENGTH 10
42extern u32 rtl8192seradiob_gm_array[RADIOB_GM_ARRAYLENGTH];
43#define MAC_2T_ARRAYLENGTH 106
44extern u32 rtl8192semac_2t_array[MAC_2T_ARRAYLENGTH];
45#define AGCTAB_ARRAYLENGTH 320
46extern u32 rtl8192seagctab_array[AGCTAB_ARRAYLENGTH];
47
48#endif
49
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
new file mode 100644
index 000000000000..5cf442373d46
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -0,0 +1,976 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../base.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "fw.h"
37#include "trx.h"
38#include "led.h"
39
40static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
41{
42 __le16 fc = rtl_get_fc(skb);
43
44 if (unlikely(ieee80211_is_beacon(fc)))
45 return QSLT_BEACON;
46 if (ieee80211_is_mgmt(fc))
47 return QSLT_MGNT;
48 if (ieee80211_is_nullfunc(fc))
49 return QSLT_HIGH;
50
51 return skb->priority;
52}
53
54static int _rtl92se_rate_mapping(bool isht, u8 desc_rate, bool first_ampdu)
55{
56 int rate_idx = 0;
57
58 if (first_ampdu) {
59 if (false == isht) {
60 switch (desc_rate) {
61 case DESC92S_RATE1M:
62 rate_idx = 0;
63 break;
64 case DESC92S_RATE2M:
65 rate_idx = 1;
66 break;
67 case DESC92S_RATE5_5M:
68 rate_idx = 2;
69 break;
70 case DESC92S_RATE11M:
71 rate_idx = 3;
72 break;
73 case DESC92S_RATE6M:
74 rate_idx = 4;
75 break;
76 case DESC92S_RATE9M:
77 rate_idx = 5;
78 break;
79 case DESC92S_RATE12M:
80 rate_idx = 6;
81 break;
82 case DESC92S_RATE18M:
83 rate_idx = 7;
84 break;
85 case DESC92S_RATE24M:
86 rate_idx = 8;
87 break;
88 case DESC92S_RATE36M:
89 rate_idx = 9;
90 break;
91 case DESC92S_RATE48M:
92 rate_idx = 10;
93 break;
94 case DESC92S_RATE54M:
95 rate_idx = 11;
96 break;
97 default:
98 rate_idx = 0;
99 break;
100 }
101 } else {
102 rate_idx = 11;
103 }
104
105 return rate_idx;
106 }
107
108 switch (desc_rate) {
109 case DESC92S_RATE1M:
110 rate_idx = 0;
111 break;
112 case DESC92S_RATE2M:
113 rate_idx = 1;
114 break;
115 case DESC92S_RATE5_5M:
116 rate_idx = 2;
117 break;
118 case DESC92S_RATE11M:
119 rate_idx = 3;
120 break;
121 case DESC92S_RATE6M:
122 rate_idx = 4;
123 break;
124 case DESC92S_RATE9M:
125 rate_idx = 5;
126 break;
127 case DESC92S_RATE12M:
128 rate_idx = 6;
129 break;
130 case DESC92S_RATE18M:
131 rate_idx = 7;
132 break;
133 case DESC92S_RATE24M:
134 rate_idx = 8;
135 break;
136 case DESC92S_RATE36M:
137 rate_idx = 9;
138 break;
139 case DESC92S_RATE48M:
140 rate_idx = 10;
141 break;
142 case DESC92S_RATE54M:
143 rate_idx = 11;
144 break;
145 default:
146 rate_idx = 11;
147 break;
148 }
149 return rate_idx;
150}
151
152static u8 _rtl92s_query_rxpwrpercentage(char antpower)
153{
154 if ((antpower <= -100) || (antpower >= 20))
155 return 0;
156 else if (antpower >= 0)
157 return 100;
158 else
159 return 100 + antpower;
160}
161
162static u8 _rtl92s_evm_db_to_percentage(char value)
163{
164 char ret_val;
165 ret_val = value;
166
167 if (ret_val >= 0)
168 ret_val = 0;
169
170 if (ret_val <= -33)
171 ret_val = -33;
172
173 ret_val = 0 - ret_val;
174 ret_val *= 3;
175
176 if (ret_val == 99)
177 ret_val = 100;
178
179 return ret_val;
180}
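A few worked values for the conversion above may help; the input is assumed to be an EVM report in dB, which the function clamps to [-33, 0] before scaling.

/*
 * _rtl92s_evm_db_to_percentage() worked examples (integer math):
 *
 *   value =   0 dB          ->   0 %   (worst)
 *   value = -11 dB          ->  33 %
 *   value = -20 dB          ->  60 %
 *   value = -33 dB or lower ->  99, reported as 100 %   (best)
 */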
181
182static long _rtl92se_translate_todbm(struct ieee80211_hw *hw,
183 u8 signal_strength_index)
184{
185 long signal_power;
186
187 signal_power = (long)((signal_strength_index + 1) >> 1);
188 signal_power -= 95;
189 return signal_power;
190}
191
192static long _rtl92se_signal_scale_mapping(struct ieee80211_hw *hw,
193 long currsig)
194{
195 long retsig = 0;
196
197 /* Step 1. Scale mapping. */
198 if (currsig > 47)
199 retsig = 100;
200 else if (currsig > 14 && currsig <= 47)
201 retsig = 100 - ((47 - currsig) * 3) / 2;
202 else if (currsig > 2 && currsig <= 14)
203 retsig = 48 - ((14 - currsig) * 15) / 7;
204 else if (currsig >= 0)
205 retsig = currsig * 9 + 1;
206
207 return retsig;
208}
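Likewise, a few worked values of the piecewise scale mapping above make the break points concrete (integer division throughout).

/*
 * _rtl92se_signal_scale_mapping() worked examples:
 *
 *   currsig = 50  ->  100
 *   currsig = 30  ->  100 - ((47 - 30) * 3) / 2   =  75
 *   currsig = 10  ->   48 - ((14 - 10) * 15) / 7  =  40
 *   currsig =  1  ->    1 * 9 + 1                 =  10
 */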
209
210
211static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
212 struct rtl_stats *pstats, u8 *pdesc,
213 struct rx_fwinfo *p_drvinfo,
214 bool packet_match_bssid,
215 bool packet_toself,
216 bool packet_beacon)
217{
218 struct rtl_priv *rtlpriv = rtl_priv(hw);
219 struct phy_sts_cck_8192s_t *cck_buf;
220 s8 rx_pwr_all = 0, rx_pwr[4];
221 u8 rf_rx_num = 0, evm, pwdb_all;
222 u8 i, max_spatial_stream;
223 u32 rssi, total_rssi = 0;
224 bool in_powersavemode = false;
225 bool is_cck_rate;
226
227 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
228 pstats->packet_matchbssid = packet_match_bssid;
229 pstats->packet_toself = packet_toself;
230 pstats->is_cck = is_cck_rate;
231 pstats->packet_beacon = packet_beacon;
232 pstats->is_cck = is_cck_rate;
233 pstats->rx_mimo_signalquality[0] = -1;
234 pstats->rx_mimo_signalquality[1] = -1;
235
236 if (is_cck_rate) {
237 u8 report, cck_highpwr;
238 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
239
240 if (!in_powersavemode)
241 cck_highpwr = (u8) rtl_get_bbreg(hw,
242 RFPGA0_XA_HSSIPARAMETER2,
243 0x200);
244 else
245 cck_highpwr = false;
246
247 if (!cck_highpwr) {
248 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
249 report = cck_buf->cck_agc_rpt & 0xc0;
250 report = report >> 6;
251 switch (report) {
252 case 0x3:
253 rx_pwr_all = -40 - (cck_agc_rpt & 0x3e);
254 break;
255 case 0x2:
256 rx_pwr_all = -20 - (cck_agc_rpt & 0x3e);
257 break;
258 case 0x1:
259 rx_pwr_all = -2 - (cck_agc_rpt & 0x3e);
260 break;
261 case 0x0:
262 rx_pwr_all = 14 - (cck_agc_rpt & 0x3e);
263 break;
264 }
265 } else {
266 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
267 report = p_drvinfo->cfosho[0] & 0x60;
268 report = report >> 5;
269 switch (report) {
270 case 0x3:
271 rx_pwr_all = -40 - ((cck_agc_rpt & 0x1f) << 1);
272 break;
273 case 0x2:
274 rx_pwr_all = -20 - ((cck_agc_rpt & 0x1f) << 1);
275 break;
276 case 0x1:
277 rx_pwr_all = -2 - ((cck_agc_rpt & 0x1f) << 1);
278 break;
279 case 0x0:
280 rx_pwr_all = 14 - ((cck_agc_rpt & 0x1f) << 1);
281 break;
282 }
283 }
284
285 pwdb_all = _rtl92s_query_rxpwrpercentage(rx_pwr_all);
286
287 /* CCK gain is smaller than OFDM/MCS gain, */
288 /* so add an empirically determined offset of 6 */
289 pwdb_all += 6;
290 if (pwdb_all > 100)
291 pwdb_all = 100;
292 /* Adjust the offset so the gain index matches OFDM. */
293 if (pwdb_all > 34 && pwdb_all <= 42)
294 pwdb_all -= 2;
295 else if (pwdb_all > 26 && pwdb_all <= 34)
296 pwdb_all -= 6;
297 else if (pwdb_all > 14 && pwdb_all <= 26)
298 pwdb_all -= 8;
299 else if (pwdb_all > 4 && pwdb_all <= 14)
300 pwdb_all -= 4;
301
302 pstats->rx_pwdb_all = pwdb_all;
303 pstats->recvsignalpower = rx_pwr_all;
304
305 if (packet_match_bssid) {
306 u8 sq;
307 if (pstats->rx_pwdb_all > 40) {
308 sq = 100;
309 } else {
310 sq = cck_buf->sq_rpt;
311 if (sq > 64)
312 sq = 0;
313 else if (sq < 20)
314 sq = 100;
315 else
316 sq = ((64 - sq) * 100) / 44;
317 }
318
319 pstats->signalquality = sq;
320 pstats->rx_mimo_signalquality[0] = sq;
321 pstats->rx_mimo_signalquality[1] = -1;
322 }
323 } else {
324 rtlpriv->dm.rfpath_rxenable[0] =
325 rtlpriv->dm.rfpath_rxenable[1] = true;
326 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
327 if (rtlpriv->dm.rfpath_rxenable[i])
328 rf_rx_num++;
329
330 rx_pwr[i] = ((p_drvinfo->gain_trsw[i] &
331 0x3f) * 2) - 110;
332 rssi = _rtl92s_query_rxpwrpercentage(rx_pwr[i]);
333 total_rssi += rssi;
334 rtlpriv->stats.rx_snr_db[i] =
335 (long)(p_drvinfo->rxsnr[i] / 2);
336
337 if (packet_match_bssid)
338 pstats->rx_mimo_signalstrength[i] = (u8) rssi;
339 }
340
341 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
342 pwdb_all = _rtl92s_query_rxpwrpercentage(rx_pwr_all);
343 pstats->rx_pwdb_all = pwdb_all;
344 pstats->rxpower = rx_pwr_all;
345 pstats->recvsignalpower = rx_pwr_all;
346
347 if (GET_RX_STATUS_DESC_RX_HT(pdesc) &&
348 GET_RX_STATUS_DESC_RX_MCS(pdesc) >= DESC92S_RATEMCS8 &&
349 GET_RX_STATUS_DESC_RX_MCS(pdesc) <= DESC92S_RATEMCS15)
350 max_spatial_stream = 2;
351 else
352 max_spatial_stream = 1;
353
354 for (i = 0; i < max_spatial_stream; i++) {
355 evm = _rtl92s_evm_db_to_percentage(p_drvinfo->rxevm[i]);
356
357 if (packet_match_bssid) {
358 if (i == 0)
359 pstats->signalquality = (u8)(evm &
360 0xff);
361 pstats->rx_mimo_signalquality[i] =
362 (u8) (evm & 0xff);
363 }
364 }
365 }
366
367 if (is_cck_rate)
368 pstats->signalstrength = (u8)(_rtl92se_signal_scale_mapping(hw,
369 pwdb_all));
370 else if (rf_rx_num != 0)
371 pstats->signalstrength = (u8) (_rtl92se_signal_scale_mapping(hw,
372 total_rssi /= rf_rx_num));
373}
374
375static void _rtl92se_process_ui_rssi(struct ieee80211_hw *hw,
376 struct rtl_stats *pstats)
377{
378 struct rtl_priv *rtlpriv = rtl_priv(hw);
379 struct rtl_phy *rtlphy = &(rtlpriv->phy);
380 u8 rfpath;
381 u32 last_rssi, tmpval;
382
383 if (pstats->packet_toself || pstats->packet_beacon) {
384 rtlpriv->stats.rssi_calculate_cnt++;
385
386 if (rtlpriv->stats.ui_rssi.total_num++ >=
387 PHY_RSSI_SLID_WIN_MAX) {
388 rtlpriv->stats.ui_rssi.total_num =
389 PHY_RSSI_SLID_WIN_MAX;
390 last_rssi = rtlpriv->stats.ui_rssi.elements[
391 rtlpriv->stats.ui_rssi.index];
392 rtlpriv->stats.ui_rssi.total_val -= last_rssi;
393 }
394
395 rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
396 rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++]
397 = pstats->signalstrength;
398
399 if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
400 rtlpriv->stats.ui_rssi.index = 0;
401
402 tmpval = rtlpriv->stats.ui_rssi.total_val /
403 rtlpriv->stats.ui_rssi.total_num;
404 rtlpriv->stats.signal_strength = _rtl92se_translate_todbm(hw,
405 (u8) tmpval);
406 pstats->rssi = rtlpriv->stats.signal_strength;
407 }
408
409 if (!pstats->is_cck && pstats->packet_toself) {
410 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
411 rfpath++) {
412 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
413 rtlpriv->stats.rx_rssi_percentage[rfpath] =
414 pstats->rx_mimo_signalstrength[rfpath];
415
416 }
417
418 if (pstats->rx_mimo_signalstrength[rfpath] >
419 rtlpriv->stats.rx_rssi_percentage[rfpath]) {
420 rtlpriv->stats.rx_rssi_percentage[rfpath] =
421 ((rtlpriv->stats.rx_rssi_percentage[rfpath]
422 * (RX_SMOOTH_FACTOR - 1)) +
423 (pstats->rx_mimo_signalstrength[rfpath])) /
424 (RX_SMOOTH_FACTOR);
425
426 rtlpriv->stats.rx_rssi_percentage[rfpath] =
427 rtlpriv->stats.rx_rssi_percentage[rfpath]
428 + 1;
429 } else {
430 rtlpriv->stats.rx_rssi_percentage[rfpath] =
431 ((rtlpriv->stats.rx_rssi_percentage[rfpath]
432 * (RX_SMOOTH_FACTOR - 1)) +
433 (pstats->rx_mimo_signalstrength[rfpath])) /
434 (RX_SMOOTH_FACTOR);
435 }
436
437 }
438 }
439}
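The per-path RSSI update above, and the PWDB and EVM updates that follow, all use the same running-average form: new = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR. Below is a minimal standalone sketch of that step; the factor value of 20 is an assumption for the example only (the real constant is defined elsewhere in rtlwifi).

/* Minimal sketch of the RX_SMOOTH_FACTOR running average used above.
 * The factor value (20) is assumed for illustration only. */
static u32 example_smooth(u32 old_avg, u32 sample)
{
	const u32 factor = 20;	/* stand-in for RX_SMOOTH_FACTOR */

	return (old_avg * (factor - 1) + sample) / factor;
}

Each new sample moves the average by roughly 1/factor of the difference, which damps per-packet noise in the reported signal statistics.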
440
441static void _rtl92se_update_rxsignalstatistics(struct ieee80211_hw *hw,
442 struct rtl_stats *pstats)
443{
444 struct rtl_priv *rtlpriv = rtl_priv(hw);
445 int weighting = 0;
446
447 if (rtlpriv->stats.recv_signal_power == 0)
448 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
449
450 if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
451 weighting = 5;
452 else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
453 weighting = (-5);
454
455 rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power * 5
456 + pstats->recvsignalpower +
457 weighting) / 6;
458}
459
460static void _rtl92se_process_pwdb(struct ieee80211_hw *hw,
461 struct rtl_stats *pstats)
462{
463 struct rtl_priv *rtlpriv = rtl_priv(hw);
464 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
465 long undec_sm_pwdb = 0;
466
467 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
468 return;
469 } else {
470 undec_sm_pwdb =
471 rtlpriv->dm.undecorated_smoothed_pwdb;
472 }
473
474 if (pstats->packet_toself || pstats->packet_beacon) {
475 if (undec_sm_pwdb < 0)
476 undec_sm_pwdb = pstats->rx_pwdb_all;
477
478 if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
479 undec_sm_pwdb =
480 (((undec_sm_pwdb) *
481 (RX_SMOOTH_FACTOR - 1)) +
482 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
483
484 undec_sm_pwdb = undec_sm_pwdb + 1;
485 } else {
486 undec_sm_pwdb = (((undec_sm_pwdb) *
487 (RX_SMOOTH_FACTOR - 1)) + (pstats->rx_pwdb_all)) /
488 (RX_SMOOTH_FACTOR);
489 }
490
491 rtlpriv->dm.undecorated_smoothed_pwdb = undec_sm_pwdb;
492 _rtl92se_update_rxsignalstatistics(hw, pstats);
493 }
494}
495
496static void rtl_92s_process_streams(struct ieee80211_hw *hw,
497 struct rtl_stats *pstats)
498{
499 struct rtl_priv *rtlpriv = rtl_priv(hw);
500 u32 stream;
501
502 for (stream = 0; stream < 2; stream++) {
503 if (pstats->rx_mimo_signalquality[stream] != -1) {
504 if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
505 rtlpriv->stats.rx_evm_percentage[stream] =
506 pstats->rx_mimo_signalquality[stream];
507 }
508
509 rtlpriv->stats.rx_evm_percentage[stream] =
510 ((rtlpriv->stats.rx_evm_percentage[stream] *
511 (RX_SMOOTH_FACTOR - 1)) +
512 (pstats->rx_mimo_signalquality[stream] *
513 1)) / (RX_SMOOTH_FACTOR);
514 }
515 }
516}
517
518static void _rtl92se_process_ui_link_quality(struct ieee80211_hw *hw,
519 struct rtl_stats *pstats)
520{
521 struct rtl_priv *rtlpriv = rtl_priv(hw);
522 u32 last_evm = 0, tmpval;
523
524 if (pstats->signalquality != 0) {
525 if (pstats->packet_toself || pstats->packet_beacon) {
526
527 if (rtlpriv->stats.ui_link_quality.total_num++ >=
528 PHY_LINKQUALITY_SLID_WIN_MAX) {
529 rtlpriv->stats.ui_link_quality.total_num =
530 PHY_LINKQUALITY_SLID_WIN_MAX;
531 last_evm =
532 rtlpriv->stats.ui_link_quality.elements[
533 rtlpriv->stats.ui_link_quality.index];
534 rtlpriv->stats.ui_link_quality.total_val -=
535 last_evm;
536 }
537
538 rtlpriv->stats.ui_link_quality.total_val +=
539 pstats->signalquality;
540 rtlpriv->stats.ui_link_quality.elements[
541 rtlpriv->stats.ui_link_quality.index++] =
542 pstats->signalquality;
543
544 if (rtlpriv->stats.ui_link_quality.index >=
545 PHY_LINKQUALITY_SLID_WIN_MAX)
546 rtlpriv->stats.ui_link_quality.index = 0;
547
548 tmpval = rtlpriv->stats.ui_link_quality.total_val /
549 rtlpriv->stats.ui_link_quality.total_num;
550 rtlpriv->stats.signal_quality = tmpval;
551
552 rtlpriv->stats.last_sigstrength_inpercent = tmpval;
553
554 rtl_92s_process_streams(hw, pstats);
555
556 }
557 }
558}
559
560static void _rtl92se_process_phyinfo(struct ieee80211_hw *hw,
561 u8 *buffer,
562 struct rtl_stats *pcurrent_stats)
563{
564
565 if (!pcurrent_stats->packet_matchbssid &&
566 !pcurrent_stats->packet_beacon)
567 return;
568
569 _rtl92se_process_ui_rssi(hw, pcurrent_stats);
570 _rtl92se_process_pwdb(hw, pcurrent_stats);
571 _rtl92se_process_ui_link_quality(hw, pcurrent_stats);
572}
573
574static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
575 struct sk_buff *skb, struct rtl_stats *pstats,
576 u8 *pdesc, struct rx_fwinfo *p_drvinfo)
577{
578 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
579 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
580
581 struct ieee80211_hdr *hdr;
582 u8 *tmp_buf;
583 u8 *praddr;
584 u8 *psaddr;
585 __le16 fc;
586 u16 type, cfc;
587 bool packet_matchbssid, packet_toself, packet_beacon = false;
588
589 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
590
591 hdr = (struct ieee80211_hdr *)tmp_buf;
592 fc = hdr->frame_control;
593 cfc = le16_to_cpu(fc);
594 type = WLAN_FC_GET_TYPE(fc);
595 praddr = hdr->addr1;
596 psaddr = hdr->addr2;
597
598 packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
599 (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ?
600 hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ?
601 hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) &&
602 (!pstats->crc) && (!pstats->icv));
603
604 packet_toself = packet_matchbssid &&
605 (!compare_ether_addr(praddr, rtlefuse->dev_addr));
606
607 if (ieee80211_is_beacon(fc))
608 packet_beacon = true;
609
610 _rtl92se_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
611 packet_matchbssid, packet_toself, packet_beacon);
612 _rtl92se_process_phyinfo(hw, tmp_buf, pstats);
613}
614
615bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
616 struct ieee80211_rx_status *rx_status, u8 *pdesc,
617 struct sk_buff *skb)
618{
619 struct rx_fwinfo *p_drvinfo;
620 u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
621
622 stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
623 stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
624 stats->rx_bufshift = (u8)(GET_RX_STATUS_DESC_SHIFT(pdesc) & 0x03);
625 stats->icv = (u16)GET_RX_STATUS_DESC_ICV(pdesc);
626 stats->crc = (u16)GET_RX_STATUS_DESC_CRC32(pdesc);
627 stats->hwerror = (u16)(stats->crc | stats->icv);
628 stats->decrypted = !GET_RX_STATUS_DESC_SWDEC(pdesc);
629
630 stats->rate = (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc);
631 stats->shortpreamble = (u16)GET_RX_STATUS_DESC_SPLCP(pdesc);
632 stats->isampdu = (bool)(GET_RX_STATUS_DESC_PAGGR(pdesc) == 1);
633 stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc);
634 stats->rx_is40Mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
635
636 if (stats->hwerror)
637 return false;
638
639 rx_status->freq = hw->conf.channel->center_freq;
640 rx_status->band = hw->conf.channel->band;
641
642 if (GET_RX_STATUS_DESC_CRC32(pdesc))
643 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
644
645 if (!GET_RX_STATUS_DESC_SWDEC(pdesc))
646 rx_status->flag |= RX_FLAG_DECRYPTED;
647
648 if (GET_RX_STATUS_DESC_BW(pdesc))
649 rx_status->flag |= RX_FLAG_40MHZ;
650
651 if (GET_RX_STATUS_DESC_RX_HT(pdesc))
652 rx_status->flag |= RX_FLAG_HT;
653
654 rx_status->flag |= RX_FLAG_MACTIME_MPDU;
655
656 if (stats->decrypted)
657 rx_status->flag |= RX_FLAG_DECRYPTED;
658
659 rx_status->rate_idx = _rtl92se_rate_mapping((bool)
660 GET_RX_STATUS_DESC_RX_HT(pdesc),
661 (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc),
662 (bool)GET_RX_STATUS_DESC_PAGGR(pdesc));
663
664
665 rx_status->mactime = GET_RX_STATUS_DESC_TSFL(pdesc);
666 if (phystatus == true) {
667 p_drvinfo = (struct rx_fwinfo *)(skb->data +
668 stats->rx_bufshift);
669 _rtl92se_translate_rx_signal_stuff(hw, skb, stats, pdesc,
670 p_drvinfo);
671 }
672
673 /*rx_status->qual = stats->signal; */
674 rx_status->signal = stats->rssi + 10;
675 /*rx_status->noise = -stats->noise; */
676
677 return true;
678}
679
680void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
681 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
682 struct ieee80211_tx_info *info, struct sk_buff *skb,
683 u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
684{
685 struct rtl_priv *rtlpriv = rtl_priv(hw);
686 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
687 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
688 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
689 struct ieee80211_sta *sta = info->control.sta;
690 u8 *pdesc = (u8 *) pdesc_tx;
691 u16 seq_number;
692 __le16 fc = hdr->frame_control;
693 u8 reserved_macid = 0;
694 u8 fw_qsel = _rtl92se_map_hwqueue_to_fwqueue(skb, hw_queue);
695 bool firstseg = (!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)));
696 bool lastseg = (!(hdr->frame_control &
697 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)));
698 dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
699 PCI_DMA_TODEVICE);
700 u8 bw_40 = 0;
701
702 if (mac->opmode == NL80211_IFTYPE_STATION) {
703 bw_40 = mac->bw_40;
704 } else if (mac->opmode == NL80211_IFTYPE_AP ||
705 mac->opmode == NL80211_IFTYPE_ADHOC) {
706 if (sta)
707 bw_40 = sta->ht_cap.cap &
708 IEEE80211_HT_CAP_SUP_WIDTH_20_40;
709 }
710
711 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
712
713 rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
714
715 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE_RTL8192S);
716
717 if (firstseg) {
718 if (rtlpriv->dm.useramask) {
719 /* set txdesc macId */
720 if (ptcb_desc->mac_id < 32) {
721 SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
722 reserved_macid |= ptcb_desc->mac_id;
723 }
724 }
725 SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
726
727 SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
728 DESC92S_RATEMCS0) ? 1 : 0));
729
730 if (rtlhal->version == VERSION_8192S_ACUT) {
731 if (ptcb_desc->hw_rate == DESC92S_RATE1M ||
732 ptcb_desc->hw_rate == DESC92S_RATE2M ||
733 ptcb_desc->hw_rate == DESC92S_RATE5_5M ||
734 ptcb_desc->hw_rate == DESC92S_RATE11M) {
735 ptcb_desc->hw_rate = DESC92S_RATE12M;
736 }
737 }
738
739 SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
740
741 if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
742 SET_TX_DESC_TX_SHORT(pdesc, 0);
743
744 /* Aggregation related */
745 if (info->flags & IEEE80211_TX_CTL_AMPDU)
746 SET_TX_DESC_AGG_ENABLE(pdesc, 1);
747
748 /* For AMPDU, we must insert SSN into TX_DESC */
749 SET_TX_DESC_SEQ(pdesc, seq_number);
750
751 /* Protection mode related */
752 /* For 92S, if RTS/CTS are set, HW will execute RTS. */
753 /* We choose only one protection mode to execute */
754 SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
755 !ptcb_desc->cts_enable) ? 1 : 0));
756 SET_TX_DESC_CTS_ENABLE(pdesc, ((ptcb_desc->cts_enable) ?
757 1 : 0));
758 SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
759
760 SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
761 SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
762 SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
763 SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
764 DESC92S_RATE54M) ?
765 (ptcb_desc->rts_use_shortpreamble ? 1 : 0)
766 : (ptcb_desc->rts_use_shortgi ? 1 : 0)));
767
768
769 /* Set Bandwidth and sub-channel settings. */
770 if (bw_40) {
771 if (ptcb_desc->packet_bw) {
772 SET_TX_DESC_TX_BANDWIDTH(pdesc, 1);
773 /* use duplicated mode */
774 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
775 } else {
776 SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
777 SET_TX_DESC_TX_SUB_CARRIER(pdesc,
778 mac->cur_40_prime_sc);
779 }
780 } else {
781 SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
782 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
783 }
784
785 /* Fill the necessary fields in the first descriptor */
786 /*DWORD 0*/
787 SET_TX_DESC_LINIP(pdesc, 0);
788 SET_TX_DESC_OFFSET(pdesc, 32);
789 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
790
791 /*DWORD 1*/
792 SET_TX_DESC_RA_BRSR_ID(pdesc, ptcb_desc->ratr_index);
793
794 /* Fill security-related fields */
795 if (info->control.hw_key) {
796 struct ieee80211_key_conf *keyconf;
797
798 keyconf = info->control.hw_key;
799 switch (keyconf->cipher) {
800 case WLAN_CIPHER_SUITE_WEP40:
801 case WLAN_CIPHER_SUITE_WEP104:
802 SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
803 break;
804 case WLAN_CIPHER_SUITE_TKIP:
805 SET_TX_DESC_SEC_TYPE(pdesc, 0x2);
806 break;
807 case WLAN_CIPHER_SUITE_CCMP:
808 SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
809 break;
810 default:
811 SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
812 break;
813
814 }
815 }
816
817 /* Set Packet ID */
818 SET_TX_DESC_PACKET_ID(pdesc, 0);
819
820 /* We will assign the management queue to BK. */
821 SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
822
823 /* Always enable the full rate fallback range */
824 SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
825
826 /* Fix: I don't know why the hw transmits at 6.5M when this is set */
827 SET_TX_DESC_USER_RATE(pdesc,
828 ptcb_desc->use_driver_rate ? 1 : 0);
829
830 /* Set NON_QOS bit. */
831 if (!ieee80211_is_data_qos(fc))
832 SET_TX_DESC_NON_QOS(pdesc, 1);
833
834 }
835
836 /* Fill fields that are required to be initialized
837 * in all of the descriptors */
838 /*DWORD 0 */
839 SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
840 SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
841
842 /* DWORD 7 */
843 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
844
845 /* DWORD 8 */
846 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
847
848 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, ("\n"));
849}
850
851void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
852 bool firstseg, bool lastseg, struct sk_buff *skb)
853{
854 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
855 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
856 struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
857
858 dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
859 PCI_DMA_TODEVICE);
860
861 /* Clear all status */
862 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_CMDDESC_SIZE_RTL8192S);
863
864 /* This bit indicates the packet is used for FW download. */
865 if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
866 /* For firmware download we only need to set LINIP */
867 SET_TX_DESC_LINIP(pdesc, tcb_desc->last_inipkt);
868
869 /* 92SE must set this to 1 to avoid a HW DMA error during fw download */
870 SET_TX_DESC_FIRST_SEG(pdesc, 1);
871 SET_TX_DESC_LAST_SEG(pdesc, 1);
872
873 /* 92SE does not need to set the TX packet size for fw download */
874 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
875 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
876 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
877
878 SET_TX_DESC_OWN(pdesc, 1);
879 } else { /* H2C Command Desc format (Host TXCMD) */
880 /* 92SE must set this to 1 to avoid a HW DMA error during fw download */
881 SET_TX_DESC_FIRST_SEG(pdesc, 1);
882 SET_TX_DESC_LAST_SEG(pdesc, 1);
883
884 SET_TX_DESC_OFFSET(pdesc, 0x20);
885
886 /* Buffer size + command header */
887 SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
888 /* Fixed queue of H2C command */
889 SET_TX_DESC_QUEUE_SEL(pdesc, 0x13);
890
891 SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
892
893 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
894 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
895
896 SET_TX_DESC_OWN(pdesc, 1);
897
898 }
899}
900
901void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
902{
903 if (istx == true) {
904 switch (desc_name) {
905 case HW_DESC_OWN:
906 SET_TX_DESC_OWN(pdesc, 1);
907 break;
908 case HW_DESC_TX_NEXTDESC_ADDR:
909 SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
910 break;
911 default:
912 RT_ASSERT(false, ("ERR txdesc: %d not processed\n",
913 desc_name));
914 break;
915 }
916 } else {
917 switch (desc_name) {
918 case HW_DESC_RXOWN:
919 SET_RX_STATUS_DESC_OWN(pdesc, 1);
920 break;
921 case HW_DESC_RXBUFF_ADDR:
922 SET_RX_STATUS__DESC_BUFF_ADDR(pdesc, *(u32 *) val);
923 break;
924 case HW_DESC_RXPKT_LEN:
925 SET_RX_STATUS_DESC_PKT_LEN(pdesc, *(u32 *) val);
926 break;
927 case HW_DESC_RXERO:
928 SET_RX_STATUS_DESC_EOR(pdesc, 1);
929 break;
930 default:
931 RT_ASSERT(false, ("ERR rxdesc: %d not processed\n",
932 desc_name));
933 break;
934 }
935 }
936}
937
938u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
939{
940 u32 ret = 0;
941
942 if (istx == true) {
943 switch (desc_name) {
944 case HW_DESC_OWN:
945 ret = GET_TX_DESC_OWN(desc);
946 break;
947 case HW_DESC_TXBUFF_ADDR:
948 ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
949 break;
950 default:
951 RT_ASSERT(false, ("ERR txdesc: %d not processed\n",
952 desc_name));
953 break;
954 }
955 } else {
956 switch (desc_name) {
957 case HW_DESC_OWN:
958 ret = GET_RX_STATUS_DESC_OWN(desc);
959 break;
960 case HW_DESC_RXPKT_LEN:
961 ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
962 break;
963 default:
964 RT_ASSERT(false, ("ERR rxdesc: %d not processed\n",
965 desc_name));
966 break;
967 }
968 }
969 return ret;
970}
971
972void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
973{
974 struct rtl_priv *rtlpriv = rtl_priv(hw);
975 rtl_write_word(rtlpriv, TP_POLL, BIT(0) << (hw_queue));
976}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
new file mode 100644
index 000000000000..05862c51b861
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -0,0 +1,45 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#ifndef __REALTEK_PCI92SE_TRX_H__
30#define __REALTEK_PCI92SE_TRX_H__
31
32void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
33 u8 *pdesc, struct ieee80211_tx_info *info,
34 struct sk_buff *skb, u8 hw_queue,
35 struct rtl_tcb_desc *ptcb_desc);
36void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
37 bool lastseg, struct sk_buff *skb);
38bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
39 struct ieee80211_rx_status *rx_status, u8 *pdesc,
40 struct sk_buff *skb);
41void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
42u32 rtl92se_get_desc(u8 *pdesc, bool istx, u8 desc_name);
43void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
44
45#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index f5d85735d642..a9367eba1ea7 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -852,6 +852,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
852 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 852 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
853 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 853 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
854 struct rtl_tx_desc *pdesc = NULL; 854 struct rtl_tx_desc *pdesc = NULL;
855 struct rtl_tcb_desc tcb_desc;
855 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 856 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
856 __le16 fc = hdr->frame_control; 857 __le16 fc = hdr->frame_control;
857 u8 *pda_addr = hdr->addr1; 858 u8 *pda_addr = hdr->addr1;
@@ -860,8 +861,17 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
860 u8 tid = 0; 861 u8 tid = 0;
861 u16 seq_number = 0; 862 u16 seq_number = 0;
862 863
863 if (ieee80211_is_mgmt(fc)) 864 if (ieee80211_is_auth(fc)) {
864 rtl_tx_mgmt_proc(hw, skb); 865 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
866 rtl_ips_nic_on(hw);
867 }
868
869 if (rtlpriv->psc.sw_ps_enabled) {
870 if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
871 !ieee80211_has_pm(fc))
872 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
873 }
874
865 rtl_action_proc(hw, skb, true); 875 rtl_action_proc(hw, skb, true);
866 if (is_multicast_ether_addr(pda_addr)) 876 if (is_multicast_ether_addr(pda_addr))
867 rtlpriv->stats.txbytesmulticast += skb->len; 877 rtlpriv->stats.txbytesmulticast += skb->len;
@@ -878,7 +888,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
878 seq_number <<= 4; 888 seq_number <<= 4;
879 } 889 }
880 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb, 890 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
881 hw_queue); 891 hw_queue, &tcb_desc);
882 if (!ieee80211_has_morefrags(hdr->frame_control)) { 892 if (!ieee80211_has_morefrags(hdr->frame_control)) {
883 if (qc) 893 if (qc)
884 mac->tids[tid].seq_number = seq_number; 894 mac->tids[tid].seq_number = seq_number;
@@ -887,7 +897,8 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
887 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); 897 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
888} 898}
889 899
890static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 900static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
901 struct rtl_tcb_desc *dummy)
891{ 902{
892 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); 903 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
893 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 904 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
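
The hunk above also sets the 802.11 Power Management bit on outgoing data frames while software power save is active. The following standalone sketch shows just that frame_control manipulation; it uses the standard IEEE 802.11 field constants and a hypothetical helper, not the driver's code path.

/*
 * Sketch of the sw_ps_enabled tweak: data frames that do not already
 * carry the PM bit get it set in frame_control.  For simplicity the
 * value is treated as host-order here; the driver works on __le16.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FCTL_FTYPE	0x000c	/* IEEE80211_FCTL_FTYPE */
#define DEMO_FTYPE_DATA	0x0008	/* IEEE80211_FTYPE_DATA */
#define DEMO_FCTL_PM	0x1000	/* IEEE80211_FCTL_PM    */

static uint16_t demo_set_pm_bit(uint16_t fc)
{
	if ((fc & DEMO_FCTL_FTYPE) == DEMO_FTYPE_DATA)
		fc |= DEMO_FCTL_PM;
	return fc;
}

int main(void)
{
	printf("0x%04x\n", (unsigned)demo_set_pm_bit(0x0008)); /* data frame */
	printf("0x%04x\n", (unsigned)demo_set_pm_bit(0x0000)); /* mgmt frame untouched */
	return 0;
}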
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
index abadfe918d30..d2a63fb3e1e6 100644
--- a/drivers/net/wireless/rtlwifi/usb.h
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -31,6 +31,8 @@
31#include <linux/usb.h> 31#include <linux/usb.h>
32#include <linux/skbuff.h> 32#include <linux/skbuff.h>
33 33
34#define RTL_RX_DESC_SIZE 24
35
34#define RTL_USB_DEVICE(vend, prod, cfg) \ 36#define RTL_USB_DEVICE(vend, prod, cfg) \
35 .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \ 37 .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
36 .idVendor = (vend), \ 38 .idVendor = (vend), \
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 07db95ff9bc5..693395ee98f9 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -68,6 +68,8 @@
68#define QBSS_LOAD_SIZE 5 68#define QBSS_LOAD_SIZE 5
69#define MAX_WMMELE_LENGTH 64 69#define MAX_WMMELE_LENGTH 64
70 70
71#define TOTAL_CAM_ENTRY 32
72
71/*slot time for 11g. */ 73/*slot time for 11g. */
72#define RTL_SLOT_TIME_9 9 74#define RTL_SLOT_TIME_9 9
73#define RTL_SLOT_TIME_20 20 75#define RTL_SLOT_TIME_20 20
@@ -94,8 +96,10 @@
94#define CHANNEL_GROUP_MAX_5G 9 96#define CHANNEL_GROUP_MAX_5G 9
95#define CHANNEL_MAX_NUMBER_2G 14 97#define CHANNEL_MAX_NUMBER_2G 14
96#define AVG_THERMAL_NUM 8 98#define AVG_THERMAL_NUM 8
99#define MAX_TID_COUNT 9
97 100
98/* for early mode */ 101/* for early mode */
102#define FCS_LEN 4
99#define EM_HDR_LEN 8 103#define EM_HDR_LEN 8
100enum intf_type { 104enum intf_type {
101 INTF_PCI = 0, 105 INTF_PCI = 0,
@@ -159,6 +163,8 @@ enum hardware_type {
159(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal)) 163(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
160#define IS_HARDWARE_TYPE_8723(rtlhal) \ 164#define IS_HARDWARE_TYPE_8723(rtlhal) \
161(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) 165(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
166#define IS_HARDWARE_TYPE_8723U(rtlhal) \
167 (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
162 168
163enum scan_operation_backup_opt { 169enum scan_operation_backup_opt {
164 SCAN_OPT_BACKUP = 0, 170 SCAN_OPT_BACKUP = 0,
@@ -297,6 +303,9 @@ enum hw_variables {
297 HW_VAR_DATA_FILTER, 303 HW_VAR_DATA_FILTER,
298}; 304};
299 305
306#define HWSET_MAX_SIZE 128
307#define EFUSE_MAX_SECTION 16
308
300enum _RT_MEDIA_STATUS { 309enum _RT_MEDIA_STATUS {
301 RT_MEDIA_DISCONNECT = 0, 310 RT_MEDIA_DISCONNECT = 0,
302 RT_MEDIA_CONNECT = 1 311 RT_MEDIA_CONNECT = 1
@@ -766,7 +775,7 @@ struct rtl_rfkill {
766#define IQK_MATRIX_REG_NUM 8 775#define IQK_MATRIX_REG_NUM 8
767#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21) 776#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21)
768struct iqk_matrix_regs { 777struct iqk_matrix_regs {
769 bool b_iqk_done; 778 bool iqk_done;
770 long value[1][IQK_MATRIX_REG_NUM]; 779 long value[1][IQK_MATRIX_REG_NUM];
771}; 780};
772 781
@@ -843,6 +852,7 @@ struct rtl_phy {
843 bool apk_done; 852 bool apk_done;
844 u32 reg_rf3c[2]; /* pathA / pathB */ 853 u32 reg_rf3c[2]; /* pathA / pathB */
845 854
855 /* bfsync */
846 u8 framesync; 856 u8 framesync;
847 u32 framesync_c34; 857 u32 framesync_c34;
848 858
@@ -852,6 +862,10 @@ struct rtl_phy {
852}; 862};
853 863
854#define MAX_TID_COUNT 9 864#define MAX_TID_COUNT 9
865#define RTL_AGG_STOP 0
866#define RTL_AGG_PROGRESS 1
867#define RTL_AGG_START 2
868#define RTL_AGG_OPERATIONAL 3
855#define RTL_AGG_OFF 0 869#define RTL_AGG_OFF 0
856#define RTL_AGG_ON 1 870#define RTL_AGG_ON 1
857#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2 871#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
@@ -871,6 +885,13 @@ struct rtl_tid_data {
871 struct rtl_ht_agg agg; 885 struct rtl_ht_agg agg;
872}; 886};
873 887
888struct rtl_sta_info {
889 u8 ratr_index;
890 u8 wireless_mode;
891 u8 mimo_ps;
892 struct rtl_tid_data tids[MAX_TID_COUNT];
893} __packed;
894
874struct rtl_priv; 895struct rtl_priv;
875struct rtl_io { 896struct rtl_io {
876 struct device *dev; 897 struct device *dev;
@@ -894,6 +915,7 @@ struct rtl_io {
894 u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr); 915 u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
895 int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len, 916 int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
896 u8 *pdata); 917 u8 *pdata);
918
897}; 919};
898 920
899struct rtl_mac { 921struct rtl_mac {
@@ -916,6 +938,8 @@ struct rtl_mac {
916 int n_channels; 938 int n_channels;
917 int n_bitrates; 939 int n_bitrates;
918 940
941 bool offchan_deley;
942
919 /*filters */ 943 /*filters */
920 u32 rx_conf; 944 u32 rx_conf;
921 u16 rx_mgt_filter; 945 u16 rx_mgt_filter;
@@ -1032,7 +1056,9 @@ struct rtl_security {
1032 enum rt_enc_alg pairwise_enc_algorithm; 1056 enum rt_enc_alg pairwise_enc_algorithm;
1033	/*Encryption Algorithm for Broadcast/Multicast */ 1057	/*Encryption Algorithm for Broadcast/Multicast */
1034 enum rt_enc_alg group_enc_algorithm; 1058 enum rt_enc_alg group_enc_algorithm;
1035 1059 /*Cam Entry Bitmap */
1060 u32 hwsec_cam_bitmap;
1061 u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN];
1036	/*local Key buffer, index 0 is for 1062	/*local Key buffer, index 0 is for
1037	pairwise key, 1-4 is for group key. */ 1063	pairwise key, 1-4 is for group key. */
1038 u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN]; 1064 u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
@@ -1053,7 +1079,7 @@ struct rtl_dm {
1053 bool current_turbo_edca; 1079 bool current_turbo_edca;
1054 bool is_any_nonbepkts; /*out dm */ 1080 bool is_any_nonbepkts; /*out dm */
1055 bool is_cur_rdlstate; 1081 bool is_cur_rdlstate;
1056 bool txpower_trackingInit; 1082 bool txpower_trackinginit;
1057 bool disable_framebursting; 1083 bool disable_framebursting;
1058 bool cck_inch14; 1084 bool cck_inch14;
1059 bool txpower_tracking; 1085 bool txpower_tracking;
@@ -1079,7 +1105,6 @@ struct rtl_dm {
1079 bool disable_tx_int; 1105 bool disable_tx_int;
1080 char ofdm_index[2]; 1106 char ofdm_index[2];
1081 char cck_index; 1107 char cck_index;
1082 u8 power_index_backup[6];
1083}; 1108};
1084 1109
1085#define EFUSE_MAX_LOGICAL_SIZE 256 1110#define EFUSE_MAX_LOGICAL_SIZE 256
@@ -1175,6 +1200,7 @@ struct rtl_ps_ctl {
1175 * otherwise Offset[560h] = 0x00. 1200 * otherwise Offset[560h] = 0x00.
1176 * */ 1201 * */
1177 bool support_aspm; 1202 bool support_aspm;
1203
1178 bool support_backdoor; 1204 bool support_backdoor;
1179 1205
1180 /*for LPS */ 1206 /*for LPS */
@@ -1201,7 +1227,6 @@ struct rtl_ps_ctl {
1201 1227
1202 /*just for PCIE ASPM */ 1228 /*just for PCIE ASPM */
1203 u8 const_amdpci_aspm; 1229 u8 const_amdpci_aspm;
1204
1205 bool pwrdown_mode; 1230 bool pwrdown_mode;
1206 1231
1207 enum rf_pwrstate inactive_pwrstate; 1232 enum rf_pwrstate inactive_pwrstate;
@@ -1282,6 +1307,10 @@ struct rt_link_detect {
1282 bool busytraffic; 1307 bool busytraffic;
1283 bool higher_busytraffic; 1308 bool higher_busytraffic;
1284 bool higher_busyrxtraffic; 1309 bool higher_busyrxtraffic;
1310
1311 u32 tidtx_in4period[MAX_TID_COUNT][4];
1312 u32 tidtx_inperiod[MAX_TID_COUNT];
1313 bool higher_busytxtraffic[MAX_TID_COUNT];
1285}; 1314};
1286 1315
1287struct rtl_tcb_desc { 1316struct rtl_tcb_desc {
@@ -1344,13 +1373,15 @@ struct rtl_hal_ops {
1344 u32 add_msr, u32 rm_msr); 1373 u32 add_msr, u32 rm_msr);
1345 void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val); 1374 void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
1346 void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val); 1375 void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
1347 void (*update_rate_table) (struct ieee80211_hw *hw); 1376 void (*update_rate_tbl) (struct ieee80211_hw *hw,
1377 struct ieee80211_sta *sta, u8 rssi_level);
1348 void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level); 1378 void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
1349 void (*fill_tx_desc) (struct ieee80211_hw *hw, 1379 void (*fill_tx_desc) (struct ieee80211_hw *hw,
1350 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 1380 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1351 struct ieee80211_tx_info *info, 1381 struct ieee80211_tx_info *info,
1352 struct sk_buff *skb, unsigned int queue_index); 1382 struct sk_buff *skb, u8 hw_queue,
1353 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 * pDesc, 1383 struct rtl_tcb_desc *ptcb_desc);
1384 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
1354 u32 buffer_len, bool bIsPsPoll); 1385 u32 buffer_len, bool bIsPsPoll);
1355 void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc, 1386 void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
1356 bool firstseg, bool lastseg, 1387 bool firstseg, bool lastseg,
@@ -1370,10 +1401,10 @@ struct rtl_hal_ops {
1370 enum led_ctl_mode ledaction); 1401 enum led_ctl_mode ledaction);
1371 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val); 1402 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
1372 u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name); 1403 u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
1373 void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue); 1404 void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue);
1374 void (*enable_hw_sec) (struct ieee80211_hw *hw); 1405 void (*enable_hw_sec) (struct ieee80211_hw *hw);
1375 void (*set_key) (struct ieee80211_hw *hw, u32 key_index, 1406 void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
1376 u8 *p_macaddr, bool is_group, u8 enc_algo, 1407 u8 *macaddr, bool is_group, u8 enc_algo,
1377 bool is_wepkey, bool clear_all); 1408 bool is_wepkey, bool clear_all);
1378 void (*init_sw_leds) (struct ieee80211_hw *hw); 1409 void (*init_sw_leds) (struct ieee80211_hw *hw);
1379 void (*deinit_sw_leds) (struct ieee80211_hw *hw); 1410 void (*deinit_sw_leds) (struct ieee80211_hw *hw);
@@ -1384,6 +1415,7 @@ struct rtl_hal_ops {
1384 u32 regaddr, u32 bitmask); 1415 u32 regaddr, u32 bitmask);
1385 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1416 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1386 u32 regaddr, u32 bitmask, u32 data); 1417 u32 regaddr, u32 bitmask, u32 data);
1418 void (*linked_set_reg) (struct ieee80211_hw *hw);
1387 bool (*phy_rf6052_config) (struct ieee80211_hw *hw); 1419 bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
1388 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw, 1420 void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
1389 u8 *powerlevel); 1421 u8 *powerlevel);
@@ -1404,7 +1436,9 @@ struct rtl_intf_ops {
1404 int (*adapter_start) (struct ieee80211_hw *hw); 1436 int (*adapter_start) (struct ieee80211_hw *hw);
1405 void (*adapter_stop) (struct ieee80211_hw *hw); 1437 void (*adapter_stop) (struct ieee80211_hw *hw);
1406 1438
1407 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb); 1439 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb,
1440 struct rtl_tcb_desc *ptcb_desc);
1441 void (*flush)(struct ieee80211_hw *hw, bool drop);
1408 int (*reset_trx_ring) (struct ieee80211_hw *hw); 1442 int (*reset_trx_ring) (struct ieee80211_hw *hw);
1409 bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb); 1443 bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
1410 1444
@@ -1418,6 +1452,15 @@ struct rtl_intf_ops {
1418struct rtl_mod_params { 1452struct rtl_mod_params {
1419 /* default: 0 = using hardware encryption */ 1453 /* default: 0 = using hardware encryption */
1420 int sw_crypto; 1454 int sw_crypto;
1455
1456 /* default: 1 = using no linked power save */
1457 bool inactiveps;
1458
1459 /* default: 1 = using linked sw power save */
1460 bool swctrl_lps;
1461
1462 /* default: 1 = using linked fw power save */
1463 bool fwctrl_lps;
1421}; 1464};
1422 1465
1423struct rtl_hal_usbint_cfg { 1466struct rtl_hal_usbint_cfg {
@@ -1445,6 +1488,7 @@ struct rtl_hal_usbint_cfg {
1445 1488
1446struct rtl_hal_cfg { 1489struct rtl_hal_cfg {
1447 u8 bar_id; 1490 u8 bar_id;
1491 bool write_readback;
1448 char *name; 1492 char *name;
1449 char *fw_name; 1493 char *fw_name;
1450 struct rtl_hal_ops *ops; 1494 struct rtl_hal_ops *ops;
@@ -1469,7 +1513,6 @@ struct rtl_locks {
1469 spinlock_t rf_lock; 1513 spinlock_t rf_lock;
1470 spinlock_t lps_lock; 1514 spinlock_t lps_lock;
1471 spinlock_t waitq_lock; 1515 spinlock_t waitq_lock;
1472 spinlock_t tx_urb_lock;
1473 1516
1474 /*Dual mac*/ 1517 /*Dual mac*/
1475 spinlock_t cck_and_rw_pagea_lock; 1518 spinlock_t cck_and_rw_pagea_lock;
@@ -1621,19 +1664,19 @@ struct bt_coexist_info {
1621 u32 bt_edca_ul; 1664 u32 bt_edca_ul;
1622 u32 bt_edca_dl; 1665 u32 bt_edca_dl;
1623 1666
1624 bool b_init_set; 1667 bool init_set;
1625 bool b_bt_busy_traffic; 1668 bool bt_busy_traffic;
1626 bool b_bt_traffic_mode_set; 1669 bool bt_traffic_mode_set;
1627 bool b_bt_non_traffic_mode_set; 1670 bool bt_non_traffic_mode_set;
1628 1671
1629 bool b_fw_coexist_all_off; 1672 bool fw_coexist_all_off;
1630 bool b_sw_coexist_all_off; 1673 bool sw_coexist_all_off;
1631 u32 current_state; 1674 u32 current_state;
1632 u32 previous_state; 1675 u32 previous_state;
1633 u8 bt_pre_rssi_state; 1676 u8 bt_pre_rssi_state;
1634 1677
1635 u8 b_reg_bt_iso; 1678 u8 reg_bt_iso;
1636 u8 b_reg_bt_sco; 1679 u8 reg_bt_sco;
1637 1680
1638}; 1681};
1639 1682
@@ -1653,13 +1696,23 @@ struct bt_coexist_info {
1653#define EF4BYTE(_val) \ 1696#define EF4BYTE(_val) \
1654 (le32_to_cpu(_val)) 1697 (le32_to_cpu(_val))
1655 1698
1699/* Read data from memory */
1700#define READEF1BYTE(_ptr) \
1701 EF1BYTE(*((u8 *)(_ptr)))
1656/* Read le16 data from memory and convert to host ordering */ 1702/* Read le16 data from memory and convert to host ordering */
1657#define READEF2BYTE(_ptr) \ 1703#define READEF2BYTE(_ptr) \
1658 EF2BYTE(*((u16 *)(_ptr))) 1704 EF2BYTE(*((u16 *)(_ptr)))
1705#define READEF4BYTE(_ptr) \
1706 EF4BYTE(*((u32 *)(_ptr)))
1659 1707
1708/* Write data to memory */
1709#define WRITEEF1BYTE(_ptr, _val) \
1710 (*((u8 *)(_ptr))) = EF1BYTE(_val)
1660/* Write le16 data to memory in host ordering */ 1711/* Write le16 data to memory in host ordering */
1661#define WRITEEF2BYTE(_ptr, _val) \ 1712#define WRITEEF2BYTE(_ptr, _val) \
1662 (*((u16 *)(_ptr))) = EF2BYTE(_val) 1713 (*((u16 *)(_ptr))) = EF2BYTE(_val)
1714#define WRITEEF4BYTE(_ptr, _val) \
1715	(*((u32 *)(_ptr))) = EF4BYTE(_val)
1663 1716
1664/* Create a bit mask 1717/* Create a bit mask
1665 * Examples: 1718 * Examples:
@@ -1698,6 +1751,25 @@ struct bt_coexist_info {
1698#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ 1751#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
1699 (EF1BYTE(*((u8 *)(__pstart)))) 1752 (EF1BYTE(*((u8 *)(__pstart))))
1700 1753
1754/* Description:
1755 * Translate subfield (continuous bits in little-endian) of a 4-byte
1756 * value to host byte ordering. */
1757#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
1758 ( \
1759 (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
1760 BIT_LEN_MASK_32(__bitlen) \
1761 )
1762#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
1763 ( \
1764 (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
1765 BIT_LEN_MASK_16(__bitlen) \
1766 )
1767#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
1768 ( \
1769 (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
1770 BIT_LEN_MASK_8(__bitlen) \
1771 )
1772
1701/* Description: 1773/* Description:
1702 * Mask subfield (continuous bits in little-endian) of 4-byte value 1774 * Mask subfield (continuous bits in little-endian) of 4-byte value
1703 * and return the result in 4-byte value in host byte ordering. 1775 * and return the result in 4-byte value in host byte ordering.
@@ -1721,6 +1793,18 @@ struct bt_coexist_info {
1721/* Description: 1793/* Description:
1722 * Set subfield of little-endian 4-byte value to specified value. 1794 * Set subfield of little-endian 4-byte value to specified value.
1723 */ 1795 */
1796#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
1797 *((u32 *)(__pstart)) = EF4BYTE \
1798 ( \
1799 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
1800 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
1801 );
1802#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
1803 *((u16 *)(__pstart)) = EF2BYTE \
1804 ( \
1805 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
1806 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
1807 );
1724#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \ 1808#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
1725 *((u8 *)(__pstart)) = EF1BYTE \ 1809 *((u8 *)(__pstart)) = EF1BYTE \
1726 ( \ 1810 ( \
@@ -1728,12 +1812,16 @@ struct bt_coexist_info {
1728 ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \ 1812 ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
1729 ); 1813 );
1730 1814
1815#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
1816 (__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
1817
1731/**************************************** 1818/****************************************
1732 mem access macro define end 1819 mem access macro define end
1733****************************************/ 1820****************************************/
1734 1821
1735#define byte(x, n) ((x >> (8 * n)) & 0xff) 1822#define byte(x, n) ((x >> (8 * n)) & 0xff)
1736 1823
1824#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
1737#define RTL_WATCH_DOG_TIME 2000 1825#define RTL_WATCH_DOG_TIME 2000
1738#define MSECS(t) msecs_to_jiffies(t) 1826#define MSECS(t) msecs_to_jiffies(t)
1739#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS) 1827#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
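
The LE_BITS_TO_*() and SET_BITS_TO_LE_*() macros added above read and update bit fields inside little-endian descriptor words. The sketch below reproduces the same arithmetic as plain functions (assuming field widths below 32 bits); the driver itself uses the wifi.h macros directly.

/*
 * Standalone sketch of the LE_BITS_TO_4BYTE()/SET_BITS_TO_LE_4BYTE()
 * arithmetic: get or set a bit field inside a little-endian 32-bit
 * word held in a byte buffer (len < 32 assumed).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void le32_store(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* same as LE_BITS_TO_4BYTE(start, off, len) */
static uint32_t le_bits_get(const uint8_t *p, unsigned off, unsigned len)
{
	return (le32_load(p) >> off) & ((1u << len) - 1);
}

/* same as SET_BITS_TO_LE_4BYTE(start, off, len, val) */
static void le_bits_set(uint8_t *p, unsigned off, unsigned len, uint32_t val)
{
	uint32_t mask = ((1u << len) - 1) << off;

	le32_store(p, (le32_load(p) & ~mask) | ((val << off) & mask));
}

int main(void)
{
	uint8_t desc[4] = { 0 };

	le_bits_set(desc, 16, 14, 1500);	/* e.g. a packet-length field */
	printf("len=%u\n", (unsigned)le_bits_get(desc, 16, 14));
	return 0;
}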
@@ -1768,6 +1856,15 @@ struct bt_coexist_info {
1768#define container_of_dwork_rtl(x, y, z) \ 1856#define container_of_dwork_rtl(x, y, z) \
1769 container_of(container_of(x, struct delayed_work, work), y, z) 1857 container_of(container_of(x, struct delayed_work, work), y, z)
1770 1858
1859#define FILL_OCTET_STRING(_os, _octet, _len) \
1860 (_os).octet = (u8 *)(_octet); \
1861 (_os).length = (_len);
1862
1863#define CP_MACADDR(des, src) \
1864 ((des)[0] = (src)[0], (des)[1] = (src)[1],\
1865 (des)[2] = (src)[2], (des)[3] = (src)[3],\
1866 (des)[4] = (src)[4], (des)[5] = (src)[5])
1867
1771static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) 1868static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
1772{ 1869{
1773 return rtlpriv->io.read8_sync(rtlpriv, addr); 1870 return rtlpriv->io.read8_sync(rtlpriv, addr);
@@ -1786,17 +1883,26 @@ static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
1786static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8) 1883static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
1787{ 1884{
1788 rtlpriv->io.write8_async(rtlpriv, addr, val8); 1885 rtlpriv->io.write8_async(rtlpriv, addr, val8);
1886
1887 if (rtlpriv->cfg->write_readback)
1888 rtlpriv->io.read8_sync(rtlpriv, addr);
1789} 1889}
1790 1890
1791static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16) 1891static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
1792{ 1892{
1793 rtlpriv->io.write16_async(rtlpriv, addr, val16); 1893 rtlpriv->io.write16_async(rtlpriv, addr, val16);
1894
1895 if (rtlpriv->cfg->write_readback)
1896 rtlpriv->io.read16_sync(rtlpriv, addr);
1794} 1897}
1795 1898
1796static inline void rtl_write_dword(struct rtl_priv *rtlpriv, 1899static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
1797 u32 addr, u32 val32) 1900 u32 addr, u32 val32)
1798{ 1901{
1799 rtlpriv->io.write32_async(rtlpriv, addr, val32); 1902 rtlpriv->io.write32_async(rtlpriv, addr, val32);
1903
1904 if (rtlpriv->cfg->write_readback)
1905 rtlpriv->io.read32_sync(rtlpriv, addr);
1800} 1906}
1801 1907
1802static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw, 1908static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
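
The write_readback flag turns every asynchronous register write into a write followed by a read of the same address, so posted writes are flushed before the caller proceeds. A minimal sketch of that pattern, with stand-in I/O callbacks rather than the real rtl_io hooks:

/*
 * Sketch of the write_readback idea: issue the write, then read the
 * same address back so a posted write is flushed.  The demo_io type
 * and register array are stand-ins, not driver structures.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_io {
	void (*write8)(uint32_t addr, uint8_t val);
	uint8_t (*read8)(uint32_t addr);
	bool write_readback;
};

static uint8_t regs[256];

static void io_write8(uint32_t addr, uint8_t val) { regs[addr & 0xff] = val; }
static uint8_t io_read8(uint32_t addr) { return regs[addr & 0xff]; }

static void demo_write_byte(struct demo_io *io, uint32_t addr, uint8_t val)
{
	io->write8(addr, val);
	if (io->write_readback)
		(void)io->read8(addr);	/* read back to flush the write */
}

int main(void)
{
	struct demo_io io = { io_write8, io_read8, true };

	demo_write_byte(&io, 0x10, 0xab);
	printf("reg 0x10 = 0x%02x\n", io.read8(0x10));
	return 0;
}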
@@ -1855,4 +1961,31 @@ static inline u8 get_rf_type(struct rtl_phy *rtlphy)
1855 return rtlphy->rf_type; 1961 return rtlphy->rf_type;
1856} 1962}
1857 1963
1964static inline struct ieee80211_hdr *rtl_get_hdr(struct sk_buff *skb)
1965{
1966 return (struct ieee80211_hdr *)(skb->data);
1967}
1968
1969static inline __le16 rtl_get_fc(struct sk_buff *skb)
1970{
1971 return rtl_get_hdr(skb)->frame_control;
1972}
1973
1974static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr)
1975{
1976 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
1977}
1978
1979static inline u16 rtl_get_tid(struct sk_buff *skb)
1980{
1981 return rtl_get_tid_h(rtl_get_hdr(skb));
1982}
1983
1984static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
1985 struct ieee80211_vif *vif,
1986 u8 *bssid)
1987{
1988 return ieee80211_find_sta(vif, bssid);
1989}
1990
1858#endif 1991#endif
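
rtl_get_tid() boils down to reading the low four bits of the QoS Control field. The sketch below shows the same extraction on a raw frame buffer, assuming a 3-address QoS data frame (24-byte header followed by the 2-byte QoS Control); in the driver, mac80211's ieee80211_get_qos_ctl() handles the general header layouts.

/*
 * Standalone sketch of what rtl_get_tid() computes: the low 4 bits of
 * the QoS Control field.  The 24-byte offset is an assumption for a
 * 3-address QoS data frame without HT Control.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_QOS_CTL_TID_MASK 0x0f	/* IEEE80211_QOS_CTL_TID_MASK */

static uint8_t demo_get_tid(const uint8_t *frame)
{
	const uint8_t *qos_ctl = frame + 24;	/* after the 3-address header */

	return qos_ctl[0] & DEMO_QOS_CTL_TID_MASK;
}

int main(void)
{
	uint8_t frame[26] = { 0 };

	frame[24] = 0x06;	/* TID 6 (video access class) */
	printf("tid=%u\n", demo_get_tid(frame));
	return 0;
}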
diff --git a/drivers/net/wireless/wl1251/cmd.h b/drivers/net/wireless/wl1251/cmd.h
index e5c74c631374..79ca5273c9e9 100644
--- a/drivers/net/wireless/wl1251/cmd.h
+++ b/drivers/net/wireless/wl1251/cmd.h
@@ -313,8 +313,8 @@ struct wl1251_cmd_vbm_update {
313} __packed; 313} __packed;
314 314
315enum wl1251_cmd_ps_mode { 315enum wl1251_cmd_ps_mode {
316 STATION_ACTIVE_MODE, 316 CHIP_ACTIVE_MODE,
317 STATION_POWER_SAVE_MODE 317 CHIP_POWER_SAVE_MODE
318}; 318};
319 319
320struct wl1251_cmd_ps_params { 320struct wl1251_cmd_ps_params {
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/wl1251/event.c
index dfc4579acb06..9f15ccaf8f05 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/wl1251/event.c
@@ -68,14 +68,16 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
68 if (vector & BSS_LOSE_EVENT_ID) { 68 if (vector & BSS_LOSE_EVENT_ID) {
69 wl1251_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); 69 wl1251_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
70 70
71 if (wl->psm_requested && wl->psm) { 71 if (wl->psm_requested &&
72 wl->station_mode != STATION_ACTIVE_MODE) {
72 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); 73 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
73 if (ret < 0) 74 if (ret < 0)
74 return ret; 75 return ret;
75 } 76 }
76 } 77 }
77 78
78 if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID && wl->psm) { 79 if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
80 wl->station_mode != STATION_ACTIVE_MODE) {
79 wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT"); 81 wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
80 82
81 /* indicate to the stack, that beacons have been lost */ 83 /* indicate to the stack, that beacons have been lost */
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 12c9e635a6d6..a14a48c99cdc 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -497,7 +497,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
497 wl->rx_last_id = 0; 497 wl->rx_last_id = 0;
498 wl->next_tx_complete = 0; 498 wl->next_tx_complete = 0;
499 wl->elp = false; 499 wl->elp = false;
500 wl->psm = 0; 500 wl->station_mode = STATION_ACTIVE_MODE;
501 wl->tx_queue_stopped = false; 501 wl->tx_queue_stopped = false;
502 wl->power_level = WL1251_DEFAULT_POWER_LEVEL; 502 wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
503 wl->rssi_thold = 0; 503 wl->rssi_thold = 0;
@@ -632,13 +632,29 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
632 632
633 wl->psm_requested = false; 633 wl->psm_requested = false;
634 634
635 if (wl->psm) { 635 if (wl->station_mode != STATION_ACTIVE_MODE) {
636 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE); 636 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
637 if (ret < 0) 637 if (ret < 0)
638 goto out_sleep; 638 goto out_sleep;
639 } 639 }
640 } 640 }
641 641
642 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
643 if (conf->flags & IEEE80211_CONF_IDLE) {
644 ret = wl1251_ps_set_mode(wl, STATION_IDLE);
645 if (ret < 0)
646 goto out_sleep;
647 } else {
648 ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
649 if (ret < 0)
650 goto out_sleep;
651 ret = wl1251_join(wl, wl->bss_type, wl->channel,
652 wl->beacon_int, wl->dtim_period);
653 if (ret < 0)
654 goto out_sleep;
655 }
656 }
657
642 if (conf->power_level != wl->power_level) { 658 if (conf->power_level != wl->power_level) {
643 ret = wl1251_acx_tx_power(wl, conf->power_level); 659 ret = wl1251_acx_tx_power(wl, conf->power_level);
644 if (ret < 0) 660 if (ret < 0)
@@ -1384,7 +1400,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1384 wl->rx_config = WL1251_DEFAULT_RX_CONFIG; 1400 wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
1385 wl->rx_filter = WL1251_DEFAULT_RX_FILTER; 1401 wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
1386 wl->elp = false; 1402 wl->elp = false;
1387 wl->psm = 0; 1403 wl->station_mode = STATION_ACTIVE_MODE;
1388 wl->psm_requested = false; 1404 wl->psm_requested = false;
1389 wl->tx_queue_stopped = false; 1405 wl->tx_queue_stopped = false;
1390 wl->power_level = WL1251_DEFAULT_POWER_LEVEL; 1406 wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c
index 9cc514703d2a..db719f7d2692 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -39,7 +39,7 @@ void wl1251_elp_work(struct work_struct *work)
39 39
40 mutex_lock(&wl->mutex); 40 mutex_lock(&wl->mutex);
41 41
42 if (wl->elp || !wl->psm) 42 if (wl->elp || wl->station_mode == STATION_ACTIVE_MODE)
43 goto out; 43 goto out;
44 44
45 wl1251_debug(DEBUG_PSM, "chip to elp"); 45 wl1251_debug(DEBUG_PSM, "chip to elp");
@@ -57,7 +57,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
57{ 57{
58 unsigned long delay; 58 unsigned long delay;
59 59
60 if (wl->psm) { 60 if (wl->station_mode != STATION_ACTIVE_MODE) {
61 delay = msecs_to_jiffies(ELP_ENTRY_DELAY); 61 delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
62 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay); 62 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
63 } 63 }
@@ -104,7 +104,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
104 return 0; 104 return 0;
105} 105}
106 106
107int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode) 107int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_station_mode mode)
108{ 108{
109 int ret; 109 int ret;
110 110
@@ -128,15 +128,24 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
128 if (ret < 0) 128 if (ret < 0)
129 return ret; 129 return ret;
130 130
131 ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); 131 ret = wl1251_cmd_ps_mode(wl, CHIP_POWER_SAVE_MODE);
132 if (ret < 0) 132 if (ret < 0)
133 return ret; 133 return ret;
134 134
135 ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP); 135 ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
136 if (ret < 0) 136 if (ret < 0)
137 return ret; 137 return ret;
138 break;
139 case STATION_IDLE:
140 wl1251_debug(DEBUG_PSM, "entering idle");
138 141
139 wl->psm = 1; 142 ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
143 if (ret < 0)
144 return ret;
145
146 ret = wl1251_cmd_template_set(wl, CMD_DISCONNECT, NULL, 0);
147 if (ret < 0)
148 return ret;
140 break; 149 break;
141 case STATION_ACTIVE_MODE: 150 case STATION_ACTIVE_MODE:
142 default: 151 default:
@@ -163,13 +172,13 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
163 if (ret < 0) 172 if (ret < 0)
164 return ret; 173 return ret;
165 174
166 ret = wl1251_cmd_ps_mode(wl, STATION_ACTIVE_MODE); 175 ret = wl1251_cmd_ps_mode(wl, CHIP_ACTIVE_MODE);
167 if (ret < 0) 176 if (ret < 0)
168 return ret; 177 return ret;
169 178
170 wl->psm = 0;
171 break; 179 break;
172 } 180 }
181 wl->station_mode = mode;
173 182
174 return ret; 183 return ret;
175} 184}
diff --git a/drivers/net/wireless/wl1251/ps.h b/drivers/net/wireless/wl1251/ps.h
index 55c3dda75e69..75efad246d67 100644
--- a/drivers/net/wireless/wl1251/ps.h
+++ b/drivers/net/wireless/wl1251/ps.h
@@ -26,7 +26,7 @@
26#include "wl1251.h" 26#include "wl1251.h"
27#include "acx.h" 27#include "acx.h"
28 28
29int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode); 29int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_station_mode mode);
30void wl1251_ps_elp_sleep(struct wl1251 *wl); 30void wl1251_ps_elp_sleep(struct wl1251 *wl);
31int wl1251_ps_elp_wakeup(struct wl1251 *wl); 31int wl1251_ps_elp_wakeup(struct wl1251 *wl);
32void wl1251_elp_work(struct work_struct *work); 32void wl1251_elp_work(struct work_struct *work);
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index bb23cd522b22..a77f1bbbed0a 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -129,6 +129,12 @@ enum wl1251_partition_type {
129 PART_TABLE_LEN 129 PART_TABLE_LEN
130}; 130};
131 131
132enum wl1251_station_mode {
133 STATION_ACTIVE_MODE,
134 STATION_POWER_SAVE_MODE,
135 STATION_IDLE,
136};
137
132struct wl1251_partition { 138struct wl1251_partition {
133 u32 size; 139 u32 size;
134 u32 start; 140 u32 start;
@@ -358,8 +364,7 @@ struct wl1251 {
358 364
359 struct delayed_work elp_work; 365 struct delayed_work elp_work;
360 366
361 /* we can be in psm, but not in elp, we have to differentiate */ 367 enum wl1251_station_mode station_mode;
362 bool psm;
363 368
364 /* PSM mode requested */ 369 /* PSM mode requested */
365 bool psm_requested; 370 bool psm_requested;
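
With the psm boolean replaced by the three-state station_mode enum, every check that used to test wl->psm now compares against STATION_ACTIVE_MODE, which keeps the new STATION_IDLE state on the low-power side of the test. A small sketch of that predicate (the demo_ names are illustrative, the enum mirrors wl1251.h):

/*
 * Sketch of the predicate that replaces the old "wl->psm" boolean:
 * anything other than the active mode counts as a low-power state.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_station_mode {
	DEMO_STATION_ACTIVE_MODE,
	DEMO_STATION_POWER_SAVE_MODE,
	DEMO_STATION_IDLE,
};

static bool demo_in_low_power(enum demo_station_mode mode)
{
	return mode != DEMO_STATION_ACTIVE_MODE;
}

int main(void)
{
	printf("idle low power: %d\n", demo_in_low_power(DEMO_STATION_IDLE));
	printf("active low power: %d\n",
	       demo_in_low_power(DEMO_STATION_ACTIVE_MODE));
	return 0;
}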
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 692ebff38fc8..35ce7b0f4a60 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -3,7 +3,7 @@ menuconfig WL12XX_MENU
3 depends on MAC80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable TI wl12xx driver support for the following chips: 5 This will enable TI wl12xx driver support for the following chips:
6 wl1271 and wl1273. 6 wl1271, wl1273, wl1281 and wl1283.
7 The drivers make use of the mac80211 stack. 7 The drivers make use of the mac80211 stack.
8 8
9config WL12XX 9config WL12XX
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index a3db755ceeda..c6ee530e5bf7 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -325,12 +325,19 @@ out:
325 return ret; 325 return ret;
326} 326}
327 327
328int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold) 328int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
329{ 329{
330 struct acx_rts_threshold *rts; 330 struct acx_rts_threshold *rts;
331 int ret; 331 int ret;
332 332
333 wl1271_debug(DEBUG_ACX, "acx rts threshold"); 333 /*
334 * If the RTS threshold is not configured or out of range, use the
335 * default value.
336 */
337 if (rts_threshold > IEEE80211_MAX_RTS_THRESHOLD)
338 rts_threshold = wl->conf.rx.rts_threshold;
339
340 wl1271_debug(DEBUG_ACX, "acx rts threshold: %d", rts_threshold);
334 341
335 rts = kzalloc(sizeof(*rts), GFP_KERNEL); 342 rts = kzalloc(sizeof(*rts), GFP_KERNEL);
336 if (!rts) { 343 if (!rts) {
@@ -338,7 +345,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold)
338 goto out; 345 goto out;
339 } 346 }
340 347
341 rts->threshold = cpu_to_le16(rts_threshold); 348 rts->threshold = cpu_to_le16((u16)rts_threshold);
342 349
343 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); 350 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
344 if (ret < 0) { 351 if (ret < 0) {
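
Widening rts_threshold to u32 lets the caller pass the (u32)-1 "not configured" marker; the function clamps anything above the 802.11 maximum back to the configured default before truncating to the 16-bit field the firmware expects. A standalone sketch of that clamp (the numeric constants below are illustrative):

/*
 * Sketch of the clamp applied by wl1271_acx_rts_threshold() and
 * wl1271_acx_frag_threshold(): out-of-range requests fall back to the
 * configured default, then the value is truncated to 16 bits.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_RTS_THRESHOLD 2353u	/* stand-in for IEEE80211_MAX_RTS_THRESHOLD */

static uint16_t demo_clamp_rts(uint32_t requested, uint32_t conf_default)
{
	if (requested > DEMO_MAX_RTS_THRESHOLD)
		requested = conf_default;
	return (uint16_t)requested;
}

int main(void)
{
	printf("rts=%u\n", (unsigned)demo_clamp_rts((uint32_t)-1, 2347)); /* falls back */
	printf("rts=%u\n", (unsigned)demo_clamp_rts(500, 2347));          /* kept as-is */
	return 0;
}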
@@ -540,13 +547,13 @@ out:
540 return ret; 547 return ret;
541} 548}
542 549
543int wl1271_acx_sg_cfg(struct wl1271 *wl) 550int wl1271_acx_sta_sg_cfg(struct wl1271 *wl)
544{ 551{
545 struct acx_bt_wlan_coex_param *param; 552 struct acx_sta_bt_wlan_coex_param *param;
546 struct conf_sg_settings *c = &wl->conf.sg; 553 struct conf_sg_settings *c = &wl->conf.sg;
547 int i, ret; 554 int i, ret;
548 555
549 wl1271_debug(DEBUG_ACX, "acx sg cfg"); 556 wl1271_debug(DEBUG_ACX, "acx sg sta cfg");
550 557
551 param = kzalloc(sizeof(*param), GFP_KERNEL); 558 param = kzalloc(sizeof(*param), GFP_KERNEL);
552 if (!param) { 559 if (!param) {
@@ -555,8 +562,38 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
555 } 562 }
556 563
 557	/* BT-WLAN coex parameters */ 564	/* BT-WLAN coex parameters */
558 for (i = 0; i < CONF_SG_PARAMS_MAX; i++) 565 for (i = 0; i < CONF_SG_STA_PARAMS_MAX; i++)
559 param->params[i] = cpu_to_le32(c->params[i]); 566 param->params[i] = cpu_to_le32(c->sta_params[i]);
567 param->param_idx = CONF_SG_PARAMS_ALL;
568
569 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
570 if (ret < 0) {
571 wl1271_warning("failed to set sg config: %d", ret);
572 goto out;
573 }
574
575out:
576 kfree(param);
577 return ret;
578}
579
580int wl1271_acx_ap_sg_cfg(struct wl1271 *wl)
581{
582 struct acx_ap_bt_wlan_coex_param *param;
583 struct conf_sg_settings *c = &wl->conf.sg;
584 int i, ret;
585
586 wl1271_debug(DEBUG_ACX, "acx sg ap cfg");
587
588 param = kzalloc(sizeof(*param), GFP_KERNEL);
589 if (!param) {
590 ret = -ENOMEM;
591 goto out;
592 }
593
 594	/* BT-WLAN coex parameters */
595 for (i = 0; i < CONF_SG_AP_PARAMS_MAX; i++)
596 param->params[i] = cpu_to_le32(c->ap_params[i]);
560 param->param_idx = CONF_SG_PARAMS_ALL; 597 param->param_idx = CONF_SG_PARAMS_ALL;
561 598
562 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 599 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
@@ -804,7 +841,8 @@ int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
804 struct acx_ap_rate_policy *acx; 841 struct acx_ap_rate_policy *acx;
805 int ret = 0; 842 int ret = 0;
806 843
807 wl1271_debug(DEBUG_ACX, "acx ap rate policy"); 844 wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x",
845 idx, c->enabled_rates);
808 846
809 acx = kzalloc(sizeof(*acx), GFP_KERNEL); 847 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
810 if (!acx) { 848 if (!acx) {
@@ -898,12 +936,19 @@ out:
898 return ret; 936 return ret;
899} 937}
900 938
901int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold) 939int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold)
902{ 940{
903 struct acx_frag_threshold *acx; 941 struct acx_frag_threshold *acx;
904 int ret = 0; 942 int ret = 0;
905 943
906 wl1271_debug(DEBUG_ACX, "acx frag threshold"); 944 /*
 945	 * If the fragmentation threshold is not configured or out of range,
 946	 * use the default value.
947 */
948 if (frag_threshold > IEEE80211_MAX_FRAG_THRESHOLD)
949 frag_threshold = wl->conf.tx.frag_threshold;
950
951 wl1271_debug(DEBUG_ACX, "acx frag threshold: %d", frag_threshold);
907 952
908 acx = kzalloc(sizeof(*acx), GFP_KERNEL); 953 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
909 954
@@ -912,7 +957,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold)
912 goto out; 957 goto out;
913 } 958 }
914 959
915 acx->frag_threshold = cpu_to_le16(frag_threshold); 960 acx->frag_threshold = cpu_to_le16((u16)frag_threshold);
916 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); 961 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
917 if (ret < 0) { 962 if (ret < 0) {
918 wl1271_warning("Setting of frag threshold failed: %d", ret); 963 wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -954,6 +999,7 @@ out:
954int wl1271_acx_ap_mem_cfg(struct wl1271 *wl) 999int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
955{ 1000{
956 struct wl1271_acx_ap_config_memory *mem_conf; 1001 struct wl1271_acx_ap_config_memory *mem_conf;
1002 struct conf_memory_settings *mem;
957 int ret; 1003 int ret;
958 1004
959 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); 1005 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -964,11 +1010,21 @@ int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
964 goto out; 1010 goto out;
965 } 1011 }
966 1012
1013 if (wl->chip.id == CHIP_ID_1283_PG20)
1014 /*
1015 * FIXME: The 128x AP FW does not yet support dynamic memory.
1016 * Use the base memory configuration for 128x for now. This
1017 * should be fine tuned in the future.
1018 */
1019 mem = &wl->conf.mem_wl128x;
1020 else
1021 mem = &wl->conf.mem_wl127x;
1022
967 /* memory config */ 1023 /* memory config */
968 mem_conf->num_stations = wl->conf.mem.num_stations; 1024 mem_conf->num_stations = mem->num_stations;
969 mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num; 1025 mem_conf->rx_mem_block_num = mem->rx_block_num;
970 mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num; 1026 mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
971 mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles; 1027 mem_conf->num_ssid_profiles = mem->ssid_profiles;
972 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); 1028 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
973 1029
974 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 1030 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -986,6 +1042,7 @@ out:
986int wl1271_acx_sta_mem_cfg(struct wl1271 *wl) 1042int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
987{ 1043{
988 struct wl1271_acx_sta_config_memory *mem_conf; 1044 struct wl1271_acx_sta_config_memory *mem_conf;
1045 struct conf_memory_settings *mem;
989 int ret; 1046 int ret;
990 1047
991 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); 1048 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -996,16 +1053,21 @@ int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
996 goto out; 1053 goto out;
997 } 1054 }
998 1055
1056 if (wl->chip.id == CHIP_ID_1283_PG20)
1057 mem = &wl->conf.mem_wl128x;
1058 else
1059 mem = &wl->conf.mem_wl127x;
1060
999 /* memory config */ 1061 /* memory config */
1000 mem_conf->num_stations = wl->conf.mem.num_stations; 1062 mem_conf->num_stations = mem->num_stations;
1001 mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num; 1063 mem_conf->rx_mem_block_num = mem->rx_block_num;
1002 mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num; 1064 mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
1003 mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles; 1065 mem_conf->num_ssid_profiles = mem->ssid_profiles;
1004 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); 1066 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
1005 mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory; 1067 mem_conf->dyn_mem_enable = mem->dynamic_memory;
1006 mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks; 1068 mem_conf->tx_free_req = mem->min_req_tx_blocks;
1007 mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks; 1069 mem_conf->rx_free_req = mem->min_req_rx_blocks;
1008 mem_conf->tx_min = wl->conf.mem.tx_min; 1070 mem_conf->tx_min = mem->tx_min;
1009 1071
1010 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 1072 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
1011 sizeof(*mem_conf)); 1073 sizeof(*mem_conf));
@@ -1019,6 +1081,32 @@ out:
1019 return ret; 1081 return ret;
1020} 1082}
1021 1083
1084int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
1085{
1086 struct wl1271_acx_host_config_bitmap *bitmap_conf;
1087 int ret;
1088
1089 bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
1090 if (!bitmap_conf) {
1091 ret = -ENOMEM;
1092 goto out;
1093 }
1094
1095 bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
1096
1097 ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
1098 bitmap_conf, sizeof(*bitmap_conf));
1099 if (ret < 0) {
1100 wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
1101 goto out;
1102 }
1103
1104out:
1105 kfree(bitmap_conf);
1106
1107 return ret;
1108}
1109
1022int wl1271_acx_init_mem_config(struct wl1271 *wl) 1110int wl1271_acx_init_mem_config(struct wl1271 *wl)
1023{ 1111{
1024 int ret; 1112 int ret;
@@ -1567,3 +1655,68 @@ out:
1567 kfree(acx); 1655 kfree(acx);
1568 return ret; 1656 return ret;
1569} 1657}
1658
1659int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable)
1660{
1661 struct acx_ap_beacon_filter *acx = NULL;
1662 int ret;
1663
1664 wl1271_debug(DEBUG_ACX, "acx set ap beacon filter: %d", enable);
1665
1666 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1667 if (!acx)
1668 return -ENOMEM;
1669
1670 acx->enable = enable ? 1 : 0;
1671
1672 ret = wl1271_cmd_configure(wl, ACX_AP_BEACON_FILTER_OPT,
1673 acx, sizeof(*acx));
1674 if (ret < 0) {
1675 wl1271_warning("acx set ap beacon filter failed: %d", ret);
1676 goto out;
1677 }
1678
1679out:
1680 kfree(acx);
1681 return ret;
1682}
1683
1684int wl1271_acx_fm_coex(struct wl1271 *wl)
1685{
1686 struct wl1271_acx_fm_coex *acx;
1687 int ret;
1688
1689 wl1271_debug(DEBUG_ACX, "acx fm coex setting");
1690
1691 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1692 if (!acx) {
1693 ret = -ENOMEM;
1694 goto out;
1695 }
1696
1697 acx->enable = wl->conf.fm_coex.enable;
1698 acx->swallow_period = wl->conf.fm_coex.swallow_period;
1699 acx->n_divider_fref_set_1 = wl->conf.fm_coex.n_divider_fref_set_1;
1700 acx->n_divider_fref_set_2 = wl->conf.fm_coex.n_divider_fref_set_2;
1701 acx->m_divider_fref_set_1 =
1702 cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_1);
1703 acx->m_divider_fref_set_2 =
1704 cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_2);
1705 acx->coex_pll_stabilization_time =
1706 cpu_to_le32(wl->conf.fm_coex.coex_pll_stabilization_time);
1707 acx->ldo_stabilization_time =
1708 cpu_to_le16(wl->conf.fm_coex.ldo_stabilization_time);
1709 acx->fm_disturbed_band_margin =
1710 wl->conf.fm_coex.fm_disturbed_band_margin;
1711 acx->swallow_clk_diff = wl->conf.fm_coex.swallow_clk_diff;
1712
1713 ret = wl1271_cmd_configure(wl, ACX_FM_COEX_CFG, acx, sizeof(*acx));
1714 if (ret < 0) {
1715 wl1271_warning("acx fm coex setting failed: %d", ret);
1716 goto out;
1717 }
1718
1719out:
1720 kfree(acx);
1721 return ret;
1722}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index dd19b01d807b..9a895e3cc613 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -303,7 +303,6 @@ struct acx_beacon_filter_option {
303 struct acx_header header; 303 struct acx_header header;
304 304
305 u8 enable; 305 u8 enable;
306
307 /* 306 /*
308 * The number of beacons without the unicast TIM 307 * The number of beacons without the unicast TIM
309 * bit set that the firmware buffers before 308 * bit set that the firmware buffers before
@@ -370,14 +369,23 @@ struct acx_bt_wlan_coex {
370 u8 pad[3]; 369 u8 pad[3];
371} __packed; 370} __packed;
372 371
373struct acx_bt_wlan_coex_param { 372struct acx_sta_bt_wlan_coex_param {
374 struct acx_header header; 373 struct acx_header header;
375 374
376 __le32 params[CONF_SG_PARAMS_MAX]; 375 __le32 params[CONF_SG_STA_PARAMS_MAX];
377 u8 param_idx; 376 u8 param_idx;
378 u8 padding[3]; 377 u8 padding[3];
379} __packed; 378} __packed;
380 379
380struct acx_ap_bt_wlan_coex_param {
381 struct acx_header header;
382
383 __le32 params[CONF_SG_AP_PARAMS_MAX];
384 u8 param_idx;
385 u8 padding[3];
386} __packed;
387
388
381struct acx_dco_itrim_params { 389struct acx_dco_itrim_params {
382 struct acx_header header; 390 struct acx_header header;
383 391
@@ -939,6 +947,16 @@ struct wl1271_acx_keep_alive_config {
939 u8 padding; 947 u8 padding;
940} __packed; 948} __packed;
941 949
950#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
951#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
952#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
953
954struct wl1271_acx_host_config_bitmap {
955 struct acx_header header;
956
957 __le32 host_cfg_bitmap;
958} __packed;
959
942enum { 960enum {
943 WL1271_ACX_TRIG_TYPE_LEVEL = 0, 961 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
944 WL1271_ACX_TRIG_TYPE_EDGE, 962 WL1271_ACX_TRIG_TYPE_EDGE,
@@ -1162,6 +1180,72 @@ struct wl1271_acx_inconnection_sta {
1162 u8 padding1[2]; 1180 u8 padding1[2];
1163} __packed; 1181} __packed;
1164 1182
1183struct acx_ap_beacon_filter {
1184 struct acx_header header;
1185
1186 u8 enable;
1187 u8 pad[3];
1188} __packed;
1189
1190/*
1191 * ACX_FM_COEX_CFG
1192 * set the FM co-existence parameters.
1193 */
1194struct wl1271_acx_fm_coex {
1195 struct acx_header header;
1196 /* enable(1) / disable(0) the FM Coex feature */
1197 u8 enable;
1198 /*
1199 * Swallow period used in COEX PLL swallowing mechanism.
1200 * 0xFF = use FW default
1201 */
1202 u8 swallow_period;
1203 /*
1204 * The N divider used in COEX PLL swallowing mechanism for Fref of
1205 * 38.4/19.2 Mhz. 0xFF = use FW default
1206 */
1207 u8 n_divider_fref_set_1;
1208 /*
1209 * The N divider used in COEX PLL swallowing mechanism for Fref of
1210 * 26/52 Mhz. 0xFF = use FW default
1211 */
1212 u8 n_divider_fref_set_2;
1213 /*
1214 * The M divider used in COEX PLL swallowing mechanism for Fref of
1215 * 38.4/19.2 Mhz. 0xFFFF = use FW default
1216 */
1217 __le16 m_divider_fref_set_1;
1218 /*
1219 * The M divider used in COEX PLL swallowing mechanism for Fref of
1220 * 26/52 Mhz. 0xFFFF = use FW default
1221 */
1222 __le16 m_divider_fref_set_2;
1223 /*
1224 * The time duration in uSec required for COEX PLL to stabilize.
1225 * 0xFFFFFFFF = use FW default
1226 */
1227 __le32 coex_pll_stabilization_time;
1228 /*
1229 * The time duration in uSec required for LDO to stabilize.
1230	 * 0xFFFF = use FW default
1231 */
1232 __le16 ldo_stabilization_time;
1233 /*
1234 * The disturbed frequency band margin around the disturbed frequency
1235 * center (single sided).
1236 * For example, if 2 is configured, the following channels will be
1237 * considered disturbed channel:
1238	 * 80 +- 0.1 MHz, 91 +- 0.1 MHz, 98 +- 0.1 MHz, 102 +- 0.1 MHz
1239 * 0xFF = use FW default
1240 */
1241 u8 fm_disturbed_band_margin;
1242 /*
1243 * The swallow clock difference of the swallowing mechanism.
1244 * 0xFF = use FW default
1245 */
1246 u8 swallow_clk_diff;
1247} __packed;
1248
1165enum { 1249enum {
1166 ACX_WAKE_UP_CONDITIONS = 0x0002, 1250 ACX_WAKE_UP_CONDITIONS = 0x0002,
1167 ACX_MEM_CFG = 0x0003, 1251 ACX_MEM_CFG = 0x0003,
@@ -1180,6 +1264,7 @@ enum {
1180 ACX_TID_CFG = 0x001A, 1264 ACX_TID_CFG = 0x001A,
1181 ACX_PS_RX_STREAMING = 0x001B, 1265 ACX_PS_RX_STREAMING = 0x001B,
1182 ACX_BEACON_FILTER_OPT = 0x001F, 1266 ACX_BEACON_FILTER_OPT = 0x001F,
1267 ACX_AP_BEACON_FILTER_OPT = 0x0020,
1183 ACX_NOISE_HIST = 0x0021, 1268 ACX_NOISE_HIST = 0x0021,
1184 ACX_HDK_VERSION = 0x0022, /* ??? */ 1269 ACX_HDK_VERSION = 0x0022, /* ??? */
1185 ACX_PD_THRESHOLD = 0x0023, 1270 ACX_PD_THRESHOLD = 0x0023,
@@ -1191,6 +1276,7 @@ enum {
1191 ACX_BCN_DTIM_OPTIONS = 0x0031, 1276 ACX_BCN_DTIM_OPTIONS = 0x0031,
1192 ACX_SG_ENABLE = 0x0032, 1277 ACX_SG_ENABLE = 0x0032,
1193 ACX_SG_CFG = 0x0033, 1278 ACX_SG_CFG = 0x0033,
1279 ACX_FM_COEX_CFG = 0x0034,
1194 ACX_BEACON_FILTER_TABLE = 0x0038, 1280 ACX_BEACON_FILTER_TABLE = 0x0038,
1195 ACX_ARP_IP_FILTER = 0x0039, 1281 ACX_ARP_IP_FILTER = 0x0039,
1196 ACX_ROAMING_STATISTICS_TBL = 0x003B, 1282 ACX_ROAMING_STATISTICS_TBL = 0x003B,
@@ -1247,13 +1333,14 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
1247int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, 1333int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
1248 void *mc_list, u32 mc_list_len); 1334 void *mc_list, u32 mc_list_len);
1249int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1335int wl1271_acx_service_period_timeout(struct wl1271 *wl);
1250int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); 1336int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
1251int wl1271_acx_dco_itrim_params(struct wl1271 *wl); 1337int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
1252int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); 1338int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1253int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1339int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1254int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable); 1340int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
1255int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable); 1341int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
1256int wl1271_acx_sg_cfg(struct wl1271 *wl); 1342int wl1271_acx_sta_sg_cfg(struct wl1271 *wl);
1343int wl1271_acx_ap_sg_cfg(struct wl1271 *wl);
1257int wl1271_acx_cca_threshold(struct wl1271 *wl); 1344int wl1271_acx_cca_threshold(struct wl1271 *wl);
1258int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); 1345int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
1259int wl1271_acx_aid(struct wl1271 *wl, u16 aid); 1346int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
@@ -1270,11 +1357,12 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
1270int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, 1357int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
1271 u8 tsid, u8 ps_scheme, u8 ack_policy, 1358 u8 tsid, u8 ps_scheme, u8 ack_policy,
1272 u32 apsd_conf0, u32 apsd_conf1); 1359 u32 apsd_conf0, u32 apsd_conf1);
1273int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold); 1360int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
1274int wl1271_acx_tx_config_options(struct wl1271 *wl); 1361int wl1271_acx_tx_config_options(struct wl1271 *wl);
1275int wl1271_acx_ap_mem_cfg(struct wl1271 *wl); 1362int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
1276int wl1271_acx_sta_mem_cfg(struct wl1271 *wl); 1363int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
1277int wl1271_acx_init_mem_config(struct wl1271 *wl); 1364int wl1271_acx_init_mem_config(struct wl1271 *wl);
1365int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
1278int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1366int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1279int wl1271_acx_smart_reflex(struct wl1271 *wl); 1367int wl1271_acx_smart_reflex(struct wl1271 *wl);
1280int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); 1368int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
@@ -1299,5 +1387,7 @@ int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1299int wl1271_acx_max_tx_retry(struct wl1271 *wl); 1387int wl1271_acx_max_tx_retry(struct wl1271 *wl);
1300int wl1271_acx_config_ps(struct wl1271 *wl); 1388int wl1271_acx_config_ps(struct wl1271 *wl);
1301int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr); 1389int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1390int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable);
1391int wl1271_acx_fm_coex(struct wl1271 *wl);
1302 1392
1303#endif /* __WL1271_ACX_H__ */ 1393#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 6934dffd5174..b07f8b7e5f11 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -22,6 +22,7 @@
22 */ 22 */
23 23
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/wl12xx.h>
25 26
26#include "acx.h" 27#include "acx.h"
27#include "reg.h" 28#include "reg.h"
@@ -243,33 +244,57 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
243 if (wl->nvs == NULL) 244 if (wl->nvs == NULL)
244 return -ENODEV; 245 return -ENODEV;
245 246
246 /* 247 if (wl->chip.id == CHIP_ID_1283_PG20) {
247 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band 248 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
248 * configurations) can be removed when those NVS files stop floating 249
249 * around. 250 if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
250 */ 251 if (nvs->general_params.dual_mode_select)
251 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) || 252 wl->enable_11a = true;
252 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) { 253 } else {
253 /* for now 11a is unsupported in AP mode */ 254 wl1271_error("nvs size is not as expected: %zu != %zu",
254 if (wl->bss_type != BSS_TYPE_AP_BSS && 255 wl->nvs_len,
255 wl->nvs->general_params.dual_mode_select) 256 sizeof(struct wl128x_nvs_file));
256 wl->enable_11a = true; 257 kfree(wl->nvs);
257 } 258 wl->nvs = NULL;
259 wl->nvs_len = 0;
260 return -EILSEQ;
261 }
258 262
259 if (wl->nvs_len != sizeof(struct wl1271_nvs_file) && 263 /* only the first part of the NVS needs to be uploaded */
260 (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE || 264 nvs_len = sizeof(nvs->nvs);
261 wl->enable_11a)) { 265 nvs_ptr = (u8 *)nvs->nvs;
262 wl1271_error("nvs size is not as expected: %zu != %zu", 266
263 wl->nvs_len, sizeof(struct wl1271_nvs_file)); 267 } else {
264 kfree(wl->nvs); 268 struct wl1271_nvs_file *nvs =
265 wl->nvs = NULL; 269 (struct wl1271_nvs_file *)wl->nvs;
266 wl->nvs_len = 0; 270 /*
267 return -EILSEQ; 271 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
268 } 272 * band configurations) can be removed when those NVS files stop
273 * floating around.
274 */
275 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
276 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
277 /* for now 11a is unsupported in AP mode */
278 if (wl->bss_type != BSS_TYPE_AP_BSS &&
279 nvs->general_params.dual_mode_select)
280 wl->enable_11a = true;
281 }
269 282
270 /* only the first part of the NVS needs to be uploaded */ 283 if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
271 nvs_len = sizeof(wl->nvs->nvs); 284 (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
272 nvs_ptr = (u8 *)wl->nvs->nvs; 285 wl->enable_11a)) {
286 wl1271_error("nvs size is not as expected: %zu != %zu",
287 wl->nvs_len, sizeof(struct wl1271_nvs_file));
288 kfree(wl->nvs);
289 wl->nvs = NULL;
290 wl->nvs_len = 0;
291 return -EILSEQ;
292 }
293
294 /* only the first part of the NVS needs to be uploaded */
295 nvs_len = sizeof(nvs->nvs);
296 nvs_ptr = (u8 *) nvs->nvs;
297 }
273 298
274 /* update current MAC address to NVS */ 299 /* update current MAC address to NVS */
275 nvs_ptr[11] = wl->mac_addr[0]; 300 nvs_ptr[11] = wl->mac_addr[0];
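
The reworked NVS upload picks the expected image layout by chip id: wl128x images must match sizeof(struct wl128x_nvs_file) exactly, while wl127x still accepts the legacy size (subject to the 11a check in the hunk above). The sketch below shows only the size dispatch; the sizes and chip-id constant are placeholders, not the real structure sizes, and the legacy/11a interaction is left out.

/*
 * Simplified sketch of the size check in the reworked
 * wl1271_boot_upload_nvs(): the accepted NVS length depends on the
 * chip generation, and a mismatch rejects the image.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_CHIP_ID_1283_PG20	0x05030101u	/* placeholder value */
#define DEMO_WL128X_NVS_LEN	1113u		/* placeholder size  */
#define DEMO_WL127X_NVS_LEN	912u		/* placeholder size  */
#define DEMO_WL127X_LEGACY_LEN	800u		/* placeholder size  */

static bool demo_nvs_len_ok(unsigned chip_id, size_t len)
{
	if (chip_id == DEMO_CHIP_ID_1283_PG20)
		return len == DEMO_WL128X_NVS_LEN;
	return len == DEMO_WL127X_NVS_LEN || len == DEMO_WL127X_LEGACY_LEN;
}

int main(void)
{
	printf("%d\n", demo_nvs_len_ok(DEMO_CHIP_ID_1283_PG20, 1113));
	printf("%d\n", demo_nvs_len_ok(0, 800));
	return 0;
}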
@@ -319,10 +344,13 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
319 /* 344 /*
320 * We've reached the first zero length, the first NVS table 345 * We've reached the first zero length, the first NVS table
321 * is located at an aligned offset which is at least 7 bytes further. 346 * is located at an aligned offset which is at least 7 bytes further.
 347 * NOTE: The wl->nvs->nvs element must come first; to simplify the
 348 * casting, we assume it sits at the beginning of the wl->nvs
 349 * structure.
322 */ 350 */
323 nvs_ptr = (u8 *)wl->nvs->nvs + 351 nvs_ptr = (u8 *)wl->nvs +
324 ALIGN(nvs_ptr - (u8 *)wl->nvs->nvs + 7, 4); 352 ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
325 nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs; 353 nvs_len -= nvs_ptr - (u8 *)wl->nvs;
326 354
327 /* Now we must set the partition correctly */ 355 /* Now we must set the partition correctly */
328 wl1271_set_partition(wl, &part_table[PART_WORK]); 356 wl1271_set_partition(wl, &part_table[PART_WORK]);
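
The alignment rework above offsets from the start of the NVS buffer rather than from the nvs[] member, which is why the comment requires nvs[] to stay first in the structure. The sketch below shows the same step of skipping at least 7 bytes and rounding up to a 4-byte boundary, assuming ALIGN() is the usual round-up-to-multiple macro (spelled ALIGN_UP here).

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * matching the kernel's ALIGN() macro. */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
        uint8_t nvs[64];
        uint8_t *p = nvs + 13;  /* position of the first zero length */

        /* advance to an aligned offset at least 7 bytes further */
        p = nvs + ALIGN_UP((uintptr_t)(p - nvs) + 7, 4);

        printf("old offset 13 -> new offset %td\n", p - nvs); /* prints 20 */
        return 0;
}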
@@ -450,10 +478,14 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
450 DISCONNECT_EVENT_COMPLETE_ID | 478 DISCONNECT_EVENT_COMPLETE_ID |
451 RSSI_SNR_TRIGGER_0_EVENT_ID | 479 RSSI_SNR_TRIGGER_0_EVENT_ID |
452 PSPOLL_DELIVERY_FAILURE_EVENT_ID | 480 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
453 SOFT_GEMINI_SENSE_EVENT_ID; 481 SOFT_GEMINI_SENSE_EVENT_ID |
482 PERIODIC_SCAN_REPORT_EVENT_ID |
483 PERIODIC_SCAN_COMPLETE_EVENT_ID;
454 484
455 if (wl->bss_type == BSS_TYPE_AP_BSS) 485 if (wl->bss_type == BSS_TYPE_AP_BSS)
456 wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID; 486 wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
487 else
488 wl->event_mask |= DUMMY_PACKET_EVENT_ID;
457 489
458 ret = wl1271_event_unmask(wl); 490 ret = wl1271_event_unmask(wl);
459 if (ret < 0) { 491 if (ret < 0) {
@@ -493,24 +525,159 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
493 wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION; 525 wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
494} 526}
495 527
496/* uploads NVS and firmware */ 528static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
497int wl1271_load_firmware(struct wl1271 *wl)
498{ 529{
499 int ret = 0; 530 u16 spare_reg;
500 u32 tmp, clk, pause; 531
532 /* Mask bits [2] & [8:4] in the sys_clk_cfg register */
533 spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
534 if (spare_reg == 0xFFFF)
535 return -EFAULT;
536 spare_reg |= (BIT(3) | BIT(5) | BIT(6));
537 wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
538
539 /* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
540 wl1271_top_reg_write(wl, SYS_CLK_CFG_REG,
541 WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
542
543 /* Delay execution for 15msec, to let the HW settle */
544 mdelay(15);
545
546 return 0;
547}
548
549static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
550{
551 u16 tcxo_detection;
552
553 tcxo_detection = wl1271_top_reg_read(wl, TCXO_CLK_DETECT_REG);
554 if (tcxo_detection & TCXO_DET_FAILED)
555 return false;
556
557 return true;
558}
559
560static bool wl128x_is_fref_valid(struct wl1271 *wl)
561{
562 u16 fref_detection;
563
564 fref_detection = wl1271_top_reg_read(wl, FREF_CLK_DETECT_REG);
565 if (fref_detection & FREF_CLK_DETECT_FAIL)
566 return false;
567
568 return true;
569}
570
571static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
572{
573 wl1271_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
574 wl1271_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
575 wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
576
577 return 0;
578}
579
580static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
581{
582 u16 spare_reg;
583 u16 pll_config;
584 u8 input_freq;
585
586 /* Mask bits [3:1] in the sys_clk_cfg register */
587 spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
588 if (spare_reg == 0xFFFF)
589 return -EFAULT;
590 spare_reg |= BIT(2);
591 wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
592
593 /* Handle special cases of the TCXO clock */
594 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
595 wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
596 return wl128x_manually_configure_mcs_pll(wl);
597
598 /* Set the input frequency according to the selected clock source */
599 input_freq = (clk & 1) + 1;
600
601 pll_config = wl1271_top_reg_read(wl, MCS_PLL_CONFIG_REG);
602 if (pll_config == 0xFFFF)
603 return -EFAULT;
604 pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
605 pll_config |= MCS_PLL_ENABLE_HP;
606 wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
607
608 return 0;
609}
610
611/*
 612 * WL128x has two clock inputs - TCXO and FREF.
613 * TCXO is the main clock of the device, while FREF is used to sync
614 * between the GPS and the cellular modem.
615 * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
616 * as the WLAN/BT main clock.
617 */
618static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
619{
620 u16 sys_clk_cfg;
621
622 /* For XTAL-only modes, FREF will be used after switching from TCXO */
623 if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
624 wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
625 if (!wl128x_switch_tcxo_to_fref(wl))
626 return -EINVAL;
627 goto fref_clk;
628 }
629
630 /* Query the HW, to determine which clock source we should use */
631 sys_clk_cfg = wl1271_top_reg_read(wl, SYS_CLK_CFG_REG);
632 if (sys_clk_cfg == 0xFFFF)
633 return -EINVAL;
634 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
635 goto fref_clk;
636
637 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
638 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
639 wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
640 if (!wl128x_switch_tcxo_to_fref(wl))
641 return -EINVAL;
642 goto fref_clk;
643 }
644
645 /* TCXO clock is selected */
646 if (!wl128x_is_tcxo_valid(wl))
647 return -EINVAL;
648 *selected_clock = wl->tcxo_clock;
649 goto config_mcs_pll;
650
651fref_clk:
652 /* FREF clock is selected */
653 if (!wl128x_is_fref_valid(wl))
654 return -EINVAL;
655 *selected_clock = wl->ref_clock;
656
657config_mcs_pll:
658 return wl128x_configure_mcs_pll(wl, *selected_clock);
659}
660
661static int wl127x_boot_clk(struct wl1271 *wl)
662{
663 u32 pause;
664 u32 clk;
501 665
502 wl1271_boot_hw_version(wl); 666 wl1271_boot_hw_version(wl);
503 667
504 if (wl->ref_clock == 0 || wl->ref_clock == 2 || wl->ref_clock == 4) 668 if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
669 wl->ref_clock == CONF_REF_CLK_38_4_E ||
670 wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
505 /* ref clk: 19.2/38.4/38.4-XTAL */ 671 /* ref clk: 19.2/38.4/38.4-XTAL */
506 clk = 0x3; 672 clk = 0x3;
507 else if (wl->ref_clock == 1 || wl->ref_clock == 3) 673 else if (wl->ref_clock == CONF_REF_CLK_26_E ||
674 wl->ref_clock == CONF_REF_CLK_52_E)
508 /* ref clk: 26/52 */ 675 /* ref clk: 26/52 */
509 clk = 0x5; 676 clk = 0x5;
510 else 677 else
511 return -EINVAL; 678 return -EINVAL;
512 679
513 if (wl->ref_clock != 0) { 680 if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
514 u16 val; 681 u16 val;
515 /* Set clock type (open drain) */ 682 /* Set clock type (open drain) */
516 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); 683 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
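
The comment in wl128x_boot_clk() above summarizes the selection: XTAL-only reference clocks and 16.368/32.736MHz TCXOs force a switch to FREF, otherwise TCXO remains the main WLAN clock. A pure-function sketch of that decision follows; the enum names only mirror the WL12XX_REFCLOCK_*/WL12XX_TCXOCLOCK_* platform-data constants, and the hardware mux query of SYS_CLK_CFG_REG is deliberately left out.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative copies of the platform-data clock identifiers. */
enum refclock  { REFCLOCK_19, REFCLOCK_26, REFCLOCK_38, REFCLOCK_52,
                 REFCLOCK_38_XTAL, REFCLOCK_26_XTAL };
enum tcxoclock { TCXOCLOCK_19_2, TCXOCLOCK_26, TCXOCLOCK_38_4,
                 TCXOCLOCK_16_368, TCXOCLOCK_32_736,
                 TCXOCLOCK_16_8, TCXOCLOCK_33_6 };

/* Decide whether the wl128x should run from FREF (true) or TCXO (false). */
static bool wl128x_use_fref(enum refclock ref, enum tcxoclock tcxo)
{
        /* XTAL-only modes: TCXO is switched off, FREF takes over */
        if (ref == REFCLOCK_26_XTAL || ref == REFCLOCK_38_XTAL)
                return true;

        /* TCXO frequencies the MCS PLL cannot use directly */
        if (tcxo == TCXOCLOCK_16_368 || tcxo == TCXOCLOCK_32_736)
                return true;

        return false;   /* otherwise TCXO remains the main clock */
}

int main(void)
{
        printf("26MHz XTAL -> %s\n",
               wl128x_use_fref(REFCLOCK_26_XTAL, TCXOCLOCK_19_2) ?
               "FREF" : "TCXO");
        return 0;
}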
@@ -540,6 +707,26 @@ int wl1271_load_firmware(struct wl1271 *wl)
540 pause |= WU_COUNTER_PAUSE_VAL; 707 pause |= WU_COUNTER_PAUSE_VAL;
541 wl1271_write32(wl, WU_COUNTER_PAUSE, pause); 708 wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
542 709
710 return 0;
711}
712
713/* uploads NVS and firmware */
714int wl1271_load_firmware(struct wl1271 *wl)
715{
716 int ret = 0;
717 u32 tmp, clk;
718 int selected_clock = -1;
719
720 if (wl->chip.id == CHIP_ID_1283_PG20) {
721 ret = wl128x_boot_clk(wl, &selected_clock);
722 if (ret < 0)
723 goto out;
724 } else {
725 ret = wl127x_boot_clk(wl);
726 if (ret < 0)
727 goto out;
728 }
729
543 /* Continue the ELP wake up sequence */ 730 /* Continue the ELP wake up sequence */
544 wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); 731 wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
545 udelay(500); 732 udelay(500);
@@ -555,7 +742,12 @@ int wl1271_load_firmware(struct wl1271 *wl)
555 742
556 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 743 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
557 744
558 clk |= (wl->ref_clock << 1) << 4; 745 if (wl->chip.id == CHIP_ID_1283_PG20) {
746 clk |= ((selected_clock & 0x3) << 1) << 4;
747 } else {
748 clk |= (wl->ref_clock << 1) << 4;
749 }
750
559 wl1271_write32(wl, DRPW_SCRATCH_START, clk); 751 wl1271_write32(wl, DRPW_SCRATCH_START, clk);
560 752
561 wl1271_set_partition(wl, &part_table[PART_WORK]); 753 wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -585,16 +777,12 @@ int wl1271_load_firmware(struct wl1271 *wl)
585 /* 6. read the EEPROM parameters */ 777 /* 6. read the EEPROM parameters */
586 tmp = wl1271_read32(wl, SCR_PAD2); 778 tmp = wl1271_read32(wl, SCR_PAD2);
587 779
588 ret = wl1271_boot_write_irq_polarity(wl);
589 if (ret < 0)
590 goto out;
591
592 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
593 WL1271_ACX_ALL_EVENTS_VECTOR);
594
595 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly 780 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
596 * to upload_fw) */ 781 * to upload_fw) */
597 782
783 if (wl->chip.id == CHIP_ID_1283_PG20)
784 wl1271_top_reg_write(wl, SDIO_IO_DS, wl->conf.hci_io_ds);
785
598 ret = wl1271_boot_upload_firmware(wl); 786 ret = wl1271_boot_upload_firmware(wl);
599 if (ret < 0) 787 if (ret < 0)
600 goto out; 788 goto out;
@@ -618,6 +806,13 @@ int wl1271_boot(struct wl1271 *wl)
618 if (ret < 0) 806 if (ret < 0)
619 goto out; 807 goto out;
620 808
809 ret = wl1271_boot_write_irq_polarity(wl);
810 if (ret < 0)
811 goto out;
812
813 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
814 WL1271_ACX_ALL_EVENTS_VECTOR);
815
621 /* Enable firmware interrupts now */ 816 /* Enable firmware interrupts now */
622 wl1271_boot_enable_interrupts(wl); 817 wl1271_boot_enable_interrupts(wl);
623 818
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
index 17229b86fc71..e8f8255bbabe 100644
--- a/drivers/net/wireless/wl12xx/boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -74,4 +74,56 @@ struct wl1271_static_data {
74#define FREF_CLK_POLARITY_BITS 0xfffff8ff 74#define FREF_CLK_POLARITY_BITS 0xfffff8ff
75#define CLK_REQ_OUTN_SEL 0x700 75#define CLK_REQ_OUTN_SEL 0x700
76 76
77/* PLL configuration algorithm for wl128x */
78#define SYS_CLK_CFG_REG 0x2200
79/* Bit[0] - 0-TCXO, 1-FREF */
80#define MCS_PLL_CLK_SEL_FREF BIT(0)
81/* Bit[3:2] - 01-TCXO, 10-FREF */
82#define WL_CLK_REQ_TYPE_FREF BIT(3)
83#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2))
84/* Bit[4] - 0-TCXO, 1-FREF */
85#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4)
86
87#define TCXO_ILOAD_INT_REG 0x2264
88#define TCXO_CLK_DETECT_REG 0x2266
89
90#define TCXO_DET_FAILED BIT(4)
91
92#define FREF_ILOAD_INT_REG 0x2084
93#define FREF_CLK_DETECT_REG 0x2086
94#define FREF_CLK_DETECT_FAIL BIT(4)
95
96/* Use this reg for masking during driver access */
97#define WL_SPARE_REG 0x2320
98#define WL_SPARE_VAL BIT(2)
99/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */
100#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3))
101
102#define PLL_LOCK_COUNTERS_REG 0xD8C
103#define PLL_LOCK_COUNTERS_COEX 0x0F
104#define PLL_LOCK_COUNTERS_MCS 0xF0
105#define MCS_PLL_OVERRIDE_REG 0xD90
106#define MCS_PLL_CONFIG_REG 0xD92
107#define MCS_SEL_IN_FREQ_MASK 0x0070
108#define MCS_SEL_IN_FREQ_SHIFT 4
109#define MCS_PLL_CONFIG_REG_VAL 0x73
110#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1))
111
112#define MCS_PLL_M_REG 0xD94
113#define MCS_PLL_N_REG 0xD96
114#define MCS_PLL_M_REG_VAL 0xC8
115#define MCS_PLL_N_REG_VAL 0x07
116
117#define SDIO_IO_DS 0xd14
118
119/* SDIO/wSPI DS configuration values */
120enum {
121 HCI_IO_DS_8MA = 0,
122 HCI_IO_DS_4MA = 1, /* default */
123 HCI_IO_DS_6MA = 2,
124 HCI_IO_DS_2MA = 3,
125};
126
127/* end PLL configuration algorithm for wl128x */
128
77#endif 129#endif
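
The constants added to boot.h above feed a read-modify-write of MCS_PLL_CONFIG_REG in wl128x_configure_mcs_pll(). A user-space sketch of that register update is shown below; top_reg_read()/top_reg_write() are hypothetical stand-ins for the driver's TOP register accessors and are backed here by a plain array.

#include <stdint.h>
#include <stdio.h>

#define MCS_PLL_CONFIG_REG      0xD92
#define MCS_SEL_IN_FREQ_SHIFT   4
#define MCS_PLL_ENABLE_HP       ((1 << 0) | (1 << 1))

static uint16_t regs[0x10000];          /* fake TOP register file */

static uint16_t top_reg_read(uint16_t addr)             { return regs[addr]; }
static void top_reg_write(uint16_t addr, uint16_t val)  { regs[addr] = val; }

/* input_freq is 1 for TCXO-derived clocks, 2 for FREF-derived ones */
static void configure_mcs_pll(uint8_t input_freq)
{
        uint16_t pll_config = top_reg_read(MCS_PLL_CONFIG_REG);

        pll_config |= (uint16_t)(input_freq << MCS_SEL_IN_FREQ_SHIFT);
        pll_config |= MCS_PLL_ENABLE_HP;
        top_reg_write(MCS_PLL_CONFIG_REG, pll_config);
}

int main(void)
{
        configure_mcs_pll(2);
        printf("MCS_PLL_CONFIG_REG = 0x%04x\n", regs[MCS_PLL_CONFIG_REG]);
        return 0;
}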
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 96324336f936..42935ac72663 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -76,7 +76,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
76 if (time_after(jiffies, timeout)) { 76 if (time_after(jiffies, timeout)) {
77 wl1271_error("command complete timeout"); 77 wl1271_error("command complete timeout");
78 ret = -ETIMEDOUT; 78 ret = -ETIMEDOUT;
79 goto out; 79 goto fail;
80 } 80 }
81 81
82 poll_count++; 82 poll_count++;
@@ -96,21 +96,67 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
96 status = le16_to_cpu(cmd->status); 96 status = le16_to_cpu(cmd->status);
97 if (status != CMD_STATUS_SUCCESS) { 97 if (status != CMD_STATUS_SUCCESS) {
98 wl1271_error("command execute failure %d", status); 98 wl1271_error("command execute failure %d", status);
99 ieee80211_queue_work(wl->hw, &wl->recovery_work);
100 ret = -EIO; 99 ret = -EIO;
100 goto fail;
101 } 101 }
102 102
103 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, 103 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
104 WL1271_ACX_INTR_CMD_COMPLETE); 104 WL1271_ACX_INTR_CMD_COMPLETE);
105 return 0;
105 106
106out: 107fail:
108 WARN_ON(1);
109 ieee80211_queue_work(wl->hw, &wl->recovery_work);
107 return ret; 110 return ret;
108} 111}
109 112
110int wl1271_cmd_general_parms(struct wl1271 *wl) 113int wl1271_cmd_general_parms(struct wl1271 *wl)
111{ 114{
112 struct wl1271_general_parms_cmd *gen_parms; 115 struct wl1271_general_parms_cmd *gen_parms;
113 struct wl1271_ini_general_params *gp = &wl->nvs->general_params; 116 struct wl1271_ini_general_params *gp =
117 &((struct wl1271_nvs_file *)wl->nvs)->general_params;
118 bool answer = false;
119 int ret;
120
121 if (!wl->nvs)
122 return -ENODEV;
123
124 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
125 if (!gen_parms)
126 return -ENOMEM;
127
128 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
129
130 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
131
132 if (gp->tx_bip_fem_auto_detect)
133 answer = true;
134
135 /* Override the REF CLK from the NVS with the one from platform data */
136 gen_parms->general_params.ref_clock = wl->ref_clock;
137
138 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
139 if (ret < 0) {
140 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
141 goto out;
142 }
143
144 gp->tx_bip_fem_manufacturer =
145 gen_parms->general_params.tx_bip_fem_manufacturer;
146
147 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
148 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
149
150out:
151 kfree(gen_parms);
152 return ret;
153}
154
155int wl128x_cmd_general_parms(struct wl1271 *wl)
156{
157 struct wl128x_general_parms_cmd *gen_parms;
158 struct wl128x_ini_general_params *gp =
159 &((struct wl128x_nvs_file *)wl->nvs)->general_params;
114 bool answer = false; 160 bool answer = false;
115 int ret; 161 int ret;
116 162
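
The cmd.c hunk above routes both the timeout and the bad-status paths through a single fail label that warns and schedules the recovery work. The sketch below shows the same shape of polling with a bounded retry count and one failure exit; queue_recovery() and read_status() are invented stand-ins, not driver functions.

#include <stdbool.h>
#include <stdio.h>

#define MAX_POLLS 10

static bool cmd_done;                           /* set by the fake "hardware" */

static void queue_recovery(void) { puts("recovery scheduled"); }
static int read_status(void)     { return 0; }  /* pretend CMD_STATUS_SUCCESS */

static int send_cmd(void)
{
        int polls = 0;
        int ret;

        /* poll for command completion, as wl1271_cmd_send() does */
        while (!cmd_done) {
                if (++polls > MAX_POLLS) {
                        ret = -1;               /* -ETIMEDOUT in the driver */
                        goto fail;
                }
        }

        if (read_status() != 0) {
                ret = -5;                       /* -EIO in the driver */
                goto fail;
        }

        return 0;

fail:
        /* both error paths funnel through one place: warn and recover */
        queue_recovery();
        return ret;
}

int main(void)
{
        cmd_done = false;
        printf("send_cmd() = %d\n", send_cmd());
        return 0;
}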
@@ -128,6 +174,10 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
128 if (gp->tx_bip_fem_auto_detect) 174 if (gp->tx_bip_fem_auto_detect)
129 answer = true; 175 answer = true;
130 176
177 /* Replace REF and TCXO CLKs with the ones from platform data */
178 gen_parms->general_params.ref_clock = wl->ref_clock;
179 gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
180
131 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer); 181 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
132 if (ret < 0) { 182 if (ret < 0) {
133 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); 183 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -147,8 +197,9 @@ out:
147 197
148int wl1271_cmd_radio_parms(struct wl1271 *wl) 198int wl1271_cmd_radio_parms(struct wl1271 *wl)
149{ 199{
200 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
150 struct wl1271_radio_parms_cmd *radio_parms; 201 struct wl1271_radio_parms_cmd *radio_parms;
151 struct wl1271_ini_general_params *gp = &wl->nvs->general_params; 202 struct wl1271_ini_general_params *gp = &nvs->general_params;
152 int ret; 203 int ret;
153 204
154 if (!wl->nvs) 205 if (!wl->nvs)
@@ -161,18 +212,18 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
161 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM; 212 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
162 213
163 /* 2.4GHz parameters */ 214 /* 2.4GHz parameters */
164 memcpy(&radio_parms->static_params_2, &wl->nvs->stat_radio_params_2, 215 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
165 sizeof(struct wl1271_ini_band_params_2)); 216 sizeof(struct wl1271_ini_band_params_2));
166 memcpy(&radio_parms->dyn_params_2, 217 memcpy(&radio_parms->dyn_params_2,
167 &wl->nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params, 218 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
168 sizeof(struct wl1271_ini_fem_params_2)); 219 sizeof(struct wl1271_ini_fem_params_2));
169 220
170 /* 5GHz parameters */ 221 /* 5GHz parameters */
171 memcpy(&radio_parms->static_params_5, 222 memcpy(&radio_parms->static_params_5,
172 &wl->nvs->stat_radio_params_5, 223 &nvs->stat_radio_params_5,
173 sizeof(struct wl1271_ini_band_params_5)); 224 sizeof(struct wl1271_ini_band_params_5));
174 memcpy(&radio_parms->dyn_params_5, 225 memcpy(&radio_parms->dyn_params_5,
175 &wl->nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params, 226 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
176 sizeof(struct wl1271_ini_fem_params_5)); 227 sizeof(struct wl1271_ini_fem_params_5));
177 228
178 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ", 229 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
@@ -186,6 +237,50 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
186 return ret; 237 return ret;
187} 238}
188 239
240int wl128x_cmd_radio_parms(struct wl1271 *wl)
241{
242 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
243 struct wl128x_radio_parms_cmd *radio_parms;
244 struct wl128x_ini_general_params *gp = &nvs->general_params;
245 int ret;
246
247 if (!wl->nvs)
248 return -ENODEV;
249
250 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
251 if (!radio_parms)
252 return -ENOMEM;
253
254 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
255
256 /* 2.4GHz parameters */
257 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
258 sizeof(struct wl128x_ini_band_params_2));
259 memcpy(&radio_parms->dyn_params_2,
260 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
261 sizeof(struct wl128x_ini_fem_params_2));
262
263 /* 5GHz parameters */
264 memcpy(&radio_parms->static_params_5,
265 &nvs->stat_radio_params_5,
266 sizeof(struct wl128x_ini_band_params_5));
267 memcpy(&radio_parms->dyn_params_5,
268 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
269 sizeof(struct wl128x_ini_fem_params_5));
270
271 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
272
273 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
274 radio_parms, sizeof(*radio_parms));
275
276 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
277 if (ret < 0)
278 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
279
280 kfree(radio_parms);
281 return ret;
282}
283
189int wl1271_cmd_ext_radio_parms(struct wl1271 *wl) 284int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
190{ 285{
191 struct wl1271_ext_radio_parms_cmd *ext_radio_parms; 286 struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 54c12e71417e..5cac95d9480c 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -32,7 +32,9 @@ struct acx_header;
32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, 32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33 size_t res_len); 33 size_t res_len);
34int wl1271_cmd_general_parms(struct wl1271 *wl); 34int wl1271_cmd_general_parms(struct wl1271 *wl);
35int wl128x_cmd_general_parms(struct wl1271 *wl);
35int wl1271_cmd_radio_parms(struct wl1271 *wl); 36int wl1271_cmd_radio_parms(struct wl1271 *wl);
37int wl128x_cmd_radio_parms(struct wl1271 *wl);
36int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); 38int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
37int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type); 39int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
38int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 40int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
@@ -415,6 +417,21 @@ struct wl1271_general_parms_cmd {
415 u8 padding[3]; 417 u8 padding[3];
416} __packed; 418} __packed;
417 419
420struct wl128x_general_parms_cmd {
421 struct wl1271_cmd_header header;
422
423 struct wl1271_cmd_test_header test;
424
425 struct wl128x_ini_general_params general_params;
426
427 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
428 u8 sr_sen_n_p;
429 u8 sr_sen_n_p_gain;
430 u8 sr_sen_nrn;
431 u8 sr_sen_prn;
432 u8 padding[3];
433} __packed;
434
418struct wl1271_radio_parms_cmd { 435struct wl1271_radio_parms_cmd {
419 struct wl1271_cmd_header header; 436 struct wl1271_cmd_header header;
420 437
@@ -431,6 +448,23 @@ struct wl1271_radio_parms_cmd {
431 u8 padding3[2]; 448 u8 padding3[2];
432} __packed; 449} __packed;
433 450
451struct wl128x_radio_parms_cmd {
452 struct wl1271_cmd_header header;
453
454 struct wl1271_cmd_test_header test;
455
456 /* Static radio parameters */
457 struct wl128x_ini_band_params_2 static_params_2;
458 struct wl128x_ini_band_params_5 static_params_5;
459
460 u8 fem_vendor_and_options;
461
462 /* Dynamic radio parameters */
463 struct wl128x_ini_fem_params_2 dyn_params_2;
464 u8 padding2;
465 struct wl128x_ini_fem_params_5 dyn_params_5;
466} __packed;
467
434struct wl1271_ext_radio_parms_cmd { 468struct wl1271_ext_radio_parms_cmd {
435 struct wl1271_cmd_header header; 469 struct wl1271_cmd_header header;
436 470
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 8a8323896eec..1ab6c86aac40 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -396,12 +396,43 @@ enum {
396 CONF_SG_TEMP_PARAM_3, 396 CONF_SG_TEMP_PARAM_3,
397 CONF_SG_TEMP_PARAM_4, 397 CONF_SG_TEMP_PARAM_4,
398 CONF_SG_TEMP_PARAM_5, 398 CONF_SG_TEMP_PARAM_5,
399 CONF_SG_PARAMS_MAX, 399
400 /*
401 * AP beacon miss
402 *
403 * Range: 0 - 255
404 */
405 CONF_SG_AP_BEACON_MISS_TX,
406
407 /*
408 * AP RX window length
409 *
410 * Range: 0 - 50
411 */
412 CONF_SG_RX_WINDOW_LENGTH,
413
414 /*
415 * AP connection protection time
416 *
417 * Range: 0 - 5000
418 */
419 CONF_SG_AP_CONNECTION_PROTECTION_TIME,
420
421 CONF_SG_TEMP_PARAM_6,
422 CONF_SG_TEMP_PARAM_7,
423 CONF_SG_TEMP_PARAM_8,
424 CONF_SG_TEMP_PARAM_9,
425 CONF_SG_TEMP_PARAM_10,
426
427 CONF_SG_STA_PARAMS_MAX = CONF_SG_TEMP_PARAM_5 + 1,
428 CONF_SG_AP_PARAMS_MAX = CONF_SG_TEMP_PARAM_10 + 1,
429
400 CONF_SG_PARAMS_ALL = 0xff 430 CONF_SG_PARAMS_ALL = 0xff
401}; 431};
402 432
403struct conf_sg_settings { 433struct conf_sg_settings {
404 u32 params[CONF_SG_PARAMS_MAX]; 434 u32 sta_params[CONF_SG_STA_PARAMS_MAX];
435 u32 ap_params[CONF_SG_AP_PARAMS_MAX];
405 u8 state; 436 u8 state;
406}; 437};
407 438
@@ -509,6 +540,12 @@ struct conf_rx_settings {
509 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ 540 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
510 CONF_HW_BIT_RATE_54MBPS) 541 CONF_HW_BIT_RATE_54MBPS)
511 542
543#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \
544 CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \
545 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
546 CONF_HW_BIT_RATE_54MBPS)
547
548
512/* 549/*
513 * Default rates for management traffic when operating in AP mode. This 550 * Default rates for management traffic when operating in AP mode. This
514 * should be configured according to the basic rate set of the AP 551 * should be configured according to the basic rate set of the AP
@@ -516,6 +553,13 @@ struct conf_rx_settings {
516#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \ 553#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
517 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS) 554 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
518 555
556/*
 557 * Default rates for operating as IBSS. Use 11b rates.
558 */
559#define CONF_TX_IBSS_DEFAULT_RATES (CONF_HW_BIT_RATE_1MBPS | \
560 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
 561 CONF_HW_BIT_RATE_11MBPS)
562
519struct conf_tx_rate_class { 563struct conf_tx_rate_class {
520 564
521 /* 565 /*
@@ -667,22 +711,6 @@ struct conf_tx_settings {
667 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT]; 711 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
668 712
669 /* 713 /*
670 * Configuration for rate classes in AP-mode. These rate classes
671 * are for the AC TX queues
672 */
673 struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
674
675 /*
676 * Management TX rate class for AP-mode.
677 */
678 struct conf_tx_rate_class ap_mgmt_conf;
679
680 /*
681 * Broadcast TX rate class for AP-mode.
682 */
683 struct conf_tx_rate_class ap_bcst_conf;
684
685 /*
686 * AP-mode - allow this number of TX retries to a station before an 714 * AP-mode - allow this number of TX retries to a station before an
687 * event is triggered from FW. 715 * event is triggered from FW.
688 */ 716 */
@@ -1004,7 +1032,9 @@ enum {
1004 CONF_REF_CLK_19_2_E, 1032 CONF_REF_CLK_19_2_E,
1005 CONF_REF_CLK_26_E, 1033 CONF_REF_CLK_26_E,
1006 CONF_REF_CLK_38_4_E, 1034 CONF_REF_CLK_38_4_E,
1007 CONF_REF_CLK_52_E 1035 CONF_REF_CLK_52_E,
1036 CONF_REF_CLK_38_4_M_XTAL,
1037 CONF_REF_CLK_26_M_XTAL,
1008}; 1038};
1009 1039
1010enum single_dual_band_enum { 1040enum single_dual_band_enum {
@@ -1018,15 +1048,6 @@ enum single_dual_band_enum {
1018#define CONF_NUMBER_OF_CHANNELS_2_4 14 1048#define CONF_NUMBER_OF_CHANNELS_2_4 14
1019#define CONF_NUMBER_OF_CHANNELS_5 35 1049#define CONF_NUMBER_OF_CHANNELS_5 35
1020 1050
1021struct conf_radio_parms {
1022 /*
1023 * FEM parameter set to use
1024 *
1025 * Range: 0 or 1
1026 */
1027 u8 fem;
1028};
1029
1030struct conf_itrim_settings { 1051struct conf_itrim_settings {
1031 /* enable dco itrim */ 1052 /* enable dco itrim */
1032 u8 enable; 1053 u8 enable;
@@ -1126,6 +1147,26 @@ struct conf_scan_settings {
1126 1147
1127}; 1148};
1128 1149
1150struct conf_sched_scan_settings {
1151 /* minimum time to wait on the channel for active scans (in TUs) */
1152 u16 min_dwell_time_active;
1153
1154 /* maximum time to wait on the channel for active scans (in TUs) */
1155 u16 max_dwell_time_active;
1156
1157 /* time to wait on the channel for passive scans (in TUs) */
1158 u32 dwell_time_passive;
1159
1160 /* number of probe requests to send on each channel in active scans */
1161 u8 num_probe_reqs;
1162
1163 /* RSSI threshold to be used for filtering */
1164 s8 rssi_threshold;
1165
1166 /* SNR threshold to be used for filtering */
1167 s8 snr_threshold;
1168};
1169
1129/* these are number of channels on the band divided by two, rounded up */ 1170/* these are number of channels on the band divided by two, rounded up */
1130#define CONF_TX_PWR_COMPENSATION_LEN_2 7 1171#define CONF_TX_PWR_COMPENSATION_LEN_2 7
1131#define CONF_TX_PWR_COMPENSATION_LEN_5 18 1172#define CONF_TX_PWR_COMPENSATION_LEN_5 18
@@ -1191,6 +1232,19 @@ struct conf_memory_settings {
1191 u8 tx_min; 1232 u8 tx_min;
1192}; 1233};
1193 1234
1235struct conf_fm_coex {
1236 u8 enable;
1237 u8 swallow_period;
1238 u8 n_divider_fref_set_1;
1239 u8 n_divider_fref_set_2;
1240 u16 m_divider_fref_set_1;
1241 u16 m_divider_fref_set_2;
1242 u32 coex_pll_stabilization_time;
1243 u16 ldo_stabilization_time;
1244 u8 fm_disturbed_band_margin;
1245 u8 swallow_clk_diff;
1246};
1247
1194struct conf_drv_settings { 1248struct conf_drv_settings {
1195 struct conf_sg_settings sg; 1249 struct conf_sg_settings sg;
1196 struct conf_rx_settings rx; 1250 struct conf_rx_settings rx;
@@ -1200,9 +1254,13 @@ struct conf_drv_settings {
1200 struct conf_pm_config_settings pm_config; 1254 struct conf_pm_config_settings pm_config;
1201 struct conf_roam_trigger_settings roam_trigger; 1255 struct conf_roam_trigger_settings roam_trigger;
1202 struct conf_scan_settings scan; 1256 struct conf_scan_settings scan;
1257 struct conf_sched_scan_settings sched_scan;
1203 struct conf_rf_settings rf; 1258 struct conf_rf_settings rf;
1204 struct conf_ht_setting ht; 1259 struct conf_ht_setting ht;
1205 struct conf_memory_settings mem; 1260 struct conf_memory_settings mem_wl127x;
1261 struct conf_memory_settings mem_wl128x;
1262 struct conf_fm_coex fm_coex;
1263 u8 hci_io_ds;
1206}; 1264};
1207 1265
1208#endif 1266#endif
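
With the conf.h change above, SoftGemini parameters are kept in two arrays sized by CONF_SG_STA_PARAMS_MAX and CONF_SG_AP_PARAMS_MAX, and init.c picks the set matching the BSS type (wl1271_acx_sta_sg_cfg() vs wl1271_acx_ap_sg_cfg()). A compact sketch of that selection follows; the array sizes and the sg_params_for() helper are made up for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sizes; the real ones follow the CONF_SG_* enum in conf.h. */
#define SG_STA_PARAMS_MAX 27
#define SG_AP_PARAMS_MAX  32

struct conf_sg_settings {
        uint32_t sta_params[SG_STA_PARAMS_MAX];
        uint32_t ap_params[SG_AP_PARAMS_MAX];
        uint8_t state;
};

/* Pick the parameter set (and its length) for the current role. */
static const uint32_t *sg_params_for(const struct conf_sg_settings *sg,
                                     bool is_ap, size_t *len)
{
        if (is_ap) {
                *len = SG_AP_PARAMS_MAX;
                return sg->ap_params;
        }
        *len = SG_STA_PARAMS_MAX;
        return sg->sta_params;
}

int main(void)
{
        struct conf_sg_settings sg = { .state = 1 };
        size_t len;
        const uint32_t *p = sg_params_for(&sg, true, &len);

        printf("AP set has %zu params, first = %u\n", len, (unsigned)p[0]);
        return 0;
}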
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 8e75b09723b9..f1f8df9b6cd7 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -267,7 +267,7 @@ static ssize_t gpio_power_write(struct file *file,
267 } 267 }
268 buf[len] = '\0'; 268 buf[len] = '\0';
269 269
270 ret = strict_strtoul(buf, 0, &value); 270 ret = kstrtoul(buf, 0, &value);
271 if (ret < 0) { 271 if (ret < 0) {
272 wl1271_warning("illegal value in gpio_power"); 272 wl1271_warning("illegal value in gpio_power");
273 return -EINVAL; 273 return -EINVAL;
@@ -291,6 +291,242 @@ static const struct file_operations gpio_power_ops = {
291 .llseek = default_llseek, 291 .llseek = default_llseek,
292}; 292};
293 293
294static ssize_t start_recovery_write(struct file *file,
295 const char __user *user_buf,
296 size_t count, loff_t *ppos)
297{
298 struct wl1271 *wl = file->private_data;
299
300 mutex_lock(&wl->mutex);
301 ieee80211_queue_work(wl->hw, &wl->recovery_work);
302 mutex_unlock(&wl->mutex);
303
304 return count;
305}
306
307static const struct file_operations start_recovery_ops = {
308 .write = start_recovery_write,
309 .open = wl1271_open_file_generic,
310 .llseek = default_llseek,
311};
312
313static ssize_t driver_state_read(struct file *file, char __user *user_buf,
314 size_t count, loff_t *ppos)
315{
316 struct wl1271 *wl = file->private_data;
317 int res = 0;
318 char buf[1024];
319
320 mutex_lock(&wl->mutex);
321
322#define DRIVER_STATE_PRINT(x, fmt) \
323 (res += scnprintf(buf + res, sizeof(buf) - res,\
324 #x " = " fmt "\n", wl->x))
325
326#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
327#define DRIVER_STATE_PRINT_INT(x) DRIVER_STATE_PRINT(x, "%d")
328#define DRIVER_STATE_PRINT_STR(x) DRIVER_STATE_PRINT(x, "%s")
329#define DRIVER_STATE_PRINT_LHEX(x) DRIVER_STATE_PRINT(x, "0x%lx")
330#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
331
332 DRIVER_STATE_PRINT_INT(tx_blocks_available);
333 DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
334 DRIVER_STATE_PRINT_INT(tx_frames_cnt);
335 DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]);
336 DRIVER_STATE_PRINT_INT(tx_queue_count);
337 DRIVER_STATE_PRINT_INT(tx_packets_count);
338 DRIVER_STATE_PRINT_INT(tx_results_count);
339 DRIVER_STATE_PRINT_LHEX(flags);
340 DRIVER_STATE_PRINT_INT(tx_blocks_freed[0]);
341 DRIVER_STATE_PRINT_INT(tx_blocks_freed[1]);
342 DRIVER_STATE_PRINT_INT(tx_blocks_freed[2]);
343 DRIVER_STATE_PRINT_INT(tx_blocks_freed[3]);
344 DRIVER_STATE_PRINT_INT(tx_security_last_seq);
345 DRIVER_STATE_PRINT_INT(rx_counter);
346 DRIVER_STATE_PRINT_INT(session_counter);
347 DRIVER_STATE_PRINT_INT(state);
348 DRIVER_STATE_PRINT_INT(bss_type);
349 DRIVER_STATE_PRINT_INT(channel);
350 DRIVER_STATE_PRINT_HEX(rate_set);
351 DRIVER_STATE_PRINT_HEX(basic_rate_set);
352 DRIVER_STATE_PRINT_HEX(basic_rate);
353 DRIVER_STATE_PRINT_INT(band);
354 DRIVER_STATE_PRINT_INT(beacon_int);
355 DRIVER_STATE_PRINT_INT(psm_entry_retry);
356 DRIVER_STATE_PRINT_INT(ps_poll_failures);
357 DRIVER_STATE_PRINT_HEX(filters);
358 DRIVER_STATE_PRINT_HEX(rx_config);
359 DRIVER_STATE_PRINT_HEX(rx_filter);
360 DRIVER_STATE_PRINT_INT(power_level);
361 DRIVER_STATE_PRINT_INT(rssi_thold);
362 DRIVER_STATE_PRINT_INT(last_rssi_event);
363 DRIVER_STATE_PRINT_INT(sg_enabled);
364 DRIVER_STATE_PRINT_INT(enable_11a);
365 DRIVER_STATE_PRINT_INT(noise);
366 DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
367 DRIVER_STATE_PRINT_INT(last_tx_hlid);
368 DRIVER_STATE_PRINT_INT(ba_support);
369 DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
370 DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
371 DRIVER_STATE_PRINT_LHEX(ap_ps_map);
372 DRIVER_STATE_PRINT_HEX(quirks);
373 DRIVER_STATE_PRINT_HEX(irq);
374 DRIVER_STATE_PRINT_HEX(ref_clock);
375 DRIVER_STATE_PRINT_HEX(tcxo_clock);
376 DRIVER_STATE_PRINT_HEX(hw_pg_ver);
377 DRIVER_STATE_PRINT_HEX(platform_quirks);
378 DRIVER_STATE_PRINT_HEX(chip.id);
379 DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
380 DRIVER_STATE_PRINT_INT(sched_scanning);
381
382#undef DRIVER_STATE_PRINT_INT
383#undef DRIVER_STATE_PRINT_LONG
384#undef DRIVER_STATE_PRINT_HEX
385#undef DRIVER_STATE_PRINT_LHEX
386#undef DRIVER_STATE_PRINT_STR
387#undef DRIVER_STATE_PRINT
388
389 mutex_unlock(&wl->mutex);
390
391 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
392}
393
394static const struct file_operations driver_state_ops = {
395 .read = driver_state_read,
396 .open = wl1271_open_file_generic,
397 .llseek = default_llseek,
398};
399
400static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
401 size_t count, loff_t *ppos)
402{
403 struct wl1271 *wl = file->private_data;
404 u8 value;
405
406 if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_DTIM ||
407 wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_DTIM)
408 value = wl->conf.conn.listen_interval;
409 else
410 value = 0;
411
412 return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
413}
414
415static ssize_t dtim_interval_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct wl1271 *wl = file->private_data;
420 char buf[10];
421 size_t len;
422 unsigned long value;
423 int ret;
424
425 len = min(count, sizeof(buf) - 1);
426 if (copy_from_user(buf, user_buf, len))
427 return -EFAULT;
428 buf[len] = '\0';
429
430 ret = kstrtoul(buf, 0, &value);
431 if (ret < 0) {
432 wl1271_warning("illegal value for dtim_interval");
433 return -EINVAL;
434 }
435
436 if (value < 1 || value > 10) {
437 wl1271_warning("dtim value is not in valid range");
438 return -ERANGE;
439 }
440
441 mutex_lock(&wl->mutex);
442
443 wl->conf.conn.listen_interval = value;
444 /* for some reason there are different event types for 1 and >1 */
445 if (value == 1)
446 wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_DTIM;
447 else
448 wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM;
449
450 /*
451 * we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only
452 * take effect on the next time we enter psm.
453 */
454 mutex_unlock(&wl->mutex);
455 return count;
456}
457
458static const struct file_operations dtim_interval_ops = {
459 .read = dtim_interval_read,
460 .write = dtim_interval_write,
461 .open = wl1271_open_file_generic,
462 .llseek = default_llseek,
463};
464
465static ssize_t beacon_interval_read(struct file *file, char __user *user_buf,
466 size_t count, loff_t *ppos)
467{
468 struct wl1271 *wl = file->private_data;
469 u8 value;
470
471 if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_BEACON ||
472 wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_BEACONS)
473 value = wl->conf.conn.listen_interval;
474 else
475 value = 0;
476
477 return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
478}
479
480static ssize_t beacon_interval_write(struct file *file,
481 const char __user *user_buf,
482 size_t count, loff_t *ppos)
483{
484 struct wl1271 *wl = file->private_data;
485 char buf[10];
486 size_t len;
487 unsigned long value;
488 int ret;
489
490 len = min(count, sizeof(buf) - 1);
491 if (copy_from_user(buf, user_buf, len))
492 return -EFAULT;
493 buf[len] = '\0';
494
495 ret = kstrtoul(buf, 0, &value);
496 if (ret < 0) {
497 wl1271_warning("illegal value for beacon_interval");
498 return -EINVAL;
499 }
500
501 if (value < 1 || value > 255) {
502 wl1271_warning("beacon interval value is not in valid range");
503 return -ERANGE;
504 }
505
506 mutex_lock(&wl->mutex);
507
508 wl->conf.conn.listen_interval = value;
509 /* for some reason there are different event types for 1 and >1 */
510 if (value == 1)
511 wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_BEACON;
512 else
513 wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_BEACONS;
514
515 /*
516 * we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only
517 * take effect on the next time we enter psm.
518 */
519 mutex_unlock(&wl->mutex);
520 return count;
521}
522
523static const struct file_operations beacon_interval_ops = {
524 .read = beacon_interval_read,
525 .write = beacon_interval_write,
526 .open = wl1271_open_file_generic,
527 .llseek = default_llseek,
528};
529
294static int wl1271_debugfs_add_files(struct wl1271 *wl, 530static int wl1271_debugfs_add_files(struct wl1271 *wl,
295 struct dentry *rootdir) 531 struct dentry *rootdir)
296{ 532{
@@ -399,6 +635,10 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
399 DEBUGFS_ADD(excessive_retries, rootdir); 635 DEBUGFS_ADD(excessive_retries, rootdir);
400 636
401 DEBUGFS_ADD(gpio_power, rootdir); 637 DEBUGFS_ADD(gpio_power, rootdir);
638 DEBUGFS_ADD(start_recovery, rootdir);
639 DEBUGFS_ADD(driver_state, rootdir);
640 DEBUGFS_ADD(dtim_interval, rootdir);
641 DEBUGFS_ADD(beacon_interval, rootdir);
402 642
403 return 0; 643 return 0;
404 644
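
The driver_state debugfs file above builds its report with a DRIVER_STATE_PRINT(x, fmt) macro that stringizes the field name and appends to a buffer via scnprintf(). Here is a stand-alone sketch of the pattern using plain snprintf() (the kernel's scnprintf() additionally caps the returned length, which is why the driver prefers it); the struct and its fields are invented for the example.

#include <stdio.h>

struct state {
        int channel;
        unsigned int flags;
};

int main(void)
{
        struct state s = { .channel = 6, .flags = 0x1a };
        char buf[256];
        int res = 0;

        /* #x stringizes the member name so each line reads "name = value" */
#define STATE_PRINT(x, fmt) \
        (res += snprintf(buf + res, sizeof(buf) - res, #x " = " fmt "\n", s.x))

        STATE_PRINT(channel, "%d");
        STATE_PRINT(flags, "0x%x");
#undef STATE_PRINT

        fputs(buf, stdout);
        return 0;
}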
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 1b170c5cc595..c3c554cd6580 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -33,6 +33,7 @@ void wl1271_pspoll_work(struct work_struct *work)
33{ 33{
34 struct delayed_work *dwork; 34 struct delayed_work *dwork;
35 struct wl1271 *wl; 35 struct wl1271 *wl;
36 int ret;
36 37
37 dwork = container_of(work, struct delayed_work, work); 38 dwork = container_of(work, struct delayed_work, work);
38 wl = container_of(dwork, struct wl1271, pspoll_work); 39 wl = container_of(dwork, struct wl1271, pspoll_work);
@@ -55,8 +56,13 @@ void wl1271_pspoll_work(struct work_struct *work)
55 * delivery failure occurred, and no-one changed state since, so 56 * delivery failure occurred, and no-one changed state since, so
56 * we should go back to powersave. 57 * we should go back to powersave.
57 */ 58 */
59 ret = wl1271_ps_elp_wakeup(wl);
60 if (ret < 0)
61 goto out;
62
58 wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true); 63 wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
59 64
65 wl1271_ps_elp_sleep(wl);
60out: 66out:
61 mutex_unlock(&wl->mutex); 67 mutex_unlock(&wl->mutex);
62}; 68};
@@ -132,8 +138,10 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
132 if (ret < 0) 138 if (ret < 0)
133 break; 139 break;
134 140
135 /* go to extremely low power mode */ 141 if (wl->ps_compl) {
136 wl1271_ps_elp_sleep(wl); 142 complete(wl->ps_compl);
143 wl->ps_compl = NULL;
144 }
137 break; 145 break;
138 default: 146 default:
139 break; 147 break;
@@ -187,6 +195,22 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
187 wl1271_scan_stm(wl); 195 wl1271_scan_stm(wl);
188 } 196 }
189 197
198 if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
199 wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_REPORT_EVENT "
200 "(status 0x%0x)", mbox->scheduled_scan_status);
201
202 wl1271_scan_sched_scan_results(wl);
203 }
204
205 if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID) {
206 wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
207 "(status 0x%0x)", mbox->scheduled_scan_status);
208 if (wl->sched_scanning) {
209 wl1271_scan_sched_scan_stop(wl);
210 ieee80211_sched_scan_stopped(wl->hw);
211 }
212 }
213
190 /* disable dynamic PS when requested by the firmware */ 214 /* disable dynamic PS when requested by the firmware */
191 if (vector & SOFT_GEMINI_SENSE_EVENT_ID && 215 if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
192 wl->bss_type == BSS_TYPE_STA_BSS) { 216 wl->bss_type == BSS_TYPE_STA_BSS) {
@@ -228,6 +252,12 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
228 wl1271_event_rssi_trigger(wl, mbox); 252 wl1271_event_rssi_trigger(wl, mbox);
229 } 253 }
230 254
255 if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap) {
 256 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_EVENT_ID");
257 if (wl->vif)
258 wl1271_tx_dummy_packet(wl);
259 }
260
231 if (wl->vif && beacon_loss) 261 if (wl->vif && beacon_loss)
232 ieee80211_connection_loss(wl->vif); 262 ieee80211_connection_loss(wl->vif);
233 263
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 0e80886f3031..b6cf06e565a4 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -59,7 +59,10 @@ enum {
59 BSS_LOSE_EVENT_ID = BIT(18), 59 BSS_LOSE_EVENT_ID = BIT(18),
60 REGAINED_BSS_EVENT_ID = BIT(19), 60 REGAINED_BSS_EVENT_ID = BIT(19),
61 ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20), 61 ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
 62 STA_REMOVE_COMPLETE_EVENT_ID = BIT(21), /* AP */ 62 /* STA: dummy packet for dynamic mem blocks */
63 DUMMY_PACKET_EVENT_ID = BIT(21),
64 /* AP: STA remove complete */
65 STA_REMOVE_COMPLETE_EVENT_ID = BIT(21),
63 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), 66 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
64 SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23), 67 SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
65 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), 68 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
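
event.h above reuses BIT(21) for two different events, DUMMY_PACKET_EVENT_ID when running as a station and STA_REMOVE_COMPLETE_EVENT_ID in AP mode, so the handler must consult the BSS type before acting on the bit. A tiny sketch of that role-dependent decode:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)                       (1u << (n))
#define DUMMY_PACKET_EVENT_ID        BIT(21)    /* STA interpretation */
#define STA_REMOVE_COMPLETE_EVENT_ID BIT(21)    /* AP interpretation */

static void handle_event(unsigned int vector, bool is_ap)
{
        if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap)
                puts("STA: send dummy packet for dynamic memory blocks");

        if ((vector & STA_REMOVE_COMPLETE_EVENT_ID) && is_ap)
                puts("AP: station removal completed");
}

int main(void)
{
        handle_event(BIT(21), false);
        handle_event(BIT(21), true);
        return 0;
}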
diff --git a/drivers/net/wireless/wl12xx/ini.h b/drivers/net/wireless/wl12xx/ini.h
index c330a2583dfd..1420c842b8f1 100644
--- a/drivers/net/wireless/wl12xx/ini.h
+++ b/drivers/net/wireless/wl12xx/ini.h
@@ -41,6 +41,28 @@ struct wl1271_ini_general_params {
41 u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM]; 41 u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
42} __packed; 42} __packed;
43 43
44#define WL128X_INI_MAX_SETTINGS_PARAM 4
45
46struct wl128x_ini_general_params {
47 u8 ref_clock;
48 u8 settling_time;
49 u8 clk_valid_on_wakeup;
50 u8 tcxo_ref_clock;
51 u8 tcxo_settling_time;
52 u8 tcxo_valid_on_wakeup;
53 u8 tcxo_ldo_voltage;
54 u8 xtal_itrim_val;
55 u8 platform_conf;
56 u8 dual_mode_select;
57 u8 tx_bip_fem_auto_detect;
58 u8 tx_bip_fem_manufacturer;
59 u8 general_settings[WL128X_INI_MAX_SETTINGS_PARAM];
60 u8 sr_state;
61 u8 srf1[WL1271_INI_MAX_SMART_REFLEX_PARAM];
62 u8 srf2[WL1271_INI_MAX_SMART_REFLEX_PARAM];
63 u8 srf3[WL1271_INI_MAX_SMART_REFLEX_PARAM];
64} __packed;
65
44#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15 66#define WL1271_INI_RSSI_PROCESS_COMPENS_SIZE 15
45 67
46struct wl1271_ini_band_params_2 { 68struct wl1271_ini_band_params_2 {
@@ -49,9 +71,16 @@ struct wl1271_ini_band_params_2 {
49 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; 71 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
50} __packed; 72} __packed;
51 73
52#define WL1271_INI_RATE_GROUP_COUNT 6
53#define WL1271_INI_CHANNEL_COUNT_2 14 74#define WL1271_INI_CHANNEL_COUNT_2 14
54 75
76struct wl128x_ini_band_params_2 {
77 u8 rx_trace_insertion_loss;
78 u8 tx_trace_loss[WL1271_INI_CHANNEL_COUNT_2];
79 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
80} __packed;
81
82#define WL1271_INI_RATE_GROUP_COUNT 6
83
55struct wl1271_ini_fem_params_2 { 84struct wl1271_ini_fem_params_2 {
56 __le16 tx_bip_ref_pd_voltage; 85 __le16 tx_bip_ref_pd_voltage;
57 u8 tx_bip_ref_power; 86 u8 tx_bip_ref_power;
@@ -68,6 +97,28 @@ struct wl1271_ini_fem_params_2 {
68 u8 normal_to_degraded_high_thr; 97 u8 normal_to_degraded_high_thr;
69} __packed; 98} __packed;
70 99
100#define WL128X_INI_RATE_GROUP_COUNT 7
101/* low and high temperatures */
102#define WL128X_INI_PD_VS_TEMPERATURE_RANGES 2
103
104struct wl128x_ini_fem_params_2 {
105 __le16 tx_bip_ref_pd_voltage;
106 u8 tx_bip_ref_power;
107 u8 tx_bip_ref_offset;
108 u8 tx_per_rate_pwr_limits_normal[WL128X_INI_RATE_GROUP_COUNT];
109 u8 tx_per_rate_pwr_limits_degraded[WL128X_INI_RATE_GROUP_COUNT];
110 u8 tx_per_rate_pwr_limits_extreme[WL128X_INI_RATE_GROUP_COUNT];
111 u8 tx_per_chan_pwr_limits_11b[WL1271_INI_CHANNEL_COUNT_2];
112 u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_2];
113 u8 tx_pd_vs_rate_offsets[WL128X_INI_RATE_GROUP_COUNT];
114 u8 tx_ibias[WL128X_INI_RATE_GROUP_COUNT + 1];
115 u8 tx_pd_vs_chan_offsets[WL1271_INI_CHANNEL_COUNT_2];
116 u8 tx_pd_vs_temperature[WL128X_INI_PD_VS_TEMPERATURE_RANGES];
117 u8 rx_fem_insertion_loss;
118 u8 degraded_low_to_normal_thr;
119 u8 normal_to_degraded_high_thr;
120} __packed;
121
71#define WL1271_INI_CHANNEL_COUNT_5 35 122#define WL1271_INI_CHANNEL_COUNT_5 35
72#define WL1271_INI_SUB_BAND_COUNT_5 7 123#define WL1271_INI_SUB_BAND_COUNT_5 7
73 124
@@ -77,6 +128,12 @@ struct wl1271_ini_band_params_5 {
77 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE]; 128 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
78} __packed; 129} __packed;
79 130
131struct wl128x_ini_band_params_5 {
132 u8 rx_trace_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
133 u8 tx_trace_loss[WL1271_INI_CHANNEL_COUNT_5];
134 u8 rx_rssi_process_compens[WL1271_INI_RSSI_PROCESS_COMPENS_SIZE];
135} __packed;
136
80struct wl1271_ini_fem_params_5 { 137struct wl1271_ini_fem_params_5 {
81 __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5]; 138 __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
82 u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5]; 139 u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5];
@@ -92,6 +149,23 @@ struct wl1271_ini_fem_params_5 {
92 u8 normal_to_degraded_high_thr; 149 u8 normal_to_degraded_high_thr;
93} __packed; 150} __packed;
94 151
152struct wl128x_ini_fem_params_5 {
153 __le16 tx_bip_ref_pd_voltage[WL1271_INI_SUB_BAND_COUNT_5];
154 u8 tx_bip_ref_power[WL1271_INI_SUB_BAND_COUNT_5];
155 u8 tx_bip_ref_offset[WL1271_INI_SUB_BAND_COUNT_5];
156 u8 tx_per_rate_pwr_limits_normal[WL128X_INI_RATE_GROUP_COUNT];
157 u8 tx_per_rate_pwr_limits_degraded[WL128X_INI_RATE_GROUP_COUNT];
158 u8 tx_per_rate_pwr_limits_extreme[WL128X_INI_RATE_GROUP_COUNT];
159 u8 tx_per_chan_pwr_limits_ofdm[WL1271_INI_CHANNEL_COUNT_5];
160 u8 tx_pd_vs_rate_offsets[WL128X_INI_RATE_GROUP_COUNT];
161 u8 tx_ibias[WL128X_INI_RATE_GROUP_COUNT];
162 u8 tx_pd_vs_chan_offsets[WL1271_INI_CHANNEL_COUNT_5];
163 u8 tx_pd_vs_temperature[WL1271_INI_SUB_BAND_COUNT_5 *
164 WL128X_INI_PD_VS_TEMPERATURE_RANGES];
165 u8 rx_fem_insertion_loss[WL1271_INI_SUB_BAND_COUNT_5];
166 u8 degraded_low_to_normal_thr;
167 u8 normal_to_degraded_high_thr;
168} __packed;
95 169
96/* NVS data structure */ 170/* NVS data structure */
97#define WL1271_INI_NVS_SECTION_SIZE 468 171#define WL1271_INI_NVS_SECTION_SIZE 468
@@ -100,7 +174,7 @@ struct wl1271_ini_fem_params_5 {
100#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800 174#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
101 175
102struct wl1271_nvs_file { 176struct wl1271_nvs_file {
103 /* NVS section */ 177 /* NVS section - must be first! */
104 u8 nvs[WL1271_INI_NVS_SECTION_SIZE]; 178 u8 nvs[WL1271_INI_NVS_SECTION_SIZE];
105 179
106 /* INI section */ 180 /* INI section */
@@ -120,4 +194,24 @@ struct wl1271_nvs_file {
120 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT]; 194 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
121} __packed; 195} __packed;
122 196
197struct wl128x_nvs_file {
198 /* NVS section - must be first! */
199 u8 nvs[WL1271_INI_NVS_SECTION_SIZE];
200
201 /* INI section */
202 struct wl128x_ini_general_params general_params;
203 u8 fem_vendor_and_options;
204 struct wl128x_ini_band_params_2 stat_radio_params_2;
205 u8 padding2;
206 struct {
207 struct wl128x_ini_fem_params_2 params;
208 u8 padding;
209 } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
210 struct wl128x_ini_band_params_5 stat_radio_params_5;
211 u8 padding3;
212 struct {
213 struct wl128x_ini_fem_params_5 params;
214 u8 padding;
215 } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
216} __packed;
123#endif 217#endif
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 6072fe457135..a8f4f156c055 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -31,6 +31,7 @@
31#include "cmd.h" 31#include "cmd.h"
32#include "reg.h" 32#include "reg.h"
33#include "tx.h" 33#include "tx.h"
34#include "io.h"
34 35
35int wl1271_sta_init_templates_config(struct wl1271 *wl) 36int wl1271_sta_init_templates_config(struct wl1271 *wl)
36{ 37{
@@ -257,7 +258,7 @@ int wl1271_init_phy_config(struct wl1271 *wl)
257 if (ret < 0) 258 if (ret < 0)
258 return ret; 259 return ret;
259 260
260 ret = wl1271_acx_rts_threshold(wl, wl->conf.rx.rts_threshold); 261 ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
261 if (ret < 0) 262 if (ret < 0)
262 return ret; 263 return ret;
263 264
@@ -284,7 +285,10 @@ int wl1271_init_pta(struct wl1271 *wl)
284{ 285{
285 int ret; 286 int ret;
286 287
287 ret = wl1271_acx_sg_cfg(wl); 288 if (wl->bss_type == BSS_TYPE_AP_BSS)
289 ret = wl1271_acx_ap_sg_cfg(wl);
290 else
291 ret = wl1271_acx_sta_sg_cfg(wl);
288 if (ret < 0) 292 if (ret < 0)
289 return ret; 293 return ret;
290 294
@@ -321,9 +325,11 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
321{ 325{
322 int ret; 326 int ret;
323 327
324 ret = wl1271_cmd_ext_radio_parms(wl); 328 if (wl->chip.id != CHIP_ID_1283_PG20) {
325 if (ret < 0) 329 ret = wl1271_cmd_ext_radio_parms(wl);
326 return ret; 330 if (ret < 0)
331 return ret;
332 }
327 333
328 /* PS config */ 334 /* PS config */
329 ret = wl1271_acx_config_ps(wl); 335 ret = wl1271_acx_config_ps(wl);
@@ -348,8 +354,8 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
348 if (ret < 0) 354 if (ret < 0)
349 return ret; 355 return ret;
350 356
351 /* Bluetooth WLAN coexistence */ 357 /* FM WLAN coexistence */
352 ret = wl1271_init_pta(wl); 358 ret = wl1271_acx_fm_coex(wl);
353 if (ret < 0) 359 if (ret < 0)
354 return ret; 360 return ret;
355 361
@@ -407,7 +413,7 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
407 413
408static int wl1271_ap_hw_init(struct wl1271 *wl) 414static int wl1271_ap_hw_init(struct wl1271 *wl)
409{ 415{
410 int ret, i; 416 int ret;
411 417
412 ret = wl1271_ap_init_templates_config(wl); 418 ret = wl1271_ap_init_templates_config(wl);
413 if (ret < 0) 419 if (ret < 0)
@@ -418,23 +424,7 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
418 if (ret < 0) 424 if (ret < 0)
419 return ret; 425 return ret;
420 426
421 /* Configure initial TX rate classes */ 427 ret = wl1271_init_ap_rates(wl);
422 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
423 ret = wl1271_acx_ap_rate_policy(wl,
424 &wl->conf.tx.ap_rc_conf[i], i);
425 if (ret < 0)
426 return ret;
427 }
428
429 ret = wl1271_acx_ap_rate_policy(wl,
430 &wl->conf.tx.ap_mgmt_conf,
431 ACX_TX_AP_MODE_MGMT_RATE);
432 if (ret < 0)
433 return ret;
434
435 ret = wl1271_acx_ap_rate_policy(wl,
436 &wl->conf.tx.ap_bcst_conf,
437 ACX_TX_AP_MODE_BCST_RATE);
438 if (ret < 0) 428 if (ret < 0)
439 return ret; 429 return ret;
440 430
@@ -449,7 +439,7 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
449 return 0; 439 return 0;
450} 440}
451 441
452static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl) 442int wl1271_ap_init_templates(struct wl1271 *wl)
453{ 443{
454 int ret; 444 int ret;
455 445
@@ -465,6 +455,70 @@ static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
465 if (ret < 0) 455 if (ret < 0)
466 return ret; 456 return ret;
467 457
458 /*
459 * when operating as AP we want to receive external beacons for
460 * configuring ERP protection.
461 */
462 ret = wl1271_acx_set_ap_beacon_filter(wl, false);
463 if (ret < 0)
464 return ret;
465
466 return 0;
467}
468
469static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
470{
471 return wl1271_ap_init_templates(wl);
472}
473
474int wl1271_init_ap_rates(struct wl1271 *wl)
475{
476 int i, ret;
477 struct conf_tx_rate_class rc;
478 u32 supported_rates;
479
480 wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
481
482 if (wl->basic_rate_set == 0)
483 return -EINVAL;
484
485 rc.enabled_rates = wl->basic_rate_set;
486 rc.long_retry_limit = 10;
487 rc.short_retry_limit = 10;
488 rc.aflags = 0;
489 ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
490 if (ret < 0)
491 return ret;
492
493 /* use the min basic rate for AP broadcast/multicast */
494 rc.enabled_rates = wl1271_tx_min_rate_get(wl);
495 rc.short_retry_limit = 10;
496 rc.long_retry_limit = 10;
497 rc.aflags = 0;
498 ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
499 if (ret < 0)
500 return ret;
501
502 /*
503 * If the basic rates contain OFDM rates, use OFDM only
504 * rates for unicast TX as well. Else use all supported rates.
505 */
506 if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
507 supported_rates = CONF_TX_OFDM_RATES;
508 else
509 supported_rates = CONF_TX_AP_ENABLED_RATES;
510
511 /* configure unicast TX rate classes */
512 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
513 rc.enabled_rates = supported_rates;
514 rc.short_retry_limit = 10;
515 rc.long_retry_limit = 10;
516 rc.aflags = 0;
517 ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
518 if (ret < 0)
519 return ret;
520 }
521
468 return 0; 522 return 0;
469} 523}
470 524
@@ -504,6 +558,27 @@ static int wl1271_set_ba_policies(struct wl1271 *wl)
504 return ret; 558 return ret;
505} 559}
506 560
561int wl1271_chip_specific_init(struct wl1271 *wl)
562{
563 int ret = 0;
564
565 if (wl->chip.id == CHIP_ID_1283_PG20) {
566 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
567
568 if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
569 /* Enable SDIO padding */
570 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
571
572 /* Must be before wl1271_acx_init_mem_config() */
573 ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
574 if (ret < 0)
575 goto out;
576 }
577out:
578 return ret;
579}
580
581
507int wl1271_hw_init(struct wl1271 *wl) 582int wl1271_hw_init(struct wl1271 *wl)
508{ 583{
509 struct conf_tx_ac_category *conf_ac; 584 struct conf_tx_ac_category *conf_ac;
@@ -511,11 +586,22 @@ int wl1271_hw_init(struct wl1271 *wl)
511 int ret, i; 586 int ret, i;
512 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 587 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
513 588
514 ret = wl1271_cmd_general_parms(wl); 589 if (wl->chip.id == CHIP_ID_1283_PG20)
590 ret = wl128x_cmd_general_parms(wl);
591 else
592 ret = wl1271_cmd_general_parms(wl);
593 if (ret < 0)
594 return ret;
595
596 if (wl->chip.id == CHIP_ID_1283_PG20)
597 ret = wl128x_cmd_radio_parms(wl);
598 else
599 ret = wl1271_cmd_radio_parms(wl);
515 if (ret < 0) 600 if (ret < 0)
516 return ret; 601 return ret;
517 602
518 ret = wl1271_cmd_radio_parms(wl); 603 /* Chip-specific init */
604 ret = wl1271_chip_specific_init(wl);
519 if (ret < 0) 605 if (ret < 0)
520 return ret; 606 return ret;
521 607
@@ -528,6 +614,11 @@ int wl1271_hw_init(struct wl1271 *wl)
528 if (ret < 0) 614 if (ret < 0)
529 return ret; 615 return ret;
530 616
617 /* Bluetooth WLAN coexistence */
618 ret = wl1271_init_pta(wl);
619 if (ret < 0)
620 return ret;
621
531 /* Default memory configuration */ 622 /* Default memory configuration */
532 ret = wl1271_acx_init_mem_config(wl); 623 ret = wl1271_acx_init_mem_config(wl);
533 if (ret < 0) 624 if (ret < 0)
@@ -567,7 +658,7 @@ int wl1271_hw_init(struct wl1271 *wl)
567 goto out_free_memmap; 658 goto out_free_memmap;
568 659
569 /* Default fragmentation threshold */ 660 /* Default fragmentation threshold */
570 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); 661 ret = wl1271_acx_frag_threshold(wl, wl->hw->wiphy->frag_threshold);
571 if (ret < 0) 662 if (ret < 0)
572 goto out_free_memmap; 663 goto out_free_memmap;
573 664
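
wl1271_init_ap_rates() above derives the unicast rate class from the basic rate set: if any OFDM rate is in the basic set, unicast TX is restricted to OFDM-only rates, otherwise all AP-enabled rates are allowed. A pure-function sketch of that choice with illustrative rate masks (the real masks are the CONF_HW_BIT_RATE_* combinations from conf.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative rate masks; the driver uses CONF_TX_OFDM_RATES and
 * CONF_TX_AP_ENABLED_RATES built from CONF_HW_BIT_RATE_*. */
#define RATES_11B       0x0000000fu     /* 1, 2, 5.5, 11 Mbps */
#define RATES_OFDM      0x00000ff0u     /* 6..54 Mbps */
#define RATES_AP_ALL    (RATES_11B | RATES_OFDM)

static uint32_t ap_unicast_rates(uint32_t basic_rate_set)
{
        /* If the basic rates contain OFDM rates, use OFDM-only rates
         * for unicast TX as well; otherwise allow everything. */
        if (basic_rate_set & RATES_OFDM)
                return RATES_OFDM;
        return RATES_AP_ALL;
}

int main(void)
{
        printf("11b-only basic set  -> 0x%08x\n", ap_unicast_rates(RATES_11B));
        printf("mixed b/g basic set -> 0x%08x\n",
               ap_unicast_rates(RATES_11B | 0x10));
        return 0;
}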
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 3a8bd3f426d2..3a3c230fd292 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -31,6 +31,9 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl);
31int wl1271_init_phy_config(struct wl1271 *wl); 31int wl1271_init_phy_config(struct wl1271 *wl);
32int wl1271_init_pta(struct wl1271 *wl); 32int wl1271_init_pta(struct wl1271 *wl);
33int wl1271_init_energy_detection(struct wl1271 *wl); 33int wl1271_init_energy_detection(struct wl1271 *wl);
34int wl1271_chip_specific_init(struct wl1271 *wl);
34int wl1271_hw_init(struct wl1271 *wl); 35int wl1271_hw_init(struct wl1271 *wl);
36int wl1271_init_ap_rates(struct wl1271 *wl);
37int wl1271_ap_init_templates(struct wl1271 *wl);
35 38
36#endif 39#endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index d557f73e7c19..da5c1ad942a4 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -29,6 +29,7 @@
29#include "wl12xx.h" 29#include "wl12xx.h"
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "io.h" 31#include "io.h"
32#include "tx.h"
32 33
33#define OCP_CMD_LOOP 32 34#define OCP_CMD_LOOP 32
34 35
@@ -43,6 +44,16 @@
43#define OCP_STATUS_REQ_FAILED 0x20000 44#define OCP_STATUS_REQ_FAILED 0x20000
44#define OCP_STATUS_RESP_ERROR 0x30000 45#define OCP_STATUS_RESP_ERROR 0x30000
45 46
47bool wl1271_set_block_size(struct wl1271 *wl)
48{
49 if (wl->if_ops->set_block_size) {
50 wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
51 return true;
52 }
53
54 return false;
55}
56
46void wl1271_disable_interrupts(struct wl1271 *wl) 57void wl1271_disable_interrupts(struct wl1271 *wl)
47{ 58{
48 wl->if_ops->disable_irq(wl); 59 wl->if_ops->disable_irq(wl);
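
wl1271_set_block_size() above treats the bus operation as optional: block-size alignment is configured only when the interface provides a set_block_size hook, and the caller learns whether it happened (which later gates the SDIO padding quirk). A minimal sketch of that optional-callback pattern with an invented bus_ops structure:

#include <stdbool.h>
#include <stdio.h>

#define BUS_BLOCK_SIZE 256

struct bus_ops {
        /* may be NULL when the underlying bus (e.g. SPI) has no block mode */
        void (*set_block_size)(unsigned int size);
};

static void sdio_set_block_size(unsigned int size)
{
        printf("block size set to %u\n", size);
}

static bool try_set_block_size(const struct bus_ops *ops)
{
        if (ops->set_block_size) {
                ops->set_block_size(BUS_BLOCK_SIZE);
                return true;    /* caller may enable TX padding quirks */
        }
        return false;
}

int main(void)
{
        struct bus_ops sdio = { .set_block_size = sdio_set_block_size };
        struct bus_ops spi  = { 0 };
        bool on_sdio = try_set_block_size(&sdio);
        bool on_spi  = try_set_block_size(&spi);

        printf("sdio: %d, spi: %d\n", on_sdio, on_spi);
        return 0;
}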
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index 00c771ea70bf..beed621a8ae0 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -169,5 +169,8 @@ int wl1271_init_ieee80211(struct wl1271 *wl);
169struct ieee80211_hw *wl1271_alloc_hw(void); 169struct ieee80211_hw *wl1271_alloc_hw(void);
170int wl1271_free_hw(struct wl1271 *wl); 170int wl1271_free_hw(struct wl1271 *wl);
171irqreturn_t wl1271_irq(int irq, void *data); 171irqreturn_t wl1271_irq(int irq, void *data);
172bool wl1271_set_block_size(struct wl1271 *wl);
173int wl1271_tx_dummy_packet(struct wl1271 *wl);
174void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters);
172 175
173#endif 176#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 8b3c8d196b03..610be03a198b 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -30,6 +30,7 @@
30#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/wl12xx.h>
33 34
34#include "wl12xx.h" 35#include "wl12xx.h"
35#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
@@ -50,11 +51,11 @@
50 51
51static struct conf_drv_settings default_conf = { 52static struct conf_drv_settings default_conf = {
52 .sg = { 53 .sg = {
53 .params = { 54 .sta_params = {
54 [CONF_SG_BT_PER_THRESHOLD] = 7500, 55 [CONF_SG_BT_PER_THRESHOLD] = 7500,
55 [CONF_SG_HV3_MAX_OVERRIDE] = 0, 56 [CONF_SG_HV3_MAX_OVERRIDE] = 0,
56 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400, 57 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
57 [CONF_SG_BT_LOAD_RATIO] = 50, 58 [CONF_SG_BT_LOAD_RATIO] = 200,
58 [CONF_SG_AUTO_PS_MODE] = 1, 59 [CONF_SG_AUTO_PS_MODE] = 1,
59 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, 60 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
60 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, 61 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
@@ -100,6 +101,61 @@ static struct conf_drv_settings default_conf = {
100 [CONF_SG_DHCP_TIME] = 5000, 101 [CONF_SG_DHCP_TIME] = 5000,
101 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, 102 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
102 }, 103 },
104 .ap_params = {
105 [CONF_SG_BT_PER_THRESHOLD] = 7500,
106 [CONF_SG_HV3_MAX_OVERRIDE] = 0,
107 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
108 [CONF_SG_BT_LOAD_RATIO] = 50,
109 [CONF_SG_AUTO_PS_MODE] = 1,
110 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
111 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
112 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
113 [CONF_SG_BEACON_MISS_PERCENT] = 60,
114 [CONF_SG_RATE_ADAPT_THRESH] = 64,
115 [CONF_SG_RATE_ADAPT_SNR] = 1,
116 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
117 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 25,
118 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 25,
119 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
120 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 25,
121 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 25,
122 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
123 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
124 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 25,
125 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
126 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 25,
127 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 25,
128 [CONF_SG_RXT] = 1200,
129 [CONF_SG_TXT] = 1000,
130 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
131 [CONF_SG_PS_POLL_TIMEOUT] = 10,
132 [CONF_SG_UPSD_TIMEOUT] = 10,
133 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
134 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
135 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
136 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
137 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
138 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
139 [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
140 [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
141 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
142 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
143 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
144 [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
145 [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
146 [CONF_SG_HV3_MAX_SERVED] = 6,
147 [CONF_SG_DHCP_TIME] = 5000,
148 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
149 [CONF_SG_TEMP_PARAM_1] = 0,
150 [CONF_SG_TEMP_PARAM_2] = 0,
151 [CONF_SG_TEMP_PARAM_3] = 0,
152 [CONF_SG_TEMP_PARAM_4] = 0,
153 [CONF_SG_TEMP_PARAM_5] = 0,
154 [CONF_SG_AP_BEACON_MISS_TX] = 3,
155 [CONF_SG_RX_WINDOW_LENGTH] = 6,
156 [CONF_SG_AP_CONNECTION_PROTECTION_TIME] = 50,
157 [CONF_SG_TEMP_PARAM_6] = 1,
158 },
103 .state = CONF_SG_PROTECTIVE, 159 .state = CONF_SG_PROTECTIVE,
104 }, 160 },
105 .rx = { 161 .rx = {
@@ -107,7 +163,7 @@ static struct conf_drv_settings default_conf = {
107 .packet_detection_threshold = 0, 163 .packet_detection_threshold = 0,
108 .ps_poll_timeout = 15, 164 .ps_poll_timeout = 15,
109 .upsd_timeout = 15, 165 .upsd_timeout = 15,
110 .rts_threshold = 2347, 166 .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
111 .rx_cca_threshold = 0, 167 .rx_cca_threshold = 0,
112 .irq_blk_threshold = 0xFFFF, 168 .irq_blk_threshold = 0xFFFF,
113 .irq_pkt_threshold = 0, 169 .irq_pkt_threshold = 0,
@@ -153,44 +209,6 @@ static struct conf_drv_settings default_conf = {
153 .tx_op_limit = 1504, 209 .tx_op_limit = 1504,
154 }, 210 },
155 }, 211 },
156 .ap_rc_conf = {
157 [0] = {
158 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
159 .short_retry_limit = 10,
160 .long_retry_limit = 10,
161 .aflags = 0,
162 },
163 [1] = {
164 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
165 .short_retry_limit = 10,
166 .long_retry_limit = 10,
167 .aflags = 0,
168 },
169 [2] = {
170 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
171 .short_retry_limit = 10,
172 .long_retry_limit = 10,
173 .aflags = 0,
174 },
175 [3] = {
176 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
177 .short_retry_limit = 10,
178 .long_retry_limit = 10,
179 .aflags = 0,
180 },
181 },
182 .ap_mgmt_conf = {
183 .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES,
184 .short_retry_limit = 10,
185 .long_retry_limit = 10,
186 .aflags = 0,
187 },
188 .ap_bcst_conf = {
189 .enabled_rates = CONF_HW_BIT_RATE_1MBPS,
190 .short_retry_limit = 10,
191 .long_retry_limit = 10,
192 .aflags = 0,
193 },
194 .ap_max_tx_retries = 100, 212 .ap_max_tx_retries = 100,
195 .tid_conf_count = 4, 213 .tid_conf_count = 4,
196 .tid_conf = { 214 .tid_conf = {
@@ -239,12 +257,16 @@ static struct conf_drv_settings default_conf = {
239 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 257 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
240 .listen_interval = 1, 258 .listen_interval = 1,
241 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, 259 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
242 .bcn_filt_ie_count = 1, 260 .bcn_filt_ie_count = 2,
243 .bcn_filt_ie = { 261 .bcn_filt_ie = {
244 [0] = { 262 [0] = {
245 .ie = WLAN_EID_CHANNEL_SWITCH, 263 .ie = WLAN_EID_CHANNEL_SWITCH,
246 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE, 264 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
247 } 265 },
266 [1] = {
267 .ie = WLAN_EID_HT_INFORMATION,
268 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
269 },
248 }, 270 },
249 .synch_fail_thold = 10, 271 .synch_fail_thold = 10,
250 .bss_lose_timeout = 100, 272 .bss_lose_timeout = 100,
@@ -254,9 +276,9 @@ static struct conf_drv_settings default_conf = {
254 .ps_poll_threshold = 10, 276 .ps_poll_threshold = 10,
255 .ps_poll_recovery_period = 700, 277 .ps_poll_recovery_period = 700,
256 .bet_enable = CONF_BET_MODE_ENABLE, 278 .bet_enable = CONF_BET_MODE_ENABLE,
257 .bet_max_consecutive = 10, 279 .bet_max_consecutive = 50,
258 .psm_entry_retries = 5, 280 .psm_entry_retries = 5,
259 .psm_exit_retries = 255, 281 .psm_exit_retries = 16,
260 .psm_entry_nullfunc_retries = 3, 282 .psm_entry_nullfunc_retries = 3,
261 .psm_entry_hangover_period = 1, 283 .psm_entry_hangover_period = 1,
262 .keep_alive_interval = 55000, 284 .keep_alive_interval = 55000,
@@ -284,6 +306,15 @@ static struct conf_drv_settings default_conf = {
284 .max_dwell_time_passive = 100000, 306 .max_dwell_time_passive = 100000,
285 .num_probe_reqs = 2, 307 .num_probe_reqs = 2,
286 }, 308 },
309 .sched_scan = {
310 /* sched_scan requires dwell times in TU instead of TU/1000 */
311 .min_dwell_time_active = 8,
312 .max_dwell_time_active = 30,
313 .dwell_time_passive = 100,
314 .num_probe_reqs = 2,
315 .rssi_threshold = -90,
316 .snr_threshold = 0,
317 },
287 .rf = { 318 .rf = {
288 .tx_per_channel_power_compensation_2 = { 319 .tx_per_channel_power_compensation_2 = {
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -298,19 +329,43 @@ static struct conf_drv_settings default_conf = {
298 .tx_ba_win_size = 64, 329 .tx_ba_win_size = 64,
299 .inactivity_timeout = 10000, 330 .inactivity_timeout = 10000,
300 }, 331 },
301 .mem = { 332 .mem_wl127x = {
302 .num_stations = 1, 333 .num_stations = 1,
303 .ssid_profiles = 1, 334 .ssid_profiles = 1,
304 .rx_block_num = 70, 335 .rx_block_num = 70,
305 .tx_min_block_num = 40, 336 .tx_min_block_num = 40,
306 .dynamic_memory = 0, 337 .dynamic_memory = 1,
307 .min_req_tx_blocks = 100, 338 .min_req_tx_blocks = 100,
308 .min_req_rx_blocks = 22, 339 .min_req_rx_blocks = 22,
309 .tx_min = 27, 340 .tx_min = 27,
310 } 341 },
342 .mem_wl128x = {
343 .num_stations = 1,
344 .ssid_profiles = 1,
345 .rx_block_num = 40,
346 .tx_min_block_num = 40,
347 .dynamic_memory = 1,
348 .min_req_tx_blocks = 45,
349 .min_req_rx_blocks = 22,
350 .tx_min = 27,
351 },
352 .fm_coex = {
353 .enable = true,
354 .swallow_period = 5,
355 .n_divider_fref_set_1 = 0xff, /* default */
356 .n_divider_fref_set_2 = 12,
357 .m_divider_fref_set_1 = 148,
358 .m_divider_fref_set_2 = 0xffff, /* default */
359 .coex_pll_stabilization_time = 0xffffffff, /* default */
360 .ldo_stabilization_time = 0xffff, /* default */
361 .fm_disturbed_band_margin = 0xff, /* default */
362 .swallow_clk_diff = 0xff, /* default */
363 },
364 .hci_io_ds = HCI_IO_DS_6MA,
311}; 365};
312 366
313static void __wl1271_op_remove_interface(struct wl1271 *wl); 367static void __wl1271_op_remove_interface(struct wl1271 *wl,
368 bool reset_tx_queues);
314static void wl1271_free_ap_keys(struct wl1271 *wl); 369static void wl1271_free_ap_keys(struct wl1271 *wl);
315 370
316 371
@@ -329,6 +384,7 @@ static struct platform_device wl1271_device = {
329 }, 384 },
330}; 385};
331 386
387static DEFINE_MUTEX(wl_list_mutex);
332static LIST_HEAD(wl_list); 388static LIST_HEAD(wl_list);
333 389
334static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, 390static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
@@ -359,10 +415,12 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
359 return NOTIFY_DONE; 415 return NOTIFY_DONE;
360 416
361 wl_temp = hw->priv; 417 wl_temp = hw->priv;
418 mutex_lock(&wl_list_mutex);
362 list_for_each_entry(wl, &wl_list, list) { 419 list_for_each_entry(wl, &wl_list, list) {
363 if (wl == wl_temp) 420 if (wl == wl_temp)
364 break; 421 break;
365 } 422 }
423 mutex_unlock(&wl_list_mutex);
366 if (wl != wl_temp) 424 if (wl != wl_temp)
367 return NOTIFY_DONE; 425 return NOTIFY_DONE;
368 426
@@ -438,15 +496,30 @@ static int wl1271_plt_init(struct wl1271 *wl)
438 struct conf_tx_tid *conf_tid; 496 struct conf_tx_tid *conf_tid;
439 int ret, i; 497 int ret, i;
440 498
441 ret = wl1271_cmd_general_parms(wl); 499 if (wl->chip.id == CHIP_ID_1283_PG20)
500 ret = wl128x_cmd_general_parms(wl);
501 else
502 ret = wl1271_cmd_general_parms(wl);
503 if (ret < 0)
504 return ret;
505
506 if (wl->chip.id == CHIP_ID_1283_PG20)
507 ret = wl128x_cmd_radio_parms(wl);
508 else
509 ret = wl1271_cmd_radio_parms(wl);
442 if (ret < 0) 510 if (ret < 0)
443 return ret; 511 return ret;
444 512
445 ret = wl1271_cmd_radio_parms(wl); 513 if (wl->chip.id != CHIP_ID_1283_PG20) {
514 ret = wl1271_cmd_ext_radio_parms(wl);
515 if (ret < 0)
516 return ret;
517 }
446 if (ret < 0) 518 if (ret < 0)
447 return ret; 519 return ret;
448 520
449 ret = wl1271_cmd_ext_radio_parms(wl); 521 /* Chip-specific initializations */
522 ret = wl1271_chip_specific_init(wl);
450 if (ret < 0) 523 if (ret < 0)
451 return ret; 524 return ret;
452 525
@@ -477,6 +550,11 @@ static int wl1271_plt_init(struct wl1271 *wl)
477 if (ret < 0) 550 if (ret < 0)
478 goto out_free_memmap; 551 goto out_free_memmap;
479 552
553 /* FM WLAN coexistence */
554 ret = wl1271_acx_fm_coex(wl);
555 if (ret < 0)
556 goto out_free_memmap;
557
480 /* Energy detection */ 558 /* Energy detection */
481 ret = wl1271_init_energy_detection(wl); 559 ret = wl1271_init_energy_detection(wl);
482 if (ret < 0) 560 if (ret < 0)
@@ -593,15 +671,17 @@ static void wl1271_fw_status(struct wl1271 *wl,
593{ 671{
594 struct wl1271_fw_common_status *status = &full_status->common; 672 struct wl1271_fw_common_status *status = &full_status->common;
595 struct timespec ts; 673 struct timespec ts;
596 u32 total = 0; 674 u32 old_tx_blk_count = wl->tx_blocks_available;
675 u32 freed_blocks = 0;
597 int i; 676 int i;
598 677
599 if (wl->bss_type == BSS_TYPE_AP_BSS) 678 if (wl->bss_type == BSS_TYPE_AP_BSS) {
600 wl1271_raw_read(wl, FW_STATUS_ADDR, status, 679 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
601 sizeof(struct wl1271_fw_ap_status), false); 680 sizeof(struct wl1271_fw_ap_status), false);
602 else 681 } else {
603 wl1271_raw_read(wl, FW_STATUS_ADDR, status, 682 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
604 sizeof(struct wl1271_fw_sta_status), false); 683 sizeof(struct wl1271_fw_sta_status), false);
684 }
605 685
606 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 686 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
607 "drv_rx_counter = %d, tx_results_counter = %d)", 687 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -612,22 +692,37 @@ static void wl1271_fw_status(struct wl1271 *wl,
612 692
613 /* update number of available TX blocks */ 693 /* update number of available TX blocks */
614 for (i = 0; i < NUM_TX_QUEUES; i++) { 694 for (i = 0; i < NUM_TX_QUEUES; i++) {
615 u32 cnt = le32_to_cpu(status->tx_released_blks[i]) - 695 freed_blocks += le32_to_cpu(status->tx_released_blks[i]) -
616 wl->tx_blocks_freed[i]; 696 wl->tx_blocks_freed[i];
617 697
618 wl->tx_blocks_freed[i] = 698 wl->tx_blocks_freed[i] =
619 le32_to_cpu(status->tx_released_blks[i]); 699 le32_to_cpu(status->tx_released_blks[i]);
620 wl->tx_blocks_available += cnt;
621 total += cnt;
622 } 700 }
623 701
624 /* if more blocks are available now, tx work can be scheduled */ 702 wl->tx_allocated_blocks -= freed_blocks;
625 if (total)
626 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
627 703
628 /* for AP update num of allocated TX blocks per link and ps status */ 704 if (wl->bss_type == BSS_TYPE_AP_BSS) {
629 if (wl->bss_type == BSS_TYPE_AP_BSS) 705 /* Update num of allocated TX blocks per link and ps status */
630 wl1271_irq_update_links_status(wl, &full_status->ap); 706 wl1271_irq_update_links_status(wl, &full_status->ap);
707 wl->tx_blocks_available += freed_blocks;
708 } else {
709 int avail = full_status->sta.tx_total - wl->tx_allocated_blocks;
710
711 /*
712 * The FW might change the total number of TX memblocks before
713 * we get a notification about blocks being released. Thus, the
714 * available blocks calculation might yield a temporary result
715 * which is lower than the actual available blocks. Keeping in
716 * mind that only blocks that were allocated can be moved from
717 * TX to RX, tx_blocks_available should never decrease here.
718 */
719 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
720 avail);
721 }
722
723 /* if more blocks are available now, tx work can be scheduled */
724 if (wl->tx_blocks_available > old_tx_blk_count)
725 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
631 726
632 /* update the host-chipset time offset */ 727 /* update the host-chipset time offset */
633 getnstimeofday(&ts); 728 getnstimeofday(&ts);
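Review note: the reworked wl1271_fw_status() above accumulates the per-queue freed blocks, subtracts them from tx_allocated_blocks, and for STA derives the available count from the firmware's tx_total, clamped so it never drops just because the firmware resized its pool before reporting the released blocks. A compact sketch of that clamping with made-up numbers:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct tx_accounting {
	int allocated;  /* blocks handed to the firmware, not yet released */
	int available;  /* blocks the host may still allocate */
};

/* STA path of the new accounting: available is derived from the firmware's
 * current total minus what is still outstanding, but it is never allowed to
 * shrink spuriously. */
static void update_from_fw(struct tx_accounting *acc, int fw_tx_total,
			   int freed_blocks)
{
	acc->allocated -= freed_blocks;
	acc->available = MAX(acc->available, fw_tx_total - acc->allocated);
}

int main(void)
{
	struct tx_accounting acc = { .allocated = 30, .available = 10 };

	/* Firmware shrank its pool (40 -> 35) but has not yet reported the
	 * matching released blocks: the naive result (35 - 30 = 5) would be
	 * lower than what we already had, so it stays clamped at 10. */
	update_from_fw(&acc, 35, 0);
	printf("after shrink:  allocated=%d available=%d\n",
	       acc.allocated, acc.available);

	/* The release notification arrives: 5 blocks come back. */
	update_from_fw(&acc, 35, 5);
	printf("after release: allocated=%d available=%d\n",
	       acc.allocated, acc.available);
	return 0;
}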
@@ -674,6 +769,13 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
674 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 769 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
675 cancel_work_sync(&wl->tx_work); 770 cancel_work_sync(&wl->tx_work);
676 771
772 /*
773 * In case edge triggered interrupt must be used, we cannot iterate
774 * more than once without introducing race conditions with the hardirq.
775 */
776 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
777 loopcount = 1;
778
677 mutex_lock(&wl->mutex); 779 mutex_lock(&wl->mutex);
678 780
679 wl1271_debug(DEBUG_IRQ, "IRQ work"); 781 wl1271_debug(DEBUG_IRQ, "IRQ work");
@@ -785,11 +887,17 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
785 887
786 switch (wl->bss_type) { 888 switch (wl->bss_type) {
787 case BSS_TYPE_AP_BSS: 889 case BSS_TYPE_AP_BSS:
788 fw_name = WL1271_AP_FW_NAME; 890 if (wl->chip.id == CHIP_ID_1283_PG20)
891 fw_name = WL128X_AP_FW_NAME;
892 else
893 fw_name = WL127X_AP_FW_NAME;
789 break; 894 break;
790 case BSS_TYPE_IBSS: 895 case BSS_TYPE_IBSS:
791 case BSS_TYPE_STA_BSS: 896 case BSS_TYPE_STA_BSS:
792 fw_name = WL1271_FW_NAME; 897 if (wl->chip.id == CHIP_ID_1283_PG20)
898 fw_name = WL128X_FW_NAME;
899 else
900 fw_name = WL1271_FW_NAME;
793 break; 901 break;
794 default: 902 default:
795 wl1271_error("no compatible firmware for bss_type %d", 903 wl1271_error("no compatible firmware for bss_type %d",
@@ -838,14 +946,14 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
838 const struct firmware *fw; 946 const struct firmware *fw;
839 int ret; 947 int ret;
840 948
841 ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl)); 949 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
842 950
843 if (ret < 0) { 951 if (ret < 0) {
844 wl1271_error("could not get nvs file: %d", ret); 952 wl1271_error("could not get nvs file: %d", ret);
845 return ret; 953 return ret;
846 } 954 }
847 955
848 wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL); 956 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
849 957
850 if (!wl->nvs) { 958 if (!wl->nvs) {
851 wl1271_error("could not allocate memory for the nvs file"); 959 wl1271_error("could not allocate memory for the nvs file");
@@ -871,15 +979,30 @@ static void wl1271_recovery_work(struct work_struct *work)
871 if (wl->state != WL1271_STATE_ON) 979 if (wl->state != WL1271_STATE_ON)
872 goto out; 980 goto out;
873 981
874 wl1271_info("Hardware recovery in progress."); 982 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
983 wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
875 984
876 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 985 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
877 ieee80211_connection_loss(wl->vif); 986 ieee80211_connection_loss(wl->vif);
878 987
988 /* Prevent spurious TX during FW restart */
989 ieee80211_stop_queues(wl->hw);
990
991 if (wl->sched_scanning) {
992 ieee80211_sched_scan_stopped(wl->hw);
993 wl->sched_scanning = false;
994 }
995
879 /* reboot the chipset */ 996 /* reboot the chipset */
880 __wl1271_op_remove_interface(wl); 997 __wl1271_op_remove_interface(wl, false);
881 ieee80211_restart_hw(wl->hw); 998 ieee80211_restart_hw(wl->hw);
882 999
1000 /*
1001	 * It's safe to enable TX now - the queues are stopped after a request
1002 * to restart the HW.
1003 */
1004 ieee80211_wake_queues(wl->hw);
1005
883out: 1006out:
884 mutex_unlock(&wl->mutex); 1007 mutex_unlock(&wl->mutex);
885} 1008}
@@ -950,10 +1073,25 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
950 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 1073 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
951 wl->chip.id); 1074 wl->chip.id);
952 1075
1076 /* end-of-transaction flag should be set in wl127x AP mode */
1077 if (wl->bss_type == BSS_TYPE_AP_BSS)
1078 wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
1079
953 ret = wl1271_setup(wl); 1080 ret = wl1271_setup(wl);
954 if (ret < 0) 1081 if (ret < 0)
955 goto out; 1082 goto out;
956 break; 1083 break;
1084 case CHIP_ID_1283_PG20:
1085 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
1086 wl->chip.id);
1087
1088 ret = wl1271_setup(wl);
1089 if (ret < 0)
1090 goto out;
1091 if (wl1271_set_block_size(wl))
1092 wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
1093 break;
1094 case CHIP_ID_1283_PG10:
957 default: 1095 default:
958 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); 1096 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
959 ret = -ENODEV; 1097 ret = -ENODEV;
@@ -978,6 +1116,24 @@ out:
978 return ret; 1116 return ret;
979} 1117}
980 1118
1119static unsigned int wl1271_get_fw_ver_quirks(struct wl1271 *wl)
1120{
1121 unsigned int quirks = 0;
1122 unsigned int *fw_ver = wl->chip.fw_ver;
1123
1124 /* Only for wl127x */
1125 if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
1126 /* Check STA version */
1127 (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
1128 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
1129 /* Check AP version */
1130 ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
1131 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
1132 quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
1133
1134 return quirks;
1135}
1136
981int wl1271_plt_start(struct wl1271 *wl) 1137int wl1271_plt_start(struct wl1271 *wl)
982{ 1138{
983 int retries = WL1271_BOOT_RETRIES; 1139 int retries = WL1271_BOOT_RETRIES;
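Review note: wl1271_get_fw_ver_quirks() above gates the 2-spare-blocks quirk on the parsed firmware version (chip field, interface type, minor). Below is a sketch of the same version gate against a plain array; the field order and the threshold constants are invented for illustration, since the real FW_VER_* values live in the driver headers.

#include <stdio.h>

/* Field indices into a parsed "chip.iftype.x.y.minor" version string. */
enum { VER_CHIP, VER_IF_TYPE, VER_MAJOR, VER_SUBTYPE, VER_MINOR, VER_FIELDS };

/* Illustrative values only; the driver defines the real constants. */
#define VER_CHIP_WL127X      6
#define VER_IF_TYPE_STA      1
#define VER_IF_TYPE_AP       2
#define MINOR_SPARE_STA_MIN  58
#define MINOR_SPARE_AP_MIN   47

#define QUIRK_USE_2_SPARE_BLOCKS 0x1

static unsigned int fw_ver_quirks(const unsigned int ver[VER_FIELDS])
{
	unsigned int quirks = 0;

	/* Old wl127x firmware needs two spare TX blocks per frame. */
	if (ver[VER_CHIP] == VER_CHIP_WL127X &&
	    ((ver[VER_IF_TYPE] == VER_IF_TYPE_STA &&
	      ver[VER_MINOR] < MINOR_SPARE_STA_MIN) ||
	     (ver[VER_IF_TYPE] == VER_IF_TYPE_AP &&
	      ver[VER_MINOR] < MINOR_SPARE_AP_MIN)))
		quirks |= QUIRK_USE_2_SPARE_BLOCKS;

	return quirks;
}

int main(void)
{
	unsigned int old_sta[VER_FIELDS] = { 6, 1, 0, 0, 49 };
	unsigned int new_sta[VER_FIELDS] = { 6, 1, 0, 0, 120 };

	printf("old STA fw quirks: 0x%x\n", fw_ver_quirks(old_sta));
	printf("new STA fw quirks: 0x%x\n", fw_ver_quirks(new_sta));
	return 0;
}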
@@ -1013,6 +1169,9 @@ int wl1271_plt_start(struct wl1271 *wl)
1013 wl->state = WL1271_STATE_PLT; 1169 wl->state = WL1271_STATE_PLT;
1014 wl1271_notice("firmware booted in PLT mode (%s)", 1170 wl1271_notice("firmware booted in PLT mode (%s)",
1015 wl->chip.fw_ver_str); 1171 wl->chip.fw_ver_str);
1172
1173 /* Check if any quirks are needed with older fw versions */
1174 wl->quirks |= wl1271_get_fw_ver_quirks(wl);
1016 goto out; 1175 goto out;
1017 1176
1018irq_disable: 1177irq_disable:
@@ -1040,7 +1199,7 @@ out:
1040 return ret; 1199 return ret;
1041} 1200}
1042 1201
1043int __wl1271_plt_stop(struct wl1271 *wl) 1202static int __wl1271_plt_stop(struct wl1271 *wl)
1044{ 1203{
1045 int ret = 0; 1204 int ret = 0;
1046 1205
@@ -1124,10 +1283,217 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1124 spin_unlock_irqrestore(&wl->wl_lock, flags); 1283 spin_unlock_irqrestore(&wl->wl_lock, flags);
1125} 1284}
1126 1285
1286int wl1271_tx_dummy_packet(struct wl1271 *wl)
1287{
1288 unsigned long flags;
1289
1290 spin_lock_irqsave(&wl->wl_lock, flags);
1291 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1292 wl->tx_queue_count++;
1293 spin_unlock_irqrestore(&wl->wl_lock, flags);
1294
1295 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1296 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1297 wl1271_tx_work_locked(wl);
1298
1299 /*
1300 * If the FW TX is busy, TX work will be scheduled by the threaded
1301	 * interrupt handler function.
1302 */
1303 return 0;
1304}
1305
1306/*
1307 * The size of the dummy packet should be at least 1400 bytes. However, in
1308	 * order to minimize the number of bus transactions, aligning it to 512-byte
1309	 * boundaries could be beneficial, performance-wise.
1310 */
1311#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1312
1313static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1314{
1315 struct sk_buff *skb;
1316 struct ieee80211_hdr_3addr *hdr;
1317 unsigned int dummy_packet_size;
1318
1319 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1320 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1321
1322 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1323 if (!skb) {
1324 wl1271_warning("Failed to allocate a dummy packet skb");
1325 return NULL;
1326 }
1327
1328 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1329
1330 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1331 memset(hdr, 0, sizeof(*hdr));
1332 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1333 IEEE80211_STYPE_NULLFUNC |
1334 IEEE80211_FCTL_TODS);
1335
1336 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1337
1338 /* Dummy packets require the TID to be management */
1339 skb->priority = WL1271_TID_MGMT;
1340
1341 /* Initialize all fields that might be used */
1342 skb_set_queue_mapping(skb, 0);
1343 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1344
1345 return skb;
1346}
1347
1348
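Review note: the dummy-packet helper above budgets a total of ALIGN(1400, 512) = 1536 bytes and then carves out room for the HW TX descriptor and the 802.11 header before zero-padding the rest. A standalone sketch of that size arithmetic; the descriptor and header sizes below are placeholders, not the driver's real struct sizes.

#include <stdio.h>

/* Same idea as the kernel ALIGN(): round x up to a power-of-two boundary a. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

#define MIN_DUMMY_PAYLOAD 1400
#define BUS_ALIGNMENT     512

int main(void)
{
	/* Placeholder sizes; the driver uses sizeof(struct wl1271_tx_hw_descr)
	 * and sizeof(struct ieee80211_hdr_3addr). */
	unsigned int tx_descr_size = 52;
	unsigned int hdr_size = 24;

	unsigned int total = ALIGN_UP(MIN_DUMMY_PAYLOAD, BUS_ALIGNMENT);
	unsigned int padding = total - tx_descr_size - hdr_size;

	printf("total=%u (>= %u, multiple of %u)\n",
	       total, MIN_DUMMY_PAYLOAD, BUS_ALIGNMENT);
	printf("descriptor=%u header=%u zero padding=%u\n",
	       tx_descr_size, hdr_size, padding);
	return 0;
}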
1127static struct notifier_block wl1271_dev_notifier = { 1349static struct notifier_block wl1271_dev_notifier = {
1128 .notifier_call = wl1271_dev_notify, 1350 .notifier_call = wl1271_dev_notify,
1129}; 1351};
1130 1352
1353static int wl1271_configure_suspend(struct wl1271 *wl)
1354{
1355 int ret;
1356
1357 if (wl->bss_type != BSS_TYPE_STA_BSS)
1358 return 0;
1359
1360 mutex_lock(&wl->mutex);
1361
1362 ret = wl1271_ps_elp_wakeup(wl);
1363 if (ret < 0)
1364 goto out_unlock;
1365
1366	 /* enter psm if needed */
1367 if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
1368 DECLARE_COMPLETION_ONSTACK(compl);
1369
1370 wl->ps_compl = &compl;
1371 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
1372 wl->basic_rate, true);
1373 if (ret < 0)
1374 goto out_sleep;
1375
1376 /* we must unlock here so we will be able to get events */
1377 wl1271_ps_elp_sleep(wl);
1378 mutex_unlock(&wl->mutex);
1379
1380 ret = wait_for_completion_timeout(
1381 &compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
1382 if (ret <= 0) {
1383 wl1271_warning("couldn't enter ps mode!");
1384 ret = -EBUSY;
1385 goto out;
1386 }
1387
1388 /* take mutex again, and wakeup */
1389 mutex_lock(&wl->mutex);
1390
1391 ret = wl1271_ps_elp_wakeup(wl);
1392 if (ret < 0)
1393 goto out_unlock;
1394 }
1395out_sleep:
1396 wl1271_ps_elp_sleep(wl);
1397out_unlock:
1398 mutex_unlock(&wl->mutex);
1399out:
1400 return ret;
1401
1402}
1403
1404static void wl1271_configure_resume(struct wl1271 *wl)
1405{
1406 int ret;
1407
1408 if (wl->bss_type != BSS_TYPE_STA_BSS)
1409 return;
1410
1411 mutex_lock(&wl->mutex);
1412 ret = wl1271_ps_elp_wakeup(wl);
1413 if (ret < 0)
1414 goto out;
1415
1416 /* exit psm if it wasn't configured */
1417 if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
1418 wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
1419 wl->basic_rate, true);
1420
1421 wl1271_ps_elp_sleep(wl);
1422out:
1423 mutex_unlock(&wl->mutex);
1424}
1425
1426static int wl1271_op_suspend(struct ieee80211_hw *hw,
1427 struct cfg80211_wowlan *wow)
1428{
1429 struct wl1271 *wl = hw->priv;
1430 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1431 wl->wow_enabled = !!wow;
1432 if (wl->wow_enabled) {
1433 int ret;
1434 ret = wl1271_configure_suspend(wl);
1435 if (ret < 0) {
1436 wl1271_warning("couldn't prepare device to suspend");
1437 return ret;
1438 }
1439 /* flush any remaining work */
1440 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1441 flush_delayed_work(&wl->scan_complete_work);
1442
1443 /*
1444 * disable and re-enable interrupts in order to flush
1445 * the threaded_irq
1446 */
1447 wl1271_disable_interrupts(wl);
1448
1449 /*
1450 * set suspended flag to avoid triggering a new threaded_irq
1451 * work. no need for spinlock as interrupts are disabled.
1452 */
1453 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1454
1455 wl1271_enable_interrupts(wl);
1456 flush_work(&wl->tx_work);
1457 flush_delayed_work(&wl->pspoll_work);
1458 flush_delayed_work(&wl->elp_work);
1459 }
1460 return 0;
1461}
1462
1463static int wl1271_op_resume(struct ieee80211_hw *hw)
1464{
1465 struct wl1271 *wl = hw->priv;
1466 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1467 wl->wow_enabled);
1468
1469 /*
1470 * re-enable irq_work enqueuing, and call irq_work directly if
1471 * there is a pending work.
1472 */
1473 if (wl->wow_enabled) {
1474 struct wl1271 *wl = hw->priv;
1475 unsigned long flags;
1476 bool run_irq_work = false;
1477
1478 spin_lock_irqsave(&wl->wl_lock, flags);
1479 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1480 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1481 run_irq_work = true;
1482 spin_unlock_irqrestore(&wl->wl_lock, flags);
1483
1484 if (run_irq_work) {
1485 wl1271_debug(DEBUG_MAC80211,
1486 "run postponed irq_work directly");
1487 wl1271_irq(0, wl);
1488 wl1271_enable_interrupts(wl);
1489 }
1490
1491 wl1271_configure_resume(wl);
1492 }
1493
1494 return 0;
1495}
1496
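Review note: suspend above parks the interrupt path by setting WL1271_FLAG_SUSPENDED while interrupts are off, and resume clears it and replays any IRQ that was deferred in the meantime via WL1271_FLAG_PENDING_WORK. The sketch below models that deferred-IRQ handshake with two plain flags and direct calls in place of the real threaded IRQ, so no locking is shown; the driver relies on disabled interrupts and a spinlock.

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool suspended;     /* WL1271_FLAG_SUSPENDED    */
	bool pending_work;  /* WL1271_FLAG_PENDING_WORK */
};

/* What the hard IRQ would do while the device is suspended: remember that
 * work is owed instead of running the threaded handler. */
static void irq_fires(struct dev_state *s)
{
	if (s->suspended) {
		s->pending_work = true;
		printf("irq deferred\n");
	} else {
		printf("irq handled immediately\n");
	}
}

static void op_suspend(struct dev_state *s)
{
	s->suspended = true;
}

static void op_resume(struct dev_state *s)
{
	s->suspended = false;
	if (s->pending_work) {
		s->pending_work = false;
		printf("running postponed irq work\n");
	}
}

int main(void)
{
	struct dev_state s = { 0 };

	op_suspend(&s);
	irq_fires(&s);   /* arrives during suspend -> deferred */
	op_resume(&s);   /* replayed here */
	irq_fires(&s);   /* normal path again */
	return 0;
}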
1131static int wl1271_op_start(struct ieee80211_hw *hw) 1497static int wl1271_op_start(struct ieee80211_hw *hw)
1132{ 1498{
1133 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 1499 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -1174,6 +1540,16 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1174 goto out; 1540 goto out;
1175 } 1541 }
1176 1542
1543 /*
1544	 * in some corner-case HW recovery scenarios it's possible to
1545 * get here before __wl1271_op_remove_interface is complete, so
1546 * opt out if that is the case.
1547 */
1548 if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
1549 ret = -EBUSY;
1550 goto out;
1551 }
1552
1177 switch (vif->type) { 1553 switch (vif->type) {
1178 case NL80211_IFTYPE_STATION: 1554 case NL80211_IFTYPE_STATION:
1179 wl->bss_type = BSS_TYPE_STA_BSS; 1555 wl->bss_type = BSS_TYPE_STA_BSS;
@@ -1242,6 +1618,7 @@ power_off:
1242 1618
1243 wl->vif = vif; 1619 wl->vif = vif;
1244 wl->state = WL1271_STATE_ON; 1620 wl->state = WL1271_STATE_ON;
1621 set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
1245 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); 1622 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1246 1623
1247 /* update hw/fw version info in wiphy struct */ 1624 /* update hw/fw version info in wiphy struct */
@@ -1249,6 +1626,9 @@ power_off:
1249 strncpy(wiphy->fw_version, wl->chip.fw_ver_str, 1626 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1250 sizeof(wiphy->fw_version)); 1627 sizeof(wiphy->fw_version));
1251 1628
1629 /* Check if any quirks are needed with older fw versions */
1630 wl->quirks |= wl1271_get_fw_ver_quirks(wl);
1631
1252 /* 1632 /*
1253 * Now we know if 11a is supported (info from the NVS), so disable 1633 * Now we know if 11a is supported (info from the NVS), so disable
1254 * 11a channels if not supported 1634 * 11a channels if not supported
@@ -1262,23 +1642,30 @@ power_off:
1262out: 1642out:
1263 mutex_unlock(&wl->mutex); 1643 mutex_unlock(&wl->mutex);
1264 1644
1645 mutex_lock(&wl_list_mutex);
1265 if (!ret) 1646 if (!ret)
1266 list_add(&wl->list, &wl_list); 1647 list_add(&wl->list, &wl_list);
1648 mutex_unlock(&wl_list_mutex);
1267 1649
1268 return ret; 1650 return ret;
1269} 1651}
1270 1652
1271static void __wl1271_op_remove_interface(struct wl1271 *wl) 1653static void __wl1271_op_remove_interface(struct wl1271 *wl,
1654 bool reset_tx_queues)
1272{ 1655{
1273 int i; 1656 int i;
1274 1657
1275 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 1658 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1276 1659
1660 /* because of hardware recovery, we may get here twice */
1661 if (wl->state != WL1271_STATE_ON)
1662 return;
1663
1277 wl1271_info("down"); 1664 wl1271_info("down");
1278 1665
1666 mutex_lock(&wl_list_mutex);
1279 list_del(&wl->list); 1667 list_del(&wl->list);
1280 1668 mutex_unlock(&wl_list_mutex);
1281 WARN_ON(wl->state != WL1271_STATE_ON);
1282 1669
1283 /* enable dyn ps just in case (if left on due to fw crash etc) */ 1670 /* enable dyn ps just in case (if left on due to fw crash etc) */
1284 if (wl->bss_type == BSS_TYPE_STA_BSS) 1671 if (wl->bss_type == BSS_TYPE_STA_BSS)
@@ -1286,12 +1673,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1286 1673
1287 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { 1674 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
1288 wl->scan.state = WL1271_SCAN_STATE_IDLE; 1675 wl->scan.state = WL1271_SCAN_STATE_IDLE;
1289 kfree(wl->scan.scanned_ch); 1676 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
1290 wl->scan.scanned_ch = NULL;
1291 wl->scan.req = NULL; 1677 wl->scan.req = NULL;
1292 ieee80211_scan_completed(wl->hw, true); 1678 ieee80211_scan_completed(wl->hw, true);
1293 } 1679 }
1294 1680
1681 /*
1682 * this must be before the cancel_work calls below, so that the work
1683 * functions don't perform further work.
1684 */
1295 wl->state = WL1271_STATE_OFF; 1685 wl->state = WL1271_STATE_OFF;
1296 1686
1297 mutex_unlock(&wl->mutex); 1687 mutex_unlock(&wl->mutex);
@@ -1307,7 +1697,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1307 mutex_lock(&wl->mutex); 1697 mutex_lock(&wl->mutex);
1308 1698
1309 /* let's notify MAC80211 about the remaining pending TX frames */ 1699 /* let's notify MAC80211 about the remaining pending TX frames */
1310 wl1271_tx_reset(wl); 1700 wl1271_tx_reset(wl, reset_tx_queues);
1311 wl1271_power_off(wl); 1701 wl1271_power_off(wl);
1312 1702
1313 memset(wl->bssid, 0, ETH_ALEN); 1703 memset(wl->bssid, 0, ETH_ALEN);
@@ -1321,6 +1711,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1321 wl->psm_entry_retry = 0; 1711 wl->psm_entry_retry = 0;
1322 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1712 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1323 wl->tx_blocks_available = 0; 1713 wl->tx_blocks_available = 0;
1714 wl->tx_allocated_blocks = 0;
1324 wl->tx_results_count = 0; 1715 wl->tx_results_count = 0;
1325 wl->tx_packets_count = 0; 1716 wl->tx_packets_count = 0;
1326 wl->tx_security_last_seq = 0; 1717 wl->tx_security_last_seq = 0;
@@ -1328,13 +1719,20 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1328 wl->time_offset = 0; 1719 wl->time_offset = 0;
1329 wl->session_counter = 0; 1720 wl->session_counter = 0;
1330 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 1721 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1331 wl->flags = 0;
1332 wl->vif = NULL; 1722 wl->vif = NULL;
1333 wl->filters = 0; 1723 wl->filters = 0;
1334 wl1271_free_ap_keys(wl); 1724 wl1271_free_ap_keys(wl);
1335 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map)); 1725 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
1336 wl->ap_fw_ps_map = 0; 1726 wl->ap_fw_ps_map = 0;
1337 wl->ap_ps_map = 0; 1727 wl->ap_ps_map = 0;
1728 wl->sched_scanning = false;
1729
1730 /*
1731 * this is performed after the cancel_work calls and the associated
1732 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1733 * get executed before all these vars have been reset.
1734 */
1735 wl->flags = 0;
1338 1736
1339 for (i = 0; i < NUM_TX_QUEUES; i++) 1737 for (i = 0; i < NUM_TX_QUEUES; i++)
1340 wl->tx_blocks_freed[i] = 0; 1738 wl->tx_blocks_freed[i] = 0;
@@ -1361,14 +1759,14 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1361 */ 1759 */
1362 if (wl->vif) { 1760 if (wl->vif) {
1363 WARN_ON(wl->vif != vif); 1761 WARN_ON(wl->vif != vif);
1364 __wl1271_op_remove_interface(wl); 1762 __wl1271_op_remove_interface(wl, true);
1365 } 1763 }
1366 1764
1367 mutex_unlock(&wl->mutex); 1765 mutex_unlock(&wl->mutex);
1368 cancel_work_sync(&wl->recovery_work); 1766 cancel_work_sync(&wl->recovery_work);
1369} 1767}
1370 1768
1371static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters) 1769void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
1372{ 1770{
1373 wl1271_set_default_filters(wl); 1771 wl1271_set_default_filters(wl);
1374 1772
@@ -1431,10 +1829,10 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
1431 * One of the side effects of the JOIN command is that is clears 1829 * One of the side effects of the JOIN command is that is clears
1432 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated 1830 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
1433 * to a WPA/WPA2 access point will therefore kill the data-path. 1831 * to a WPA/WPA2 access point will therefore kill the data-path.
1434 * Currently there is no supported scenario for JOIN during 1832 * Currently the only valid scenario for JOIN during association
1435 * association - if it becomes a supported scenario, the WPA/WPA2 keys 1833 * is on roaming, in which case we will also be given new keys.
1436 * must be handled somehow. 1834 * Keep the below message for now, unless it starts bothering
1437 * 1835 * users who really like to roam a lot :)
1438 */ 1836 */
1439 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 1837 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1440 wl1271_info("JOIN while associated."); 1838 wl1271_info("JOIN while associated.");
@@ -1490,7 +1888,7 @@ static int wl1271_unjoin(struct wl1271 *wl)
1490 clear_bit(WL1271_FLAG_JOINED, &wl->flags); 1888 clear_bit(WL1271_FLAG_JOINED, &wl->flags);
1491 memset(wl->bssid, 0, ETH_ALEN); 1889 memset(wl->bssid, 0, ETH_ALEN);
1492 1890
1493 /* stop filterting packets based on bssid */ 1891 /* stop filtering packets based on bssid */
1494 wl1271_configure_filters(wl, FIF_OTHER_BSS); 1892 wl1271_configure_filters(wl, FIF_OTHER_BSS);
1495 1893
1496out: 1894out:
@@ -1530,6 +1928,13 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
1530 wl->session_counter++; 1928 wl->session_counter++;
1531 if (wl->session_counter >= SESSION_COUNTER_MAX) 1929 if (wl->session_counter >= SESSION_COUNTER_MAX)
1532 wl->session_counter = 0; 1930 wl->session_counter = 0;
1931
1932 /* The current firmware only supports sched_scan in idle */
1933 if (wl->sched_scanning) {
1934 wl1271_scan_sched_scan_stop(wl);
1935 ieee80211_sched_scan_stopped(wl->hw);
1936 }
1937
1533 ret = wl1271_dummy_join(wl); 1938 ret = wl1271_dummy_join(wl);
1534 if (ret < 0) 1939 if (ret < 0)
1535 goto out; 1940 goto out;
@@ -1569,7 +1974,12 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1569 mutex_lock(&wl->mutex); 1974 mutex_lock(&wl->mutex);
1570 1975
1571 if (unlikely(wl->state == WL1271_STATE_OFF)) { 1976 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1572 ret = -EAGAIN; 1977 /* we support configuring the channel and band while off */
1978 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
1979 wl->band = conf->channel->band;
1980 wl->channel = channel;
1981 }
1982
1573 goto out; 1983 goto out;
1574 } 1984 }
1575 1985
@@ -2077,6 +2487,60 @@ out:
2077 return ret; 2487 return ret;
2078} 2488}
2079 2489
2490static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
2491 struct ieee80211_vif *vif,
2492 struct cfg80211_sched_scan_request *req,
2493 struct ieee80211_sched_scan_ies *ies)
2494{
2495 struct wl1271 *wl = hw->priv;
2496 int ret;
2497
2498 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
2499
2500 mutex_lock(&wl->mutex);
2501
2502 ret = wl1271_ps_elp_wakeup(wl);
2503 if (ret < 0)
2504 goto out;
2505
2506 ret = wl1271_scan_sched_scan_config(wl, req, ies);
2507 if (ret < 0)
2508 goto out_sleep;
2509
2510 ret = wl1271_scan_sched_scan_start(wl);
2511 if (ret < 0)
2512 goto out_sleep;
2513
2514 wl->sched_scanning = true;
2515
2516out_sleep:
2517 wl1271_ps_elp_sleep(wl);
2518out:
2519 mutex_unlock(&wl->mutex);
2520 return ret;
2521}
2522
2523static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
2524 struct ieee80211_vif *vif)
2525{
2526 struct wl1271 *wl = hw->priv;
2527 int ret;
2528
2529 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
2530
2531 mutex_lock(&wl->mutex);
2532
2533 ret = wl1271_ps_elp_wakeup(wl);
2534 if (ret < 0)
2535 goto out;
2536
2537 wl1271_scan_sched_scan_stop(wl);
2538
2539 wl1271_ps_elp_sleep(wl);
2540out:
2541 mutex_unlock(&wl->mutex);
2542}
2543
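Review note: both new sched_scan ops follow the driver's usual lock / ps_elp_wakeup / do work / ps_elp_sleep / unlock shape, unwinding with gotos so the sleep and unlock always pair with their wakeup and lock. A bare-bones sketch of that shape with printf stand-ins for the real calls:

#include <stdio.h>

static void lock(void)          { puts("lock"); }
static void unlock(void)        { puts("unlock"); }
static int  wakeup_chip(void)   { puts("wakeup");    return 0; }
static void sleep_chip(void)    { puts("sleep"); }
static int  configure_scan(void){ puts("configure"); return 0; }
static int  start_scan(void)    { puts("start");     return 0; }

/* Same unwind order as wl1271_op_sched_scan_start(): a failure after wakeup
 * still puts the chip back to sleep, and the mutex is always released. */
static int sched_scan_start(void)
{
	int ret;

	lock();

	ret = wakeup_chip();
	if (ret < 0)
		goto out;

	ret = configure_scan();
	if (ret < 0)
		goto out_sleep;

	ret = start_scan();

out_sleep:
	sleep_chip();
out:
	unlock();
	return ret;
}

int main(void)
{
	return sched_scan_start() < 0;
}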
2080static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 2544static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
2081{ 2545{
2082 struct wl1271 *wl = hw->priv; 2546 struct wl1271 *wl = hw->priv;
@@ -2093,7 +2557,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
2093 if (ret < 0) 2557 if (ret < 0)
2094 goto out; 2558 goto out;
2095 2559
2096 ret = wl1271_acx_frag_threshold(wl, (u16)value); 2560 ret = wl1271_acx_frag_threshold(wl, value);
2097 if (ret < 0) 2561 if (ret < 0)
2098 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); 2562 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
2099 2563
@@ -2121,7 +2585,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2121 if (ret < 0) 2585 if (ret < 0)
2122 goto out; 2586 goto out;
2123 2587
2124 ret = wl1271_acx_rts_threshold(wl, (u16) value); 2588 ret = wl1271_acx_rts_threshold(wl, value);
2125 if (ret < 0) 2589 if (ret < 0)
2126 wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret); 2590 wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
2127 2591
@@ -2136,20 +2600,24 @@ out:
2136static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, 2600static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
2137 int offset) 2601 int offset)
2138{ 2602{
2139 u8 *ptr = skb->data + offset; 2603 u8 ssid_len;
2604 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2605 skb->len - offset);
2140 2606
2141 /* find the location of the ssid in the beacon */ 2607 if (!ptr) {
2142 while (ptr < skb->data + skb->len) { 2608 wl1271_error("No SSID in IEs!");
2143 if (ptr[0] == WLAN_EID_SSID) { 2609 return -ENOENT;
2144 wl->ssid_len = ptr[1]; 2610 }
2145 memcpy(wl->ssid, ptr+2, wl->ssid_len); 2611
2146 return 0; 2612 ssid_len = ptr[1];
2147 } 2613 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2148 ptr += (ptr[1] + 2); 2614 wl1271_error("SSID is too long!");
2615 return -EINVAL;
2149 } 2616 }
2150 2617
2151 wl1271_error("No SSID in IEs!\n"); 2618 wl->ssid_len = ssid_len;
2152 return -ENOENT; 2619 memcpy(wl->ssid, ptr+2, ssid_len);
2620 return 0;
2153} 2621}
2154 2622
2155static int wl1271_bss_erp_info_changed(struct wl1271 *wl, 2623static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
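Review note: the rewritten wl1271_ssid_set() above replaces the hand-rolled walk with cfg80211_find_ie() and adds a length check against IEEE80211_MAX_SSID_LEN. For reference, an information element is an (id, length, data) triple; below is a self-contained version of that lookup-and-validate step over a raw IE buffer, where find_ie() is a stand-in reimplementing what the cfg80211 routine provides.

#include <stdio.h>
#include <string.h>

#define EID_SSID      0
#define MAX_SSID_LEN  32

/* Minimal stand-in for cfg80211_find_ie(): scan (id, len, data) triples. */
static const unsigned char *find_ie(unsigned char eid,
				    const unsigned char *ies, int len)
{
	while (len >= 2 && len >= 2 + ies[1]) {
		if (ies[0] == eid)
			return ies;
		len -= 2 + ies[1];
		ies += 2 + ies[1];
	}
	return NULL;
}

static int ssid_set(const unsigned char *ies, int len,
		    char *ssid_out, unsigned char *ssid_len_out)
{
	const unsigned char *ie = find_ie(EID_SSID, ies, len);

	if (!ie)
		return -1;             /* no SSID element at all */
	if (ie[1] > MAX_SSID_LEN)
		return -2;             /* malformed: SSID too long */

	*ssid_len_out = ie[1];
	memcpy(ssid_out, ie + 2, ie[1]);
	return 0;
}

int main(void)
{
	/* Supported-rates IE followed by an SSID IE ("wl12xx"). */
	const unsigned char ies[] = {
		1, 2, 0x82, 0x84,
		0, 6, 'w', 'l', '1', '2', 'x', 'x'
	};
	char ssid[MAX_SSID_LEN + 1] = { 0 };
	unsigned char ssid_len = 0;

	if (ssid_set(ies, sizeof(ies), ssid, &ssid_len) == 0)
		printf("ssid (%u bytes): %s\n", ssid_len, ssid);
	return 0;
}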
@@ -2264,24 +2732,19 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
2264 2732
2265 if ((changed & BSS_CHANGED_BASIC_RATES)) { 2733 if ((changed & BSS_CHANGED_BASIC_RATES)) {
2266 u32 rates = bss_conf->basic_rates; 2734 u32 rates = bss_conf->basic_rates;
2267 struct conf_tx_rate_class mgmt_rc;
2268 2735
2269 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates); 2736 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
2270 wl->basic_rate = wl1271_tx_min_rate_get(wl); 2737 wl->basic_rate = wl1271_tx_min_rate_get(wl);
2271 wl1271_debug(DEBUG_AP, "basic rates: 0x%x", 2738
2272 wl->basic_rate_set); 2739 ret = wl1271_init_ap_rates(wl);
2273
2274 /* update the AP management rate policy with the new rates */
2275 mgmt_rc.enabled_rates = wl->basic_rate_set;
2276 mgmt_rc.long_retry_limit = 10;
2277 mgmt_rc.short_retry_limit = 10;
2278 mgmt_rc.aflags = 0;
2279 ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
2280 ACX_TX_AP_MODE_MGMT_RATE);
2281 if (ret < 0) { 2740 if (ret < 0) {
2282 wl1271_error("AP mgmt policy change failed %d", ret); 2741 wl1271_error("AP rate policy change failed %d", ret);
2283 goto out; 2742 goto out;
2284 } 2743 }
2744
2745 ret = wl1271_ap_init_templates(wl);
2746 if (ret < 0)
2747 goto out;
2285 } 2748 }
2286 2749
2287 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed); 2750 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
@@ -2314,6 +2777,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
2314 } 2777 }
2315 } 2778 }
2316 2779
2780 if (changed & BSS_CHANGED_IBSS) {
2781 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
2782 bss_conf->ibss_joined);
2783
2784 if (bss_conf->ibss_joined) {
2785 u32 rates = bss_conf->basic_rates;
2786 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
2787 rates);
2788 wl->basic_rate = wl1271_tx_min_rate_get(wl);
2789
2790 /* by default, use 11b rates */
2791 wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
2792 ret = wl1271_acx_sta_rate_policies(wl);
2793 if (ret < 0)
2794 goto out;
2795 }
2796 }
2797
2317 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); 2798 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
2318 if (ret < 0) 2799 if (ret < 0)
2319 goto out; 2800 goto out;
@@ -2503,8 +2984,10 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
2503 } 2984 }
2504 } else { 2985 } else {
2505 /* use defaults when not associated */ 2986 /* use defaults when not associated */
2987 bool was_assoc =
2988 !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
2989 &wl->flags);
2506 clear_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags); 2990 clear_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags);
2507 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
2508 wl->aid = 0; 2991 wl->aid = 0;
2509 2992
2510 /* free probe-request template */ 2993 /* free probe-request template */
@@ -2530,8 +3013,10 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
2530 goto out; 3013 goto out;
2531 3014
2532 /* restore the bssid filter and go to dummy bssid */ 3015 /* restore the bssid filter and go to dummy bssid */
2533 wl1271_unjoin(wl); 3016 if (was_assoc) {
2534 wl1271_dummy_join(wl); 3017 wl1271_unjoin(wl);
3018 wl1271_dummy_join(wl);
3019 }
2535 } 3020 }
2536 } 3021 }
2537 3022
@@ -2650,32 +3135,31 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
2650 conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY; 3135 conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
2651 conf_tid->apsd_conf[0] = 0; 3136 conf_tid->apsd_conf[0] = 0;
2652 conf_tid->apsd_conf[1] = 0; 3137 conf_tid->apsd_conf[1] = 0;
2653 } else { 3138 goto out;
2654 ret = wl1271_ps_elp_wakeup(wl); 3139 }
2655 if (ret < 0)
2656 goto out;
2657 3140
2658 /* 3141 ret = wl1271_ps_elp_wakeup(wl);
2659 * the txop is confed in units of 32us by the mac80211, 3142 if (ret < 0)
2660 * we need us 3143 goto out;
2661 */
2662 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
2663 params->cw_min, params->cw_max,
2664 params->aifs, params->txop << 5);
2665 if (ret < 0)
2666 goto out_sleep;
2667 3144
2668 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), 3145 /*
2669 CONF_CHANNEL_TYPE_EDCF, 3146 * the txop is confed in units of 32us by the mac80211,
2670 wl1271_tx_get_queue(queue), 3147 * we need us
2671 ps_scheme, CONF_ACK_POLICY_LEGACY, 3148 */
2672 0, 0); 3149 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
2673 if (ret < 0) 3150 params->cw_min, params->cw_max,
2674 goto out_sleep; 3151 params->aifs, params->txop << 5);
3152 if (ret < 0)
3153 goto out_sleep;
3154
3155 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
3156 CONF_CHANNEL_TYPE_EDCF,
3157 wl1271_tx_get_queue(queue),
3158 ps_scheme, CONF_ACK_POLICY_LEGACY,
3159 0, 0);
2675 3160
2676out_sleep: 3161out_sleep:
2677 wl1271_ps_elp_sleep(wl); 3162 wl1271_ps_elp_sleep(wl);
2678 }
2679 3163
2680out: 3164out:
2681 mutex_unlock(&wl->mutex); 3165 mutex_unlock(&wl->mutex);
@@ -2847,10 +3331,11 @@ out:
2847 return ret; 3331 return ret;
2848} 3332}
2849 3333
2850int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 3334static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
2851 enum ieee80211_ampdu_mlme_action action, 3335 struct ieee80211_vif *vif,
2852 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 3336 enum ieee80211_ampdu_mlme_action action,
2853 u8 buf_size) 3337 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
3338 u8 buf_size)
2854{ 3339{
2855 struct wl1271 *wl = hw->priv; 3340 struct wl1271 *wl = hw->priv;
2856 int ret; 3341 int ret;
@@ -2907,6 +3392,28 @@ out:
2907 return ret; 3392 return ret;
2908} 3393}
2909 3394
3395static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
3396{
3397 struct wl1271 *wl = hw->priv;
3398 bool ret = false;
3399
3400 mutex_lock(&wl->mutex);
3401
3402 if (unlikely(wl->state == WL1271_STATE_OFF))
3403 goto out;
3404
3405 /* packets are considered pending if in the TX queue or the FW */
3406 ret = (wl->tx_queue_count > 0) || (wl->tx_frames_cnt > 0);
3407
3408 /* the above is appropriate for STA mode for PS purposes */
3409 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
3410
3411out:
3412 mutex_unlock(&wl->mutex);
3413
3414 return ret;
3415}
3416
2910/* can't be const, mac80211 writes to this */ 3417/* can't be const, mac80211 writes to this */
2911static struct ieee80211_rate wl1271_rates[] = { 3418static struct ieee80211_rate wl1271_rates[] = {
2912 { .bitrate = 10, 3419 { .bitrate = 10,
@@ -3003,7 +3510,8 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
3003 3510
3004#ifdef CONFIG_WL12XX_HT 3511#ifdef CONFIG_WL12XX_HT
3005#define WL12XX_HT_CAP { \ 3512#define WL12XX_HT_CAP { \
3006 .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20, \ 3513 .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
3514 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
3007 .ht_supported = true, \ 3515 .ht_supported = true, \
3008 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \ 3516 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
3009 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \ 3517 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
@@ -3142,12 +3650,16 @@ static const struct ieee80211_ops wl1271_ops = {
3142 .stop = wl1271_op_stop, 3650 .stop = wl1271_op_stop,
3143 .add_interface = wl1271_op_add_interface, 3651 .add_interface = wl1271_op_add_interface,
3144 .remove_interface = wl1271_op_remove_interface, 3652 .remove_interface = wl1271_op_remove_interface,
3653 .suspend = wl1271_op_suspend,
3654 .resume = wl1271_op_resume,
3145 .config = wl1271_op_config, 3655 .config = wl1271_op_config,
3146 .prepare_multicast = wl1271_op_prepare_multicast, 3656 .prepare_multicast = wl1271_op_prepare_multicast,
3147 .configure_filter = wl1271_op_configure_filter, 3657 .configure_filter = wl1271_op_configure_filter,
3148 .tx = wl1271_op_tx, 3658 .tx = wl1271_op_tx,
3149 .set_key = wl1271_op_set_key, 3659 .set_key = wl1271_op_set_key,
3150 .hw_scan = wl1271_op_hw_scan, 3660 .hw_scan = wl1271_op_hw_scan,
3661 .sched_scan_start = wl1271_op_sched_scan_start,
3662 .sched_scan_stop = wl1271_op_sched_scan_stop,
3151 .bss_info_changed = wl1271_op_bss_info_changed, 3663 .bss_info_changed = wl1271_op_bss_info_changed,
3152 .set_frag_threshold = wl1271_op_set_frag_threshold, 3664 .set_frag_threshold = wl1271_op_set_frag_threshold,
3153 .set_rts_threshold = wl1271_op_set_rts_threshold, 3665 .set_rts_threshold = wl1271_op_set_rts_threshold,
@@ -3157,6 +3669,7 @@ static const struct ieee80211_ops wl1271_ops = {
3157 .sta_add = wl1271_op_sta_add, 3669 .sta_add = wl1271_op_sta_add,
3158 .sta_remove = wl1271_op_sta_remove, 3670 .sta_remove = wl1271_op_sta_remove,
3159 .ampdu_action = wl1271_op_ampdu_action, 3671 .ampdu_action = wl1271_op_ampdu_action,
3672 .tx_frames_pending = wl1271_tx_frames_pending,
3160 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 3673 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
3161}; 3674};
3162 3675
@@ -3207,8 +3720,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
3207 unsigned long res; 3720 unsigned long res;
3208 int ret; 3721 int ret;
3209 3722
3210 ret = strict_strtoul(buf, 10, &res); 3723 ret = kstrtoul(buf, 10, &res);
3211
3212 if (ret < 0) { 3724 if (ret < 0) {
3213 wl1271_warning("incorrect value written to bt_coex_mode"); 3725 wl1271_warning("incorrect value written to bt_coex_mode");
3214 return count; 3726 return count;
@@ -3273,7 +3785,11 @@ int wl1271_register_hw(struct wl1271 *wl)
3273 3785
3274 ret = wl1271_fetch_nvs(wl); 3786 ret = wl1271_fetch_nvs(wl);
3275 if (ret == 0) { 3787 if (ret == 0) {
3276 u8 *nvs_ptr = (u8 *)wl->nvs->nvs; 3788 /* NOTE: The wl->nvs->nvs element must be first, in
3789 * order to simplify the casting, we assume it is at
3790 * the beginning of the wl->nvs structure.
3791 */
3792 u8 *nvs_ptr = (u8 *)wl->nvs;
3277 3793
3278 wl->mac_addr[0] = nvs_ptr[11]; 3794 wl->mac_addr[0] = nvs_ptr[11];
3279 wl->mac_addr[1] = nvs_ptr[10]; 3795 wl->mac_addr[1] = nvs_ptr[10];
@@ -3342,6 +3858,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
3342 IEEE80211_HW_CONNECTION_MONITOR | 3858 IEEE80211_HW_CONNECTION_MONITOR |
3343 IEEE80211_HW_SUPPORTS_CQM_RSSI | 3859 IEEE80211_HW_SUPPORTS_CQM_RSSI |
3344 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 3860 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
3861 IEEE80211_HW_SPECTRUM_MGMT |
3345 IEEE80211_HW_AP_LINK_PS; 3862 IEEE80211_HW_AP_LINK_PS;
3346 3863
3347 wl->hw->wiphy->cipher_suites = cipher_suites; 3864 wl->hw->wiphy->cipher_suites = cipher_suites;
@@ -3358,6 +3875,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
3358 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 3875 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
3359 sizeof(struct ieee80211_header); 3876 sizeof(struct ieee80211_header);
3360 3877
3878 /* make sure all our channels fit in the scanned_ch bitmask */
3879 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
3880 ARRAY_SIZE(wl1271_channels_5ghz) >
3881 WL1271_MAX_CHANNELS);
3361 /* 3882 /*
3362 * We keep local copies of the band structs because we need to 3883 * We keep local copies of the band structs because we need to
3363 * modify them on a per-device basis. 3884 * modify them on a per-device basis.
@@ -3458,6 +3979,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
3458 wl->ap_ps_map = 0; 3979 wl->ap_ps_map = 0;
3459 wl->ap_fw_ps_map = 0; 3980 wl->ap_fw_ps_map = 0;
3460 wl->quirks = 0; 3981 wl->quirks = 0;
3982 wl->platform_quirks = 0;
3983 wl->sched_scanning = false;
3461 3984
3462 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 3985 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
3463 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 3986 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -3478,11 +4001,17 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
3478 goto err_hw; 4001 goto err_hw;
3479 } 4002 }
3480 4003
4004 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
4005 if (!wl->dummy_packet) {
4006 ret = -ENOMEM;
4007 goto err_aggr;
4008 }
4009
3481 /* Register platform device */ 4010 /* Register platform device */
3482 ret = platform_device_register(wl->plat_dev); 4011 ret = platform_device_register(wl->plat_dev);
3483 if (ret) { 4012 if (ret) {
3484 wl1271_error("couldn't register platform device"); 4013 wl1271_error("couldn't register platform device");
3485 goto err_aggr; 4014 goto err_dummy_packet;
3486 } 4015 }
3487 dev_set_drvdata(&wl->plat_dev->dev, wl); 4016 dev_set_drvdata(&wl->plat_dev->dev, wl);
3488 4017
@@ -3508,6 +4037,9 @@ err_bt_coex_state:
3508err_platform: 4037err_platform:
3509 platform_device_unregister(wl->plat_dev); 4038 platform_device_unregister(wl->plat_dev);
3510 4039
4040err_dummy_packet:
4041 dev_kfree_skb(wl->dummy_packet);
4042
3511err_aggr: 4043err_aggr:
3512 free_pages((unsigned long)wl->aggr_buf, order); 4044 free_pages((unsigned long)wl->aggr_buf, order);
3513 4045
@@ -3527,6 +4059,7 @@ EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
3527int wl1271_free_hw(struct wl1271 *wl) 4059int wl1271_free_hw(struct wl1271 *wl)
3528{ 4060{
3529 platform_device_unregister(wl->plat_dev); 4061 platform_device_unregister(wl->plat_dev);
4062 dev_kfree_skb(wl->dummy_packet);
3530 free_pages((unsigned long)wl->aggr_buf, 4063 free_pages((unsigned long)wl->aggr_buf,
3531 get_order(WL1271_AGGR_BUFFER_SIZE)); 4064 get_order(WL1271_AGGR_BUFFER_SIZE));
3532 kfree(wl->plat_dev); 4065 kfree(wl->plat_dev);
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 971f13e792da..b59b67711a17 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -43,6 +43,10 @@ void wl1271_elp_work(struct work_struct *work)
43 if (unlikely(wl->state == WL1271_STATE_OFF)) 43 if (unlikely(wl->state == WL1271_STATE_OFF))
44 goto out; 44 goto out;
45 45
46	 /* our work might already have been cancelled */
47 if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
48 goto out;
49
46 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || 50 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
47 (!test_bit(WL1271_FLAG_PSM, &wl->flags) && 51 (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
48 !test_bit(WL1271_FLAG_IDLE, &wl->flags))) 52 !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
@@ -61,12 +65,16 @@ out:
61/* Routines to toggle sleep mode while in ELP */ 65/* Routines to toggle sleep mode while in ELP */
62void wl1271_ps_elp_sleep(struct wl1271 *wl) 66void wl1271_ps_elp_sleep(struct wl1271 *wl)
63{ 67{
64 if (test_bit(WL1271_FLAG_PSM, &wl->flags) || 68 /* we shouldn't get consecutive sleep requests */
65 test_bit(WL1271_FLAG_IDLE, &wl->flags)) { 69 if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
66 cancel_delayed_work(&wl->elp_work); 70 return;
67 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 71
68 msecs_to_jiffies(ELP_ENTRY_DELAY)); 72 if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
69 } 73 !test_bit(WL1271_FLAG_IDLE, &wl->flags))
74 return;
75
76 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
77 msecs_to_jiffies(ELP_ENTRY_DELAY));
70} 78}
71 79
72int wl1271_ps_elp_wakeup(struct wl1271 *wl) 80int wl1271_ps_elp_wakeup(struct wl1271 *wl)
@@ -77,6 +85,16 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
77 u32 start_time = jiffies; 85 u32 start_time = jiffies;
78 bool pending = false; 86 bool pending = false;
79 87
88 /*
89 * we might try to wake up even if we didn't go to sleep
90 * before (e.g. on boot)
91 */
92 if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
93 return 0;
94
95 /* don't cancel_sync as it might contend for a mutex and deadlock */
96 cancel_delayed_work(&wl->elp_work);
97
80 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) 98 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
81 return 0; 99 return 0;
82 100
@@ -149,9 +167,6 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
149 case STATION_ACTIVE_MODE: 167 case STATION_ACTIVE_MODE:
150 default: 168 default:
151 wl1271_debug(DEBUG_PSM, "leaving psm"); 169 wl1271_debug(DEBUG_PSM, "leaving psm");
152 ret = wl1271_ps_elp_wakeup(wl);
153 if (ret < 0)
154 return ret;
155 170
156 /* disable beacon early termination */ 171 /* disable beacon early termination */
157 ret = wl1271_acx_bet_enable(wl, false); 172 ret = wl1271_acx_bet_enable(wl, false);
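
The ps.c hunks above add a request/acknowledge handshake around ELP: wl1271_ps_elp_sleep() refuses consecutive requests by atomically setting WL1271_FLAG_ELP_REQUESTED, wl1271_elp_work() bails out if the request was withdrawn in the meantime, and wl1271_ps_elp_wakeup() clears the flag and cancels the delayed work without the _sync variant so it cannot contend for the driver mutex. A minimal sketch of the same pattern, using hypothetical names (my_dev, MY_ELP_REQUESTED) rather than the driver's structures:

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_ELP_REQUESTED	0	/* bit index into dev->flags */

struct my_dev {
	unsigned long flags;
	struct delayed_work elp_work;
};

static void my_elp_sleep(struct my_dev *dev)
{
	/* reject consecutive sleep requests */
	if (test_and_set_bit(MY_ELP_REQUESTED, &dev->flags))
		return;

	schedule_delayed_work(&dev->elp_work, msecs_to_jiffies(5));
}

static int my_elp_wakeup(struct my_dev *dev)
{
	/* nothing to undo if no sleep was requested (e.g. on boot) */
	if (!test_and_clear_bit(MY_ELP_REQUESTED, &dev->flags))
		return 0;

	/* plain cancel: the _sync variant could deadlock on a mutex */
	cancel_delayed_work(&dev->elp_work);
	return 0;
}

The work handler would recheck the bit before touching the hardware, mirroring the early-out added to wl1271_elp_work().
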
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index c41bd0a711bc..25eb9bc9b628 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -35,4 +35,6 @@ void wl1271_elp_work(struct work_struct *work);
35void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues); 35void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
36void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid); 36void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
37 37
38#define WL1271_PS_COMPLETE_TIMEOUT 500
39
38#endif /* __WL1271_PS_H__ */ 40#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 990960771528..440a4ee9cb42 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -207,6 +207,8 @@
207 207
208#define CHIP_ID_1271_PG10 (0x4030101) 208#define CHIP_ID_1271_PG10 (0x4030101)
209#define CHIP_ID_1271_PG20 (0x4030111) 209#define CHIP_ID_1271_PG20 (0x4030111)
210#define CHIP_ID_1283_PG10 (0x05030101)
211#define CHIP_ID_1283_PG20 (0x05030111)
210 212
211#define ENABLE (REGISTERS_BASE + 0x5450) 213#define ENABLE (REGISTERS_BASE + 0x5450)
212 214
@@ -452,24 +454,11 @@
452#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200 454#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200
453#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400 455#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400
454 456
455/*
456 * NOTE: USE_ACTIVE_HIGH compilation flag should be defined in makefile
457 * for platforms using active high interrupt level
458 */
459#ifdef USE_ACTIVE_HIGH
460#define HI_CFG_DEF_VAL \ 457#define HI_CFG_DEF_VAL \
461 (HI_CFG_UART_ENABLE | \ 458 (HI_CFG_UART_ENABLE | \
462 HI_CFG_RST232_ENABLE | \ 459 HI_CFG_RST232_ENABLE | \
463 HI_CFG_CLOCK_REQ_SELECT | \ 460 HI_CFG_CLOCK_REQ_SELECT | \
464 HI_CFG_HOST_INT_ENABLE) 461 HI_CFG_HOST_INT_ENABLE)
465#else
466#define HI_CFG_DEF_VAL \
467 (HI_CFG_UART_ENABLE | \
468 HI_CFG_RST232_ENABLE | \
469 HI_CFG_CLOCK_REQ_SELECT | \
470 HI_CFG_HOST_INT_ENABLE)
471
472#endif
473 462
474#define REF_FREQ_19_2 0 463#define REF_FREQ_19_2 0
475#define REF_FREQ_26_0 1 464#define REF_FREQ_26_0 1
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 919b59f00301..70091035e019 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -48,18 +48,14 @@ static void wl1271_rx_status(struct wl1271 *wl,
48 struct ieee80211_rx_status *status, 48 struct ieee80211_rx_status *status,
49 u8 beacon) 49 u8 beacon)
50{ 50{
51 enum ieee80211_band desc_band;
52
53 memset(status, 0, sizeof(struct ieee80211_rx_status)); 51 memset(status, 0, sizeof(struct ieee80211_rx_status));
54 52
55 status->band = wl->band;
56
57 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) 53 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
58 desc_band = IEEE80211_BAND_2GHZ; 54 status->band = IEEE80211_BAND_2GHZ;
59 else 55 else
60 desc_band = IEEE80211_BAND_5GHZ; 56 status->band = IEEE80211_BAND_5GHZ;
61 57
62 status->rate_idx = wl1271_rate_to_idx(desc->rate, desc_band); 58 status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band);
63 59
64#ifdef CONFIG_WL12XX_HT 60#ifdef CONFIG_WL12XX_HT
65 /* 11n support */ 61 /* 11n support */
@@ -76,15 +72,19 @@ static void wl1271_rx_status(struct wl1271 *wl,
76 */ 72 */
77 wl->noise = desc->rssi - (desc->snr >> 1); 73 wl->noise = desc->rssi - (desc->snr >> 1);
78 74
79 status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band); 75 status->freq = ieee80211_channel_to_frequency(desc->channel,
76 status->band);
80 77
81 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 78 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
82 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 79 u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK;
80
81 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
82 RX_FLAG_DECRYPTED;
83 83
84 if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL))) 84 if (unlikely(desc_err_code == WL1271_RX_DESC_MIC_FAIL)) {
85 status->flag |= RX_FLAG_DECRYPTED;
86 if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL))
87 status->flag |= RX_FLAG_MMIC_ERROR; 85 status->flag |= RX_FLAG_MMIC_ERROR;
86 wl1271_warning("Michael MIC error");
87 }
88 } 88 }
89} 89}
90 90
@@ -103,6 +103,25 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
103 if (unlikely(wl->state == WL1271_STATE_PLT)) 103 if (unlikely(wl->state == WL1271_STATE_PLT))
104 return -EINVAL; 104 return -EINVAL;
105 105
106 /* the data read starts with the descriptor */
107 desc = (struct wl1271_rx_descriptor *) data;
108
109 switch (desc->status & WL1271_RX_DESC_STATUS_MASK) {
110 /* discard corrupted packets */
111 case WL1271_RX_DESC_DRIVER_RX_Q_FAIL:
112 case WL1271_RX_DESC_DECRYPT_FAIL:
113 wl1271_warning("corrupted packet in RX with status: 0x%x",
114 desc->status & WL1271_RX_DESC_STATUS_MASK);
115 return -EINVAL;
116 case WL1271_RX_DESC_SUCCESS:
117 case WL1271_RX_DESC_MIC_FAIL:
118 break;
119 default:
120 wl1271_error("invalid RX descriptor status: 0x%x",
121 desc->status & WL1271_RX_DESC_STATUS_MASK);
122 return -EINVAL;
123 }
124
106 skb = __dev_alloc_skb(length, GFP_KERNEL); 125 skb = __dev_alloc_skb(length, GFP_KERNEL);
107 if (!skb) { 126 if (!skb) {
108 wl1271_error("Couldn't allocate RX frame"); 127 wl1271_error("Couldn't allocate RX frame");
@@ -112,9 +131,6 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
112 buf = skb_put(skb, length); 131 buf = skb_put(skb, length);
113 memcpy(buf, data, length); 132 memcpy(buf, data, length);
114 133
115 /* the data read starts with the descriptor */
116 desc = (struct wl1271_rx_descriptor *) buf;
117
118 /* now we pull the descriptor out of the buffer */ 134 /* now we pull the descriptor out of the buffer */
119 skb_pull(skb, sizeof(*desc)); 135 skb_pull(skb, sizeof(*desc));
120 136
@@ -124,7 +140,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
124 140
125 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 141 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
126 142
127 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, 143 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb,
144 skb->len - desc->pad_len,
128 beacon ? "beacon" : ""); 145 beacon ? "beacon" : "");
129 146
130 skb_trim(skb, skb->len - desc->pad_len); 147 skb_trim(skb, skb->len - desc->pad_len);
@@ -163,18 +180,25 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
163 break; 180 break;
164 } 181 }
165 182
166 /* 183 if (wl->chip.id != CHIP_ID_1283_PG20) {
167 * Choose the block we want to read 184 /*
168 * For aggregated packets, only the first memory block should 185 * Choose the block we want to read
169 * be retrieved. The FW takes care of the rest. 186 * For aggregated packets, only the first memory block
170 */ 187 * should be retrieved. The FW takes care of the rest.
171 mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter); 188 */
172 wl->rx_mem_pool_addr.addr = (mem_block << 8) + 189 mem_block = wl1271_rx_get_mem_block(status,
173 le32_to_cpu(wl_mem_map->packet_memory_pool_start); 190 drv_rx_counter);
174 wl->rx_mem_pool_addr.addr_extra = 191
175 wl->rx_mem_pool_addr.addr + 4; 192 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
176 wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr, 193 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
177 sizeof(wl->rx_mem_pool_addr), false); 194
195 wl->rx_mem_pool_addr.addr_extra =
196 wl->rx_mem_pool_addr.addr + 4;
197
198 wl1271_write(wl, WL1271_SLV_REG_DATA,
199 &wl->rx_mem_pool_addr,
200 sizeof(wl->rx_mem_pool_addr), false);
201 }
178 202
179 /* Read all available packets at once */ 203 /* Read all available packets at once */
180 wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 204 wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
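
The rx.c changes read the descriptor straight out of the aggregation buffer and classify the packet by its status code before any skb is allocated: queue and decryption failures are dropped, while MIC failures are still delivered but flagged with RX_FLAG_MMIC_ERROR so mac80211 can run TKIP countermeasures. A compact restatement of that three-way decision, with hypothetical names standing in for the real masks and codes from rx.h:

#include <linux/types.h>

enum my_rx_action {
	MY_RX_DROP,		/* corrupted, never reaches mac80211 */
	MY_RX_DELIVER,
	MY_RX_DELIVER_MIC_ERR,	/* delivered, flagged for countermeasures */
};

static enum my_rx_action my_classify_rx(u8 status, u8 status_mask,
					u8 ok_code, u8 mic_fail_code,
					u8 q_fail_code, u8 decrypt_fail_code)
{
	u8 code = status & status_mask;

	if (code == q_fail_code || code == decrypt_fail_code)
		return MY_RX_DROP;
	if (code == mic_fail_code)
		return MY_RX_DELIVER_MIC_ERR;
	if (code == ok_code)
		return MY_RX_DELIVER;

	return MY_RX_DROP;	/* unknown status code: treat as corrupted */
}

Doing the check before __dev_alloc_skb() means a corrupted frame never costs an allocation.
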
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 420653a2859c..f37e5a391976 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -48,8 +48,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
48 goto out; 48 goto out;
49 49
50 wl->scan.state = WL1271_SCAN_STATE_IDLE; 50 wl->scan.state = WL1271_SCAN_STATE_IDLE;
51 kfree(wl->scan.scanned_ch); 51 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
52 wl->scan.scanned_ch = NULL;
53 wl->scan.req = NULL; 52 wl->scan.req = NULL;
54 ieee80211_scan_completed(wl->hw, false); 53 ieee80211_scan_completed(wl->hw, false);
55 54
@@ -87,7 +86,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
87 86
88 flags = req->channels[i]->flags; 87 flags = req->channels[i]->flags;
89 88
90 if (!wl->scan.scanned_ch[i] && 89 if (!test_bit(i, wl->scan.scanned_ch) &&
91 !(flags & IEEE80211_CHAN_DISABLED) && 90 !(flags & IEEE80211_CHAN_DISABLED) &&
92 ((!!(flags & IEEE80211_CHAN_PASSIVE_SCAN)) == passive) && 91 ((!!(flags & IEEE80211_CHAN_PASSIVE_SCAN)) == passive) &&
93 (req->channels[i]->band == band)) { 92 (req->channels[i]->band == band)) {
@@ -124,7 +123,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
124 memset(&channels[j].bssid_msb, 0xff, 2); 123 memset(&channels[j].bssid_msb, 0xff, 2);
125 124
126 /* Mark the channels we already used */ 125 /* Mark the channels we already used */
127 wl->scan.scanned_ch[i] = true; 126 set_bit(i, wl->scan.scanned_ch);
128 127
129 j++; 128 j++;
130 } 129 }
@@ -291,6 +290,12 @@ void wl1271_scan_stm(struct wl1271 *wl)
291int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, 290int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
292 struct cfg80211_scan_request *req) 291 struct cfg80211_scan_request *req)
293{ 292{
293 /*
294 * cfg80211 should guarantee that we don't get more channels
295 * than what we have registered.
296 */
297 BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
298
294 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) 299 if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
295 return -EBUSY; 300 return -EBUSY;
296 301
@@ -304,10 +309,8 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
304 } 309 }
305 310
306 wl->scan.req = req; 311 wl->scan.req = req;
312 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
307 313
308 wl->scan.scanned_ch = kcalloc(req->n_channels,
309 sizeof(*wl->scan.scanned_ch),
310 GFP_KERNEL);
311 /* we assume failure so that timeout scenarios are handled correctly */ 314 /* we assume failure so that timeout scenarios are handled correctly */
312 wl->scan.failed = true; 315 wl->scan.failed = true;
313 ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, 316 ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
@@ -317,3 +320,246 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
317 320
318 return 0; 321 return 0;
319} 322}
323
324static int
325wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
326 struct cfg80211_sched_scan_request *req,
327 struct conn_scan_ch_params *channels,
328 u32 band, bool radar, bool passive,
329 int start)
330{
331 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
332 int i, j;
333 u32 flags;
334
335 for (i = 0, j = start;
336 i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
337 i++) {
338 flags = req->channels[i]->flags;
339
340 if (!(flags & IEEE80211_CHAN_DISABLED) &&
341 ((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) &&
342 ((flags & IEEE80211_CHAN_RADAR) == radar) &&
343 (req->channels[i]->band == band)) {
344 wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
345 req->channels[i]->band,
346 req->channels[i]->center_freq);
347 wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
348 req->channels[i]->hw_value,
349 req->channels[i]->flags);
350 wl1271_debug(DEBUG_SCAN, "max_power %d",
351 req->channels[i]->max_power);
352
353 if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
354 channels[j].passive_duration =
355 cpu_to_le16(c->dwell_time_passive);
356 } else {
357 channels[j].min_duration =
358 cpu_to_le16(c->min_dwell_time_active);
359 channels[j].max_duration =
360 cpu_to_le16(c->max_dwell_time_active);
361 }
362			channels[j].tx_power_att = req->channels[i]->max_power;
363 channels[j].channel = req->channels[i]->hw_value;
364
365 j++;
366 }
367 }
368
369 return j - start;
370}
371
372static int
373wl1271_scan_sched_scan_channels(struct wl1271 *wl,
374 struct cfg80211_sched_scan_request *req,
375 struct wl1271_cmd_sched_scan_config *cfg)
376{
377 int idx = 0;
378
379 cfg->passive[0] =
380 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
381 IEEE80211_BAND_2GHZ,
382 false, true, idx);
383 idx += cfg->passive[0];
384
385 cfg->active[0] =
386 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
387 IEEE80211_BAND_2GHZ,
388 false, false, idx);
389 idx += cfg->active[0];
390
391 cfg->passive[1] =
392 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
393 IEEE80211_BAND_5GHZ,
394 false, true, idx);
395 idx += cfg->passive[1];
396
397 cfg->active[1] =
398 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
399 IEEE80211_BAND_5GHZ,
400					    false, false, idx);
401 idx += cfg->active[1];
402
403 cfg->dfs =
404 wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
405 IEEE80211_BAND_5GHZ,
406 true, false, idx);
407 idx += cfg->dfs;
408
409 wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
410 cfg->active[0], cfg->passive[0]);
411 wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
412 cfg->active[1], cfg->passive[1]);
413
414 return idx;
415}
416
417int wl1271_scan_sched_scan_config(struct wl1271 *wl,
418 struct cfg80211_sched_scan_request *req,
419 struct ieee80211_sched_scan_ies *ies)
420{
421 struct wl1271_cmd_sched_scan_config *cfg = NULL;
422 struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
423 int i, total_channels, ret;
424
425 wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
426
427 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
428 if (!cfg)
429 return -ENOMEM;
430
431 cfg->rssi_threshold = c->rssi_threshold;
432 cfg->snr_threshold = c->snr_threshold;
433 cfg->n_probe_reqs = c->num_probe_reqs;
434	/* cycles set to 0 means infinite (until manually stopped) */
435 cfg->cycles = 0;
436 /* report APs when at least 1 is found */
437 cfg->report_after = 1;
438 /* don't stop scanning automatically when something is found */
439 cfg->terminate = 0;
440 cfg->tag = WL1271_SCAN_DEFAULT_TAG;
441 /* don't filter on BSS type */
442 cfg->bss_type = SCAN_BSS_TYPE_ANY;
443 /* currently NL80211 supports only a single interval */
444 for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
445 cfg->intervals[i] = cpu_to_le32(req->interval);
446
447 if (req->ssids[0].ssid_len && req->ssids[0].ssid) {
448 cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
449 cfg->ssid_len = req->ssids[0].ssid_len;
450 memcpy(cfg->ssid, req->ssids[0].ssid,
451 req->ssids[0].ssid_len);
452 } else {
453 cfg->filter_type = SCAN_SSID_FILTER_ANY;
454 cfg->ssid_len = 0;
455 }
456
457 total_channels = wl1271_scan_sched_scan_channels(wl, req, cfg);
458 if (total_channels == 0) {
459 wl1271_error("scan channel list is empty");
460 ret = -EINVAL;
461 goto out;
462 }
463
464 if (cfg->active[0]) {
465 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
466 req->ssids[0].ssid_len,
467 ies->ie[IEEE80211_BAND_2GHZ],
468 ies->len[IEEE80211_BAND_2GHZ],
469 IEEE80211_BAND_2GHZ);
470 if (ret < 0) {
471 wl1271_error("2.4GHz PROBE request template failed");
472 goto out;
473 }
474 }
475
476 if (cfg->active[1]) {
477 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
478 req->ssids[0].ssid_len,
479 ies->ie[IEEE80211_BAND_5GHZ],
480 ies->len[IEEE80211_BAND_5GHZ],
481 IEEE80211_BAND_5GHZ);
482 if (ret < 0) {
483 wl1271_error("5GHz PROBE request template failed");
484 goto out;
485 }
486 }
487
488 wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
489
490 ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
491 sizeof(*cfg), 0);
492 if (ret < 0) {
493 wl1271_error("SCAN configuration failed");
494 goto out;
495 }
496out:
497 kfree(cfg);
498 return ret;
499}
500
501int wl1271_scan_sched_scan_start(struct wl1271 *wl)
502{
503 struct wl1271_cmd_sched_scan_start *start;
504 int ret = 0;
505
506 wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
507
508 if (wl->bss_type != BSS_TYPE_STA_BSS)
509 return -EOPNOTSUPP;
510
511 if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
512 return -EBUSY;
513
514 start = kzalloc(sizeof(*start), GFP_KERNEL);
515 if (!start)
516 return -ENOMEM;
517
518 start->tag = WL1271_SCAN_DEFAULT_TAG;
519
520 ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
521 sizeof(*start), 0);
522 if (ret < 0) {
523 wl1271_error("failed to send scan start command");
524 goto out_free;
525 }
526
527out_free:
528 kfree(start);
529 return ret;
530}
531
532void wl1271_scan_sched_scan_results(struct wl1271 *wl)
533{
534 wl1271_debug(DEBUG_SCAN, "got periodic scan results");
535
536 ieee80211_sched_scan_results(wl->hw);
537}
538
539void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
540{
541 struct wl1271_cmd_sched_scan_stop *stop;
542 int ret = 0;
543
544 wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
545
546 /* FIXME: what to do if alloc'ing to stop fails? */
547 stop = kzalloc(sizeof(*stop), GFP_KERNEL);
548 if (!stop) {
549 wl1271_error("failed to alloc memory to send sched scan stop");
550 return;
551 }
552
553 stop->tag = WL1271_SCAN_DEFAULT_TAG;
554
555 ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
556 sizeof(*stop), 0);
557 if (ret < 0) {
558 wl1271_error("failed to send sched scan stop command");
559 goto out_free;
560 }
561 wl->sched_scanning = false;
562
563out_free:
564 kfree(stop);
565}
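
Together with the wl12xx.h hunk further down, these scan.c changes replace the per-scan kcalloc'd bool array with a fixed-size bitmap embedded in struct wl1271_scan: it is cleared with memset() when a scan starts or completes and queried with test_bit()/set_bit(), and the BUG_ON() keeps an oversized request from indexing past it. A minimal sketch of the same bookkeeping, with hypothetical names:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_CHANNELS	64

struct my_scan {
	DECLARE_BITMAP(scanned_ch, MY_MAX_CHANNELS);
};

static void my_scan_reset(struct my_scan *scan)
{
	/* equivalent to the memset() of scanned_ch in the patch */
	bitmap_zero(scan->scanned_ch, MY_MAX_CHANNELS);
}

static bool my_channel_pending(const struct my_scan *scan, int idx)
{
	return !test_bit(idx, scan->scanned_ch);
}

static void my_channel_done(struct my_scan *scan, int idx)
{
	set_bit(idx, scan->scanned_ch);
}

The embedded bitmap drops one allocation and kfree() per scan and lets the completion path simply re-zero it.
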
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 421a750add5a..c83319579ca3 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -33,6 +33,12 @@ int wl1271_scan_build_probe_req(struct wl1271 *wl,
33 const u8 *ie, size_t ie_len, u8 band); 33 const u8 *ie, size_t ie_len, u8 band);
34void wl1271_scan_stm(struct wl1271 *wl); 34void wl1271_scan_stm(struct wl1271 *wl);
35void wl1271_scan_complete_work(struct work_struct *work); 35void wl1271_scan_complete_work(struct work_struct *work);
36int wl1271_scan_sched_scan_config(struct wl1271 *wl,
37 struct cfg80211_sched_scan_request *req,
38 struct ieee80211_sched_scan_ies *ies);
39int wl1271_scan_sched_scan_start(struct wl1271 *wl);
40void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
41void wl1271_scan_sched_scan_results(struct wl1271 *wl);
36 42
37#define WL1271_SCAN_MAX_CHANNELS 24 43#define WL1271_SCAN_MAX_CHANNELS 24
38#define WL1271_SCAN_DEFAULT_TAG 1 44#define WL1271_SCAN_DEFAULT_TAG 1
@@ -106,4 +112,112 @@ struct wl1271_cmd_trigger_scan_to {
106 __le32 timeout; 112 __le32 timeout;
107} __packed; 113} __packed;
108 114
115#define MAX_CHANNELS_ALL_BANDS 41
116#define SCAN_MAX_CYCLE_INTERVALS 16
117#define SCAN_MAX_BANDS 3
118
119enum {
120 SCAN_CHANNEL_TYPE_2GHZ_PASSIVE,
121 SCAN_CHANNEL_TYPE_2GHZ_ACTIVE,
122 SCAN_CHANNEL_TYPE_5GHZ_PASSIVE,
123 SCAN_CHANNEL_TYPE_5GHZ_ACTIVE,
124 SCAN_CHANNEL_TYPE_5GHZ_DFS,
125};
126
127enum {
128 SCAN_SSID_FILTER_ANY = 0,
129 SCAN_SSID_FILTER_SPECIFIC = 1,
130 SCAN_SSID_FILTER_LIST = 2,
131 SCAN_SSID_FILTER_DISABLED = 3
132};
133
134enum {
135 SCAN_BSS_TYPE_INDEPENDENT,
136 SCAN_BSS_TYPE_INFRASTRUCTURE,
137 SCAN_BSS_TYPE_ANY,
138};
139
140struct conn_scan_ch_params {
141 __le16 min_duration;
142 __le16 max_duration;
143 __le16 passive_duration;
144
145 u8 channel;
146 u8 tx_power_att;
147
148 /* bit 0: DFS channel; bit 1: DFS enabled */
149 u8 flags;
150
151 u8 padding[3];
152} __packed;
153
154struct wl1271_cmd_sched_scan_config {
155 struct wl1271_cmd_header header;
156
157 __le32 intervals[SCAN_MAX_CYCLE_INTERVALS];
158
159 s8 rssi_threshold; /* for filtering (in dBm) */
160 s8 snr_threshold; /* for filtering (in dB) */
161
162 u8 cycles; /* maximum number of scan cycles */
163 u8 report_after; /* report when this number of results are received */
164 u8 terminate; /* stop scanning after reporting */
165
166 u8 tag;
167 u8 bss_type; /* for filtering */
168 u8 filter_type;
169
170 u8 ssid_len; /* For SCAN_SSID_FILTER_SPECIFIC */
171 u8 ssid[IW_ESSID_MAX_SIZE];
172
173	u8 n_probe_reqs; /* Number of probe requests per channel */
174
175 u8 passive[SCAN_MAX_BANDS];
176 u8 active[SCAN_MAX_BANDS];
177
178 u8 dfs;
179
180 u8 padding[3];
181
182 struct conn_scan_ch_params channels[MAX_CHANNELS_ALL_BANDS];
183} __packed;
184
185
186#define SCHED_SCAN_MAX_SSIDS 8
187
188enum {
189 SCAN_SSID_TYPE_PUBLIC = 0,
190 SCAN_SSID_TYPE_HIDDEN = 1,
191};
192
193struct wl1271_ssid {
194 u8 type;
195 u8 len;
196 u8 ssid[IW_ESSID_MAX_SIZE];
197 /* u8 padding[2]; */
198} __packed;
199
200struct wl1271_cmd_sched_scan_ssid_list {
201 struct wl1271_cmd_header header;
202
203 u8 n_ssids;
204 struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS];
205 u8 padding[3];
206} __packed;
207
208struct wl1271_cmd_sched_scan_start {
209 struct wl1271_cmd_header header;
210
211 u8 tag;
212 u8 padding[3];
213} __packed;
214
215struct wl1271_cmd_sched_scan_stop {
216 struct wl1271_cmd_header header;
217
218 u8 tag;
219 u8 padding[3];
220} __packed;
221
222
109#endif /* __WL1271_SCAN_H__ */ 223#endif /* __WL1271_SCAN_H__ */
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index b1c7d031c391..92d29a860fc0 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -51,6 +51,13 @@ static const struct sdio_device_id wl1271_devices[] = {
51}; 51};
52MODULE_DEVICE_TABLE(sdio, wl1271_devices); 52MODULE_DEVICE_TABLE(sdio, wl1271_devices);
53 53
54static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
55{
56 sdio_claim_host(wl->if_priv);
57 sdio_set_block_size(wl->if_priv, blksz);
58 sdio_release_host(wl->if_priv);
59}
60
54static inline struct sdio_func *wl_to_func(struct wl1271 *wl) 61static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
55{ 62{
56 return wl->if_priv; 63 return wl->if_priv;
@@ -75,6 +82,16 @@ static irqreturn_t wl1271_hardirq(int irq, void *cookie)
75 complete(wl->elp_compl); 82 complete(wl->elp_compl);
76 wl->elp_compl = NULL; 83 wl->elp_compl = NULL;
77 } 84 }
85
86 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
87 /* don't enqueue a work right now. mark it as pending */
88 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
89 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
90 disable_irq_nosync(wl->irq);
91 pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
92 spin_unlock_irqrestore(&wl->wl_lock, flags);
93 return IRQ_HANDLED;
94 }
78 spin_unlock_irqrestore(&wl->wl_lock, flags); 95 spin_unlock_irqrestore(&wl->wl_lock, flags);
79 96
80 return IRQ_WAKE_THREAD; 97 return IRQ_WAKE_THREAD;
@@ -203,7 +220,8 @@ static struct wl1271_if_operations sdio_ops = {
203 .power = wl1271_sdio_set_power, 220 .power = wl1271_sdio_set_power,
204 .dev = wl1271_sdio_wl_to_dev, 221 .dev = wl1271_sdio_wl_to_dev,
205 .enable_irq = wl1271_sdio_enable_interrupts, 222 .enable_irq = wl1271_sdio_enable_interrupts,
206 .disable_irq = wl1271_sdio_disable_interrupts 223 .disable_irq = wl1271_sdio_disable_interrupts,
224 .set_block_size = wl1271_sdio_set_block_size,
207}; 225};
208 226
209static int __devinit wl1271_probe(struct sdio_func *func, 227static int __devinit wl1271_probe(struct sdio_func *func,
@@ -212,6 +230,8 @@ static int __devinit wl1271_probe(struct sdio_func *func,
212 struct ieee80211_hw *hw; 230 struct ieee80211_hw *hw;
213 const struct wl12xx_platform_data *wlan_data; 231 const struct wl12xx_platform_data *wlan_data;
214 struct wl1271 *wl; 232 struct wl1271 *wl;
233 unsigned long irqflags;
234 mmc_pm_flag_t mmcflags;
215 int ret; 235 int ret;
216 236
217 /* We are only able to handle the wlan function */ 237 /* We are only able to handle the wlan function */
@@ -230,6 +250,9 @@ static int __devinit wl1271_probe(struct sdio_func *func,
230 /* Grab access to FN0 for ELP reg. */ 250 /* Grab access to FN0 for ELP reg. */
231 func->card->quirks |= MMC_QUIRK_LENIENT_FN0; 251 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
232 252
253	/* Use block mode when transferring more than one block of data */
254 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
255
233 wlan_data = wl12xx_get_platform_data(); 256 wlan_data = wl12xx_get_platform_data();
234 if (IS_ERR(wlan_data)) { 257 if (IS_ERR(wlan_data)) {
235 ret = PTR_ERR(wlan_data); 258 ret = PTR_ERR(wlan_data);
@@ -239,17 +262,34 @@ static int __devinit wl1271_probe(struct sdio_func *func,
239 262
240 wl->irq = wlan_data->irq; 263 wl->irq = wlan_data->irq;
241 wl->ref_clock = wlan_data->board_ref_clock; 264 wl->ref_clock = wlan_data->board_ref_clock;
265 wl->tcxo_clock = wlan_data->board_tcxo_clock;
266 wl->platform_quirks = wlan_data->platform_quirks;
267
268 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
269 irqflags = IRQF_TRIGGER_RISING;
270 else
271 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
242 272
243 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, 273 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
244 IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 274 irqflags,
245 DRIVER_NAME, wl); 275 DRIVER_NAME, wl);
246 if (ret < 0) { 276 if (ret < 0) {
247 wl1271_error("request_irq() failed: %d", ret); 277 wl1271_error("request_irq() failed: %d", ret);
248 goto out_free; 278 goto out_free;
249 } 279 }
250 280
281 enable_irq_wake(wl->irq);
282 device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
283
251 disable_irq(wl->irq); 284 disable_irq(wl->irq);
252 285
286 /* if sdio can keep power while host is suspended, enable wow */
287 mmcflags = sdio_get_host_pm_caps(func);
288 wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
289
290 if (mmcflags & MMC_PM_KEEP_POWER)
291 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
292
253 ret = wl1271_init_ieee80211(wl); 293 ret = wl1271_init_ieee80211(wl);
254 if (ret) 294 if (ret)
255 goto out_irq; 295 goto out_irq;
@@ -284,6 +324,8 @@ static void __devexit wl1271_remove(struct sdio_func *func)
284 pm_runtime_get_noresume(&func->dev); 324 pm_runtime_get_noresume(&func->dev);
285 325
286 wl1271_unregister_hw(wl); 326 wl1271_unregister_hw(wl);
327 device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
328 disable_irq_wake(wl->irq);
287 free_irq(wl->irq, wl); 329 free_irq(wl->irq, wl);
288 wl1271_free_hw(wl); 330 wl1271_free_hw(wl);
289} 331}
@@ -292,11 +334,50 @@ static int wl1271_suspend(struct device *dev)
292{ 334{
293 /* Tell MMC/SDIO core it's OK to power down the card 335 /* Tell MMC/SDIO core it's OK to power down the card
294 * (if it isn't already), but not to remove it completely */ 336 * (if it isn't already), but not to remove it completely */
295 return 0; 337 struct sdio_func *func = dev_to_sdio_func(dev);
338 struct wl1271 *wl = sdio_get_drvdata(func);
339 mmc_pm_flag_t sdio_flags;
340 int ret = 0;
341
342 wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
343 wl->wow_enabled);
344
345 /* check whether sdio should keep power */
346 if (wl->wow_enabled) {
347 sdio_flags = sdio_get_host_pm_caps(func);
348
349 if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
350 wl1271_error("can't keep power while host "
351 "is suspended");
352 ret = -EINVAL;
353 goto out;
354 }
355
356 /* keep power while host suspended */
357 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
358 if (ret) {
359 wl1271_error("error while trying to keep power");
360 goto out;
361 }
362
363 /* release host */
364 sdio_release_host(func);
365 }
366out:
367 return ret;
296} 368}
297 369
298static int wl1271_resume(struct device *dev) 370static int wl1271_resume(struct device *dev)
299{ 371{
372 struct sdio_func *func = dev_to_sdio_func(dev);
373 struct wl1271 *wl = sdio_get_drvdata(func);
374
375 wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
376 if (wl->wow_enabled) {
377 /* claim back host */
378 sdio_claim_host(func);
379 }
380
300 return 0; 381 return 0;
301} 382}
302 383
@@ -343,4 +424,6 @@ MODULE_LICENSE("GPL");
343MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 424MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
344MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 425MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
345MODULE_FIRMWARE(WL1271_FW_NAME); 426MODULE_FIRMWARE(WL1271_FW_NAME);
346MODULE_FIRMWARE(WL1271_AP_FW_NAME); 427MODULE_FIRMWARE(WL128X_FW_NAME);
428MODULE_FIRMWARE(WL127X_AP_FW_NAME);
429MODULE_FIRMWARE(WL128X_AP_FW_NAME);
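
The sdio.c probe and suspend hunks tie Wake-on-WLAN to the host controller: WIPHY_WOWLAN_ANY is advertised only when the MMC host reports MMC_PM_KEEP_POWER, and the suspend handler requests that capability only when WoW was actually enabled, otherwise the card is allowed to power down. A sketch of that decision as a hypothetical helper (the real handler additionally releases the SDIO host on suspend and reclaims it on resume):

#include <linux/errno.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

static int my_sdio_prepare_suspend(struct sdio_func *func, bool wow_enabled)
{
	mmc_pm_flag_t caps;

	if (!wow_enabled)
		return 0;	/* card may lose power; full re-init on resume */

	caps = sdio_get_host_pm_caps(func);
	if (!(caps & MMC_PM_KEEP_POWER))
		return -EINVAL;	/* host cannot keep the card powered */

	/* keep power across suspend so the WLAN IRQ can wake the host */
	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}
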
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
index 9fcbd3dd8490..f28915392877 100644
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ b/drivers/net/wireless/wl12xx/sdio_test.c
@@ -189,7 +189,12 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
189 const struct firmware *fw; 189 const struct firmware *fw;
190 int ret; 190 int ret;
191 191
192 ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl)); 192 if (wl->chip.id == CHIP_ID_1283_PG20)
193 ret = request_firmware(&fw, WL128X_FW_NAME,
194 wl1271_wl_to_dev(wl));
195 else
196 ret = request_firmware(&fw, WL1271_FW_NAME,
197 wl1271_wl_to_dev(wl));
193 198
194 if (ret < 0) { 199 if (ret < 0) {
195 wl1271_error("could not get firmware: %d", ret); 200 wl1271_error("could not get firmware: %d", ret);
@@ -227,14 +232,14 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
227 const struct firmware *fw; 232 const struct firmware *fw;
228 int ret; 233 int ret;
229 234
230 ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl)); 235 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
231 236
232 if (ret < 0) { 237 if (ret < 0) {
233 wl1271_error("could not get nvs file: %d", ret); 238 wl1271_error("could not get nvs file: %d", ret);
234 return ret; 239 return ret;
235 } 240 }
236 241
237 wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL); 242 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
238 243
239 if (!wl->nvs) { 244 if (!wl->nvs) {
240 wl1271_error("could not allocate memory for the nvs file"); 245 wl1271_error("could not allocate memory for the nvs file");
@@ -288,6 +293,11 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
288 wl1271_notice("chip id 0x%x (1271 PG20)", 293 wl1271_notice("chip id 0x%x (1271 PG20)",
289 wl->chip.id); 294 wl->chip.id);
290 break; 295 break;
296 case CHIP_ID_1283_PG20:
297 wl1271_notice("chip id 0x%x (1283 PG20)",
298 wl->chip.id);
299 break;
300 case CHIP_ID_1283_PG10:
291 default: 301 default:
292 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id); 302 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
293 return -ENODEV; 303 return -ENODEV;
@@ -407,6 +417,9 @@ static int __devinit wl1271_probe(struct sdio_func *func,
407 /* Grab access to FN0 for ELP reg. */ 417 /* Grab access to FN0 for ELP reg. */
408 func->card->quirks |= MMC_QUIRK_LENIENT_FN0; 418 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
409 419
420	/* Use block mode when transferring more than one block of data */
421 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
422
410 wlan_data = wl12xx_get_platform_data(); 423 wlan_data = wl12xx_get_platform_data();
411 if (IS_ERR(wlan_data)) { 424 if (IS_ERR(wlan_data)) {
412 ret = PTR_ERR(wlan_data); 425 ret = PTR_ERR(wlan_data);
@@ -416,6 +429,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
416 429
417 wl->irq = wlan_data->irq; 430 wl->irq = wlan_data->irq;
418 wl->ref_clock = wlan_data->board_ref_clock; 431 wl->ref_clock = wlan_data->board_ref_clock;
432 wl->tcxo_clock = wlan_data->board_tcxo_clock;
419 433
420 sdio_set_drvdata(func, wl_test); 434 sdio_set_drvdata(func, wl_test);
421 435
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index ffc745b17f4d..51662bb68019 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -355,7 +355,8 @@ static struct wl1271_if_operations spi_ops = {
355 .power = wl1271_spi_set_power, 355 .power = wl1271_spi_set_power,
356 .dev = wl1271_spi_wl_to_dev, 356 .dev = wl1271_spi_wl_to_dev,
357 .enable_irq = wl1271_spi_enable_interrupts, 357 .enable_irq = wl1271_spi_enable_interrupts,
358 .disable_irq = wl1271_spi_disable_interrupts 358 .disable_irq = wl1271_spi_disable_interrupts,
359 .set_block_size = NULL,
359}; 360};
360 361
361static int __devinit wl1271_probe(struct spi_device *spi) 362static int __devinit wl1271_probe(struct spi_device *spi)
@@ -363,6 +364,7 @@ static int __devinit wl1271_probe(struct spi_device *spi)
363 struct wl12xx_platform_data *pdata; 364 struct wl12xx_platform_data *pdata;
364 struct ieee80211_hw *hw; 365 struct ieee80211_hw *hw;
365 struct wl1271 *wl; 366 struct wl1271 *wl;
367 unsigned long irqflags;
366 int ret; 368 int ret;
367 369
368 pdata = spi->dev.platform_data; 370 pdata = spi->dev.platform_data;
@@ -400,6 +402,13 @@ static int __devinit wl1271_probe(struct spi_device *spi)
400 } 402 }
401 403
402 wl->ref_clock = pdata->board_ref_clock; 404 wl->ref_clock = pdata->board_ref_clock;
405 wl->tcxo_clock = pdata->board_tcxo_clock;
406 wl->platform_quirks = pdata->platform_quirks;
407
408 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
409 irqflags = IRQF_TRIGGER_RISING;
410 else
411 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
403 412
404 wl->irq = spi->irq; 413 wl->irq = spi->irq;
405 if (wl->irq < 0) { 414 if (wl->irq < 0) {
@@ -409,7 +418,7 @@ static int __devinit wl1271_probe(struct spi_device *spi)
409 } 418 }
410 419
411 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, 420 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
412 IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 421 irqflags,
413 DRIVER_NAME, wl); 422 DRIVER_NAME, wl);
414 if (ret < 0) { 423 if (ret < 0) {
415 wl1271_error("request_irq() failed: %d", ret); 424 wl1271_error("request_irq() failed: %d", ret);
@@ -490,5 +499,7 @@ MODULE_LICENSE("GPL");
490MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 499MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
491MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 500MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
492MODULE_FIRMWARE(WL1271_FW_NAME); 501MODULE_FIRMWARE(WL1271_FW_NAME);
493MODULE_FIRMWARE(WL1271_AP_FW_NAME); 502MODULE_FIRMWARE(WL128X_FW_NAME);
503MODULE_FIRMWARE(WL127X_AP_FW_NAME);
504MODULE_FIRMWARE(WL128X_AP_FW_NAME);
494MODULE_ALIAS("spi:wl1271"); 505MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 6ec06a4a4c6d..da351d7cd1f2 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -27,6 +27,7 @@
27 27
28#include "wl12xx.h" 28#include "wl12xx.h"
29#include "acx.h" 29#include "acx.h"
30#include "reg.h"
30 31
31#define WL1271_TM_MAX_DATA_LENGTH 1024 32#define WL1271_TM_MAX_DATA_LENGTH 1024
32 33
@@ -204,7 +205,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
204 205
205 kfree(wl->nvs); 206 kfree(wl->nvs);
206 207
207 if (len != sizeof(struct wl1271_nvs_file)) 208 if ((wl->chip.id == CHIP_ID_1283_PG20) &&
209 (len != sizeof(struct wl128x_nvs_file)))
210 return -EINVAL;
211 else if (len != sizeof(struct wl1271_nvs_file))
208 return -EINVAL; 212 return -EINVAL;
209 213
210 wl->nvs = kzalloc(len, GFP_KERNEL); 214 wl->nvs = kzalloc(len, GFP_KERNEL);
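
The nvs_push hunk above validates the pushed blob against a per-chip NVS layout, but as written the wl1271-sized comparison still runs for a correctly sized wl128x blob, so the check only behaves as intended if the two structures happen to be the same size. A hypothetical helper expressing the per-chip check directly (struct definitions and CHIP_ID_1283_PG20 assumed to come from the driver's wl12xx.h and reg.h):

#include <linux/errno.h>
#include <linux/types.h>

#include "wl12xx.h"
#include "reg.h"

static int my_check_nvs_len(u32 chip_id, size_t len)
{
	size_t expected;

	/* each chip family is validated only against its own NVS layout */
	if (chip_id == CHIP_ID_1283_PG20)
		expected = sizeof(struct wl128x_nvs_file);
	else
		expected = sizeof(struct wl1271_nvs_file);

	return len == expected ? 0 : -EINVAL;
}
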
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 5e9ef7d53e7e..ca3ab1c1acef 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -65,11 +65,36 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
65static void wl1271_free_tx_id(struct wl1271 *wl, int id) 65static void wl1271_free_tx_id(struct wl1271 *wl, int id)
66{ 66{
67 if (__test_and_clear_bit(id, wl->tx_frames_map)) { 67 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
68 if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS))
69 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
70
68 wl->tx_frames[id] = NULL; 71 wl->tx_frames[id] = NULL;
69 wl->tx_frames_cnt--; 72 wl->tx_frames_cnt--;
70 } 73 }
71} 74}
72 75
76static int wl1271_tx_update_filters(struct wl1271 *wl,
77 struct sk_buff *skb)
78{
79 struct ieee80211_hdr *hdr;
80
81 hdr = (struct ieee80211_hdr *)(skb->data +
82 sizeof(struct wl1271_tx_hw_descr));
83
84 /*
85 * stop bssid-based filtering before transmitting authentication
86 * requests. this way the hw will never drop authentication
87 * responses coming from BSSIDs it isn't familiar with (e.g. on
88 * roaming)
89 */
90 if (!ieee80211_is_auth(hdr->frame_control))
91 return 0;
92
93 wl1271_configure_filters(wl, FIF_OTHER_BSS);
94
95 return wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
96}
97
73static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, 98static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
74 struct sk_buff *skb) 99 struct sk_buff *skb)
75{ 100{
@@ -127,13 +152,29 @@ u8 wl1271_tx_get_hlid(struct sk_buff *skb)
127 } 152 }
128} 153}
129 154
155static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
156 unsigned int packet_length)
157{
158 if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
159 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
160 else
161 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
162}
163
130static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, 164static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
131 u32 buf_offset, u8 hlid) 165 u32 buf_offset, u8 hlid)
132{ 166{
133 struct wl1271_tx_hw_descr *desc; 167 struct wl1271_tx_hw_descr *desc;
134 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 168 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
169 u32 len;
135 u32 total_blocks; 170 u32 total_blocks;
136 int id, ret = -EBUSY; 171 int id, ret = -EBUSY;
172 u32 spare_blocks;
173
174 if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS))
175 spare_blocks = 2;
176 else
177 spare_blocks = 1;
137 178
138 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 179 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
139 return -EAGAIN; 180 return -EAGAIN;
@@ -145,17 +186,27 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
145 186
146 /* approximate the number of blocks required for this packet 187 /* approximate the number of blocks required for this packet
147 in the firmware */ 188 in the firmware */
148 total_blocks = total_len + TX_HW_BLOCK_SIZE - 1; 189 len = wl12xx_calc_packet_alignment(wl, total_len);
149 total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE; 190
191 total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
192 spare_blocks;
193
150 if (total_blocks <= wl->tx_blocks_available) { 194 if (total_blocks <= wl->tx_blocks_available) {
151 desc = (struct wl1271_tx_hw_descr *)skb_push( 195 desc = (struct wl1271_tx_hw_descr *)skb_push(
152 skb, total_len - skb->len); 196 skb, total_len - skb->len);
153 197
154 desc->extra_mem_blocks = TX_HW_BLOCK_SPARE; 198 /* HW descriptor fields change between wl127x and wl128x */
155 desc->total_mem_blocks = total_blocks; 199 if (wl->chip.id == CHIP_ID_1283_PG20) {
200 desc->wl128x_mem.total_mem_blocks = total_blocks;
201 } else {
202 desc->wl127x_mem.extra_blocks = spare_blocks;
203 desc->wl127x_mem.total_mem_blocks = total_blocks;
204 }
205
156 desc->id = id; 206 desc->id = id;
157 207
158 wl->tx_blocks_available -= total_blocks; 208 wl->tx_blocks_available -= total_blocks;
209 wl->tx_allocated_blocks += total_blocks;
159 210
160 if (wl->bss_type == BSS_TYPE_AP_BSS) 211 if (wl->bss_type == BSS_TYPE_AP_BSS)
161 wl->links[hlid].allocated_blks += total_blocks; 212 wl->links[hlid].allocated_blks += total_blocks;
@@ -172,13 +223,18 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
172 return ret; 223 return ret;
173} 224}
174 225
226static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
227{
228 return wl->dummy_packet == skb;
229}
230
175static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 231static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
176 u32 extra, struct ieee80211_tx_info *control, 232 u32 extra, struct ieee80211_tx_info *control,
177 u8 hlid) 233 u8 hlid)
178{ 234{
179 struct timespec ts; 235 struct timespec ts;
180 struct wl1271_tx_hw_descr *desc; 236 struct wl1271_tx_hw_descr *desc;
181 int pad, ac, rate_idx; 237 int aligned_len, ac, rate_idx;
182 s64 hosttime; 238 s64 hosttime;
183 u16 tx_attr; 239 u16 tx_attr;
184 240
@@ -202,12 +258,25 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
202 else 258 else
203 desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); 259 desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
204 260
205 /* configure the tx attributes */ 261 /* queue */
206 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
207
208 /* queue (we use same identifiers for tid's and ac's */
209 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 262 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
210 desc->tid = ac; 263 desc->tid = skb->priority;
264
265 if (wl12xx_is_dummy_packet(wl, skb)) {
266 /*
267 * FW expects the dummy packet to have an invalid session id -
268		 * any session id that is different from the one set in the join
269 */
270 tx_attr = ((~wl->session_counter) <<
271 TX_HW_ATTR_OFST_SESSION_COUNTER) &
272 TX_HW_ATTR_SESSION_COUNTER;
273
274 tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
275 } else {
276 /* configure the tx attributes */
277 tx_attr =
278 wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
279 }
211 280
212 if (wl->bss_type != BSS_TYPE_AP_BSS) { 281 if (wl->bss_type != BSS_TYPE_AP_BSS) {
213 desc->aid = hlid; 282 desc->aid = hlid;
@@ -237,20 +306,37 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
237 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; 306 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
238 desc->reserved = 0; 307 desc->reserved = 0;
239 308
240 /* align the length (and store in terms of words) */ 309 aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);
241 pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
242 desc->length = cpu_to_le16(pad >> 2);
243 310
244 /* calculate number of padding bytes */ 311 if (wl->chip.id == CHIP_ID_1283_PG20) {
245 pad = pad - skb->len; 312 desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
246 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 313 desc->length = cpu_to_le16(aligned_len >> 2);
247 314
248 desc->tx_attr = cpu_to_le16(tx_attr); 315 wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
316 "tx_attr: 0x%x len: %d life: %d mem: %d",
317 desc->hlid, tx_attr,
318 le16_to_cpu(desc->length),
319 le16_to_cpu(desc->life_time),
320 desc->wl128x_mem.total_mem_blocks);
321 } else {
322 int pad;
249 323
250 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d " 324 /* Store the aligned length in terms of words */
251 "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid, 325 desc->length = cpu_to_le16(aligned_len >> 2);
252 le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length), 326
253 le16_to_cpu(desc->life_time), desc->total_mem_blocks); 327 /* calculate number of padding bytes */
328 pad = aligned_len - skb->len;
329 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
330
331 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
332 "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
333 desc->hlid, tx_attr,
334 le16_to_cpu(desc->length),
335 le16_to_cpu(desc->life_time),
336 desc->wl127x_mem.total_mem_blocks);
337 }
338
339 desc->tx_attr = cpu_to_le16(tx_attr);
254} 340}
255 341
256/* caller must hold wl->mutex */ 342/* caller must hold wl->mutex */
@@ -300,19 +386,29 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
300 if (wl->bss_type == BSS_TYPE_AP_BSS) { 386 if (wl->bss_type == BSS_TYPE_AP_BSS) {
301 wl1271_tx_ap_update_inconnection_sta(wl, skb); 387 wl1271_tx_ap_update_inconnection_sta(wl, skb);
302 wl1271_tx_regulate_link(wl, hlid); 388 wl1271_tx_regulate_link(wl, hlid);
389 } else {
390 wl1271_tx_update_filters(wl, skb);
303 } 391 }
304 392
305 wl1271_tx_fill_hdr(wl, skb, extra, info, hlid); 393 wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
306 394
307 /* 395 /*
308 * The length of each packet is stored in terms of words. Thus, we must 396 * The length of each packet is stored in terms of
309 * pad the skb data to make sure its length is aligned. 397 * words. Thus, we must pad the skb data to make sure its
310 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr 398 * length is aligned. The number of padding bytes is computed
399 * and set in wl1271_tx_fill_hdr.
400 * In special cases, we want to align to a specific block size
401 * (eg. for wl128x with SDIO we align to 256).
311 */ 402 */
312 total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO); 403 total_len = wl12xx_calc_packet_alignment(wl, skb->len);
404
313 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); 405 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
314 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 406 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
315 407
408 /* Revert side effects in the dummy packet skb, so it can be reused */
409 if (wl12xx_is_dummy_packet(wl, skb))
410 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
411
316 return total_len; 412 return total_len;
317} 413}
318 414
@@ -425,10 +521,23 @@ out:
425 521
426static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 522static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
427{ 523{
524 unsigned long flags;
525 struct sk_buff *skb = NULL;
526
428 if (wl->bss_type == BSS_TYPE_AP_BSS) 527 if (wl->bss_type == BSS_TYPE_AP_BSS)
429 return wl1271_ap_skb_dequeue(wl); 528 skb = wl1271_ap_skb_dequeue(wl);
529 else
530 skb = wl1271_sta_skb_dequeue(wl);
430 531
431 return wl1271_sta_skb_dequeue(wl); 532 if (!skb &&
533 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
534 skb = wl->dummy_packet;
535 spin_lock_irqsave(&wl->wl_lock, flags);
536 wl->tx_queue_count--;
537 spin_unlock_irqrestore(&wl->wl_lock, flags);
538 }
539
540 return skb;
432} 541}
433 542
434static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) 543static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
@@ -436,7 +545,9 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
436 unsigned long flags; 545 unsigned long flags;
437 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 546 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
438 547
439 if (wl->bss_type == BSS_TYPE_AP_BSS) { 548 if (wl12xx_is_dummy_packet(wl, skb)) {
549 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
550 } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
440 u8 hlid = wl1271_tx_get_hlid(skb); 551 u8 hlid = wl1271_tx_get_hlid(skb);
441 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 552 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
442 553
@@ -454,22 +565,14 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
454void wl1271_tx_work_locked(struct wl1271 *wl) 565void wl1271_tx_work_locked(struct wl1271 *wl)
455{ 566{
456 struct sk_buff *skb; 567 struct sk_buff *skb;
457 bool woken_up = false;
458 u32 buf_offset = 0; 568 u32 buf_offset = 0;
459 bool sent_packets = false; 569 bool sent_packets = false;
460 int ret; 570 int ret;
461 571
462 if (unlikely(wl->state == WL1271_STATE_OFF)) 572 if (unlikely(wl->state == WL1271_STATE_OFF))
463 goto out; 573 return;
464 574
465 while ((skb = wl1271_skb_dequeue(wl))) { 575 while ((skb = wl1271_skb_dequeue(wl))) {
466 if (!woken_up) {
467 ret = wl1271_ps_elp_wakeup(wl);
468 if (ret < 0)
469 goto out_ack;
470 woken_up = true;
471 }
472
473 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); 576 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
474 if (ret == -EAGAIN) { 577 if (ret == -EAGAIN) {
475 /* 578 /*
@@ -516,18 +619,22 @@ out_ack:
516 619
517 wl1271_handle_tx_low_watermark(wl); 620 wl1271_handle_tx_low_watermark(wl);
518 } 621 }
519
520out:
521 if (woken_up)
522 wl1271_ps_elp_sleep(wl);
523} 622}
524 623
525void wl1271_tx_work(struct work_struct *work) 624void wl1271_tx_work(struct work_struct *work)
526{ 625{
527 struct wl1271 *wl = container_of(work, struct wl1271, tx_work); 626 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
627 int ret;
528 628
529 mutex_lock(&wl->mutex); 629 mutex_lock(&wl->mutex);
630 ret = wl1271_ps_elp_wakeup(wl);
631 if (ret < 0)
632 goto out;
633
530 wl1271_tx_work_locked(wl); 634 wl1271_tx_work_locked(wl);
635
636 wl1271_ps_elp_sleep(wl);
637out:
531 mutex_unlock(&wl->mutex); 638 mutex_unlock(&wl->mutex);
532} 639}
533 640
@@ -549,6 +656,11 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
549 skb = wl->tx_frames[id]; 656 skb = wl->tx_frames[id];
550 info = IEEE80211_SKB_CB(skb); 657 info = IEEE80211_SKB_CB(skb);
551 658
659 if (wl12xx_is_dummy_packet(wl, skb)) {
660 wl1271_free_tx_id(wl, id);
661 return;
662 }
663
552 /* update the TX status info */ 664 /* update the TX status info */
553 if (result->status == TX_SUCCESS) { 665 if (result->status == TX_SUCCESS) {
554 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 666 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -657,8 +769,8 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
657 wl1271_handle_tx_low_watermark(wl); 769 wl1271_handle_tx_low_watermark(wl);
658} 770}
659 771
660/* caller must hold wl->mutex */ 772/* caller must hold wl->mutex and TX must be stopped */
661void wl1271_tx_reset(struct wl1271 *wl) 773void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
662{ 774{
663 int i; 775 int i;
664 struct sk_buff *skb; 776 struct sk_buff *skb;
@@ -678,10 +790,13 @@ void wl1271_tx_reset(struct wl1271 *wl)
678 while ((skb = skb_dequeue(&wl->tx_queue[i]))) { 790 while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
679 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", 791 wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
680 skb); 792 skb);
681 info = IEEE80211_SKB_CB(skb); 793
682 info->status.rates[0].idx = -1; 794 if (!wl12xx_is_dummy_packet(wl, skb)) {
683 info->status.rates[0].count = 0; 795 info = IEEE80211_SKB_CB(skb);
684 ieee80211_tx_status(wl->hw, skb); 796 info->status.rates[0].idx = -1;
797 info->status.rates[0].count = 0;
798 ieee80211_tx_status(wl->hw, skb);
799 }
685 } 800 }
686 } 801 }
687 } 802 }
@@ -691,8 +806,10 @@ void wl1271_tx_reset(struct wl1271 *wl)
691 /* 806 /*
692 * Make sure the driver is at a consistent state, in case this 807 * Make sure the driver is at a consistent state, in case this
693 * function is called from a context other than interface removal. 808 * function is called from a context other than interface removal.
809 * This call will always wake the TX queues.
694 */ 810 */
695 wl1271_handle_tx_low_watermark(wl); 811 if (reset_tx_queues)
812 wl1271_handle_tx_low_watermark(wl);
696 813
697 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) { 814 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
698 if (wl->tx_frames[i] == NULL) 815 if (wl->tx_frames[i] == NULL)
@@ -702,21 +819,27 @@ void wl1271_tx_reset(struct wl1271 *wl)
702 wl1271_free_tx_id(wl, i); 819 wl1271_free_tx_id(wl, i);
703 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 820 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
704 821
705 /* Remove private headers before passing the skb to mac80211 */ 822 if (!wl12xx_is_dummy_packet(wl, skb)) {
706 info = IEEE80211_SKB_CB(skb); 823 /*
707 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 824 * Remove private headers before passing the skb to
708 if (info->control.hw_key && 825 * mac80211
709 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { 826 */
710 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 827 info = IEEE80211_SKB_CB(skb);
711 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, 828 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
712 hdrlen); 829 if (info->control.hw_key &&
713 skb_pull(skb, WL1271_TKIP_IV_SPACE); 830 info->control.hw_key->cipher ==
714 } 831 WLAN_CIPHER_SUITE_TKIP) {
832 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
833 memmove(skb->data + WL1271_TKIP_IV_SPACE,
834 skb->data, hdrlen);
835 skb_pull(skb, WL1271_TKIP_IV_SPACE);
836 }
715 837
716 info->status.rates[0].idx = -1; 838 info->status.rates[0].idx = -1;
717 info->status.rates[0].count = 0; 839 info->status.rates[0].count = 0;
718 840
719 ieee80211_tx_status(wl->hw, skb); 841 ieee80211_tx_status(wl->hw, skb);
842 }
720 } 843 }
721} 844}
722 845
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 02f07fa66e82..832f9258d675 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -25,7 +25,6 @@
25#ifndef __TX_H__ 25#ifndef __TX_H__
26#define __TX_H__ 26#define __TX_H__
27 27
28#define TX_HW_BLOCK_SPARE 2
29#define TX_HW_BLOCK_SIZE 252 28#define TX_HW_BLOCK_SIZE 252
30 29
31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 30#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
@@ -41,6 +40,7 @@
41 BIT(8) | BIT(9)) 40 BIT(8) | BIT(9))
42#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11)) 41#define TX_HW_ATTR_LAST_WORD_PAD (BIT(10) | BIT(11))
43#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12) 42#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
43#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
44 44
45#define TX_HW_ATTR_OFST_SAVE_RETRIES 0 45#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
46#define TX_HW_ATTR_OFST_HEADER_PAD 1 46#define TX_HW_ATTR_OFST_HEADER_PAD 1
@@ -55,20 +55,60 @@
55#define WL1271_TX_ALIGN_TO 4 55#define WL1271_TX_ALIGN_TO 4
56#define WL1271_TKIP_IV_SPACE 4 56#define WL1271_TKIP_IV_SPACE 4
57 57
58/* Used for management frames and dummy packets */
59#define WL1271_TID_MGMT 7
60
61struct wl127x_tx_mem {
62 /*
63 * Number of extra memory blocks to allocate for this packet
64 * in addition to the number of blocks derived from the packet
65 * length.
66 */
67 u8 extra_blocks;
68 /*
69 * Total number of memory blocks allocated by the host for
70	 * this packet. Must be equal to or greater than the actual
71	 * number of blocks allocated by the HW.
72 */
73 u8 total_mem_blocks;
74} __packed;
75
76struct wl128x_tx_mem {
77 /*
78 * Total number of memory blocks allocated by the host for
79 * this packet.
80 */
81 u8 total_mem_blocks;
82 /*
83 * Number of extra bytes, at the end of the frame. the host
84 * uses this padding to complete each frame to integer number
85 * of SDIO blocks.
86 */
87 u8 extra_bytes;
88} __packed;
89
90/*
91 * On wl128x based devices, when TX packets are aggregated, each packet
92 * size must be aligned to the SDIO block size. The maximum block size
93 * is bounded by the type of the padded bytes field that is sent to the
94 * FW. Currently the type is u8, so the maximum block size is 256 bytes.
95 */
96#define WL12XX_BUS_BLOCK_SIZE min(512u, \
97 (1u << (8 * sizeof(((struct wl128x_tx_mem *) 0)->extra_bytes))))
98
58struct wl1271_tx_hw_descr { 99struct wl1271_tx_hw_descr {
59 /* Length of packet in words, including descriptor+header+data */ 100 /* Length of packet in words, including descriptor+header+data */
60 __le16 length; 101 __le16 length;
61 /* Number of extra memory blocks to allocate for this packet in 102 union {
62 addition to the number of blocks derived from the packet length */ 103 struct wl127x_tx_mem wl127x_mem;
63 u8 extra_mem_blocks; 104 struct wl128x_tx_mem wl128x_mem;
64 /* Total number of memory blocks allocated by the host for this packet. 105 } __packed;
65 Must be equal or greater than the actual blocks number allocated by
66 HW!! */
67 u8 total_mem_blocks;
68 /* Device time (in us) when the packet arrived to the driver */ 106 /* Device time (in us) when the packet arrived to the driver */
69 __le32 start_time; 107 __le32 start_time;
70 /* Max delay in TUs until transmission. The last device time the 108 /*
71 packet can be transmitted is: startTime+(1024*LifeTime) */ 109 * Max delay in TUs until transmission. The last device time the
110 * packet can be transmitted is: start_time + (1024 * life_time)
111 */
72 __le16 life_time; 112 __le16 life_time;
73 /* Bitwise fields - see TX_ATTR... definitions above. */ 113 /* Bitwise fields - see TX_ATTR... definitions above. */
74 __le16 tx_attr; 114 __le16 tx_attr;
@@ -145,7 +185,7 @@ static inline int wl1271_tx_get_queue(int queue)
145void wl1271_tx_work(struct work_struct *work); 185void wl1271_tx_work(struct work_struct *work);
146void wl1271_tx_work_locked(struct wl1271 *wl); 186void wl1271_tx_work_locked(struct wl1271 *wl);
147void wl1271_tx_complete(struct wl1271 *wl); 187void wl1271_tx_complete(struct wl1271 *wl);
148void wl1271_tx_reset(struct wl1271 *wl); 188void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
149void wl1271_tx_flush(struct wl1271 *wl); 189void wl1271_tx_flush(struct wl1271 *wl);
150u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 190u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
151u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 191u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 86be83e25ec5..fbe8f46d1232 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -131,9 +131,16 @@ extern u32 wl12xx_debug_level;
131 131
132 132
 #define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
-#define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
+#define WL128X_FW_NAME "ti-connectivity/wl128x-fw.bin"
+#define WL127X_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
+#define WL128X_AP_FW_NAME "ti-connectivity/wl128x-fw-ap.bin"
 
-#define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
+/*
+ * wl127x and wl128x are using the same NVS file name. However, the
+ * ini parameters between them are different. The driver validates
+ * the correct NVS size in wl1271_boot_upload_nvs().
+ */
+#define WL12XX_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
 
138#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) 145#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
139#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) 146#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
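Since both chip families load the same NVS file name, the only way to tell the layouts apart is the file size, as the comment above says wl1271_boot_upload_nvs() does. A rough sketch of that kind of size check; the length constants are placeholders, not the real wl12xx NVS sizes:

#include <stdio.h>
#include <stddef.h>

#define NVS_LEN_WL127X 500    /* placeholder length */
#define NVS_LEN_WL128X 1100   /* placeholder length */

enum nvs_type { NVS_WL127X, NVS_WL128X, NVS_INVALID };

static enum nvs_type classify_nvs(size_t len)
{
    if (len == NVS_LEN_WL127X)
        return NVS_WL127X;
    if (len == NVS_LEN_WL128X)
        return NVS_WL128X;
    return NVS_INVALID;   /* reject and fail the boot */
}

int main(void)
{
    printf("%d %d %d\n", classify_nvs(500), classify_nvs(1100), classify_nvs(42));
    return 0;
}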
@@ -200,13 +207,29 @@ struct wl1271_partition_set {
200 207
201struct wl1271; 208struct wl1271;
202 209
203#define WL12XX_NUM_FW_VER 5 210enum {
211 FW_VER_CHIP,
212 FW_VER_IF_TYPE,
213 FW_VER_MAJOR,
214 FW_VER_SUBTYPE,
215 FW_VER_MINOR,
216
217 NUM_FW_VER
218};
219
220#define FW_VER_CHIP_WL127X 6
221#define FW_VER_CHIP_WL128X 7
222
223#define FW_VER_IF_TYPE_STA 1
224#define FW_VER_IF_TYPE_AP 2
225
226#define FW_VER_MINOR_1_SPARE_STA_MIN 58
227#define FW_VER_MINOR_1_SPARE_AP_MIN 47
204 228
205/* FIXME: I'm not sure about this structure name */
206struct wl1271_chip { 229struct wl1271_chip {
207 u32 id; 230 u32 id;
208 char fw_ver_str[ETHTOOL_BUSINFO_LEN]; 231 char fw_ver_str[ETHTOOL_BUSINFO_LEN];
209 unsigned int fw_ver[WL12XX_NUM_FW_VER]; 232 unsigned int fw_ver[NUM_FW_VER];
210}; 233};
211 234
212struct wl1271_stats { 235struct wl1271_stats {
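The FW_VER_* indices above split the firmware version into five numeric fields, and the FW_VER_MINOR_1_SPARE_*_MIN values are compared against the minor field to decide the spare-block behaviour. A standalone sketch of that split; the version-string format is assumed here for illustration:

#include <stdio.h>

enum { FW_CHIP, FW_IF_TYPE, FW_MAJOR, FW_SUBTYPE, FW_MINOR, FW_FIELDS };

int main(void)
{
    unsigned int ver[FW_FIELDS] = { 0 };
    const char *fw_ver_str = "6.1.3.50.58";   /* hypothetical STA build */

    if (sscanf(fw_ver_str, "%u.%u.%u.%u.%u", &ver[FW_CHIP], &ver[FW_IF_TYPE],
               &ver[FW_MAJOR], &ver[FW_SUBTYPE], &ver[FW_MINOR]) != FW_FIELDS)
        return 1;

    /* chip 6 = wl127x, if_type 1 = STA; minor >= 58 means the
     * 2-spare-block quirk (WL12XX_QUIRK_USE_2_SPARE_BLOCKS) is not needed */
    printf("chip %u if_type %u minor %u\n",
           ver[FW_CHIP], ver[FW_IF_TYPE], ver[FW_MINOR]);
    return 0;
}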
@@ -261,6 +284,8 @@ struct wl1271_fw_sta_status {
261 u8 tx_total; 284 u8 tx_total;
262 u8 reserved1; 285 u8 reserved1;
263 __le16 reserved2; 286 __le16 reserved2;
287 /* Total structure size is 68 bytes */
288 u32 padding;
264} __packed; 289} __packed;
265 290
266struct wl1271_fw_full_status { 291struct wl1271_fw_full_status {
@@ -277,9 +302,10 @@ struct wl1271_rx_mem_pool_addr {
277 u32 addr_extra; 302 u32 addr_extra;
278}; 303};
279 304
305#define WL1271_MAX_CHANNELS 64
280struct wl1271_scan { 306struct wl1271_scan {
281 struct cfg80211_scan_request *req; 307 struct cfg80211_scan_request *req;
282 bool *scanned_ch; 308 unsigned long scanned_ch[BITS_TO_LONGS(WL1271_MAX_CHANNELS)];
283 bool failed; 309 bool failed;
284 u8 state; 310 u8 state;
285 u8 ssid[IW_ESSID_MAX_SIZE+1]; 311 u8 ssid[IW_ESSID_MAX_SIZE+1];
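The scan bookkeeping above moves from a heap-allocated bool array to a fixed-size bitmap, one bit per channel, bounded by WL1271_MAX_CHANNELS. A plain C illustration of the same idea (in the kernel this would presumably go through the standard bitmap helpers rather than these hand-rolled ones):

#include <stdio.h>

#define MAX_CHANNELS     64
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long scanned_ch[BITS_TO_LONGS(MAX_CHANNELS)];

static void mark_scanned(unsigned int ch)
{
    scanned_ch[ch / BITS_PER_LONG] |= 1UL << (ch % BITS_PER_LONG);
}

static int was_scanned(unsigned int ch)
{
    return !!(scanned_ch[ch / BITS_PER_LONG] & (1UL << (ch % BITS_PER_LONG)));
}

int main(void)
{
    mark_scanned(11);
    printf("ch 11: %d, ch 36: %d\n", was_scanned(11), was_scanned(36));
    return 0;
}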
@@ -297,6 +323,7 @@ struct wl1271_if_operations {
297 struct device* (*dev)(struct wl1271 *wl); 323 struct device* (*dev)(struct wl1271 *wl);
298 void (*enable_irq)(struct wl1271 *wl); 324 void (*enable_irq)(struct wl1271 *wl);
299 void (*disable_irq)(struct wl1271 *wl); 325 void (*disable_irq)(struct wl1271 *wl);
326 void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
300}; 327};
301 328
302#define MAX_NUM_KEYS 14 329#define MAX_NUM_KEYS 14
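The set_block_size() member added to wl1271_if_operations above is an optional per-bus hook. A small sketch of that pattern, assuming for illustration that only a block-oriented bus such as SDIO fills it in while other buses leave it NULL:

#include <stdio.h>

struct if_ops {
    void (*set_block_size)(void *bus, unsigned int blksz);
};

static void sdio_set_block_size(void *bus, unsigned int blksz)
{
    printf("SDIO block size set to %u\n", blksz);
}

static const struct if_ops sdio_ops = { .set_block_size = sdio_set_block_size };
static const struct if_ops spi_ops;   /* no block-size constraint assumed */

static void configure_bus(const struct if_ops *ops)
{
    if (ops->set_block_size)          /* optional hook: tolerate NULL */
        ops->set_block_size(NULL, 256);
}

int main(void)
{
    configure_bus(&sdio_ops);
    configure_bus(&spi_ops);          /* silently skipped */
    return 0;
}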
@@ -319,15 +346,19 @@ enum wl12xx_flags {
319 WL1271_FLAG_TX_QUEUE_STOPPED, 346 WL1271_FLAG_TX_QUEUE_STOPPED,
320 WL1271_FLAG_TX_PENDING, 347 WL1271_FLAG_TX_PENDING,
321 WL1271_FLAG_IN_ELP, 348 WL1271_FLAG_IN_ELP,
349 WL1271_FLAG_ELP_REQUESTED,
322 WL1271_FLAG_PSM, 350 WL1271_FLAG_PSM,
323 WL1271_FLAG_PSM_REQUESTED, 351 WL1271_FLAG_PSM_REQUESTED,
324 WL1271_FLAG_IRQ_RUNNING, 352 WL1271_FLAG_IRQ_RUNNING,
325 WL1271_FLAG_IDLE, 353 WL1271_FLAG_IDLE,
326 WL1271_FLAG_IDLE_REQUESTED,
327 WL1271_FLAG_PSPOLL_FAILURE, 354 WL1271_FLAG_PSPOLL_FAILURE,
328 WL1271_FLAG_STA_STATE_SENT, 355 WL1271_FLAG_STA_STATE_SENT,
329 WL1271_FLAG_FW_TX_BUSY, 356 WL1271_FLAG_FW_TX_BUSY,
330 WL1271_FLAG_AP_STARTED 357 WL1271_FLAG_AP_STARTED,
358 WL1271_FLAG_IF_INITIALIZED,
359 WL1271_FLAG_DUMMY_PACKET_PENDING,
360 WL1271_FLAG_SUSPENDED,
361 WL1271_FLAG_PENDING_WORK,
331}; 362};
332 363
333struct wl1271_link { 364struct wl1271_link {
@@ -371,7 +402,7 @@ struct wl1271 {
371 u8 *fw; 402 u8 *fw;
372 size_t fw_len; 403 size_t fw_len;
373 u8 fw_bss_type; 404 u8 fw_bss_type;
374 struct wl1271_nvs_file *nvs; 405 void *nvs;
375 size_t nvs_len; 406 size_t nvs_len;
376 407
377 s8 hw_pg_ver; 408 s8 hw_pg_ver;
@@ -389,6 +420,7 @@ struct wl1271 {
389 /* Accounting for allocated / available TX blocks on HW */ 420 /* Accounting for allocated / available TX blocks on HW */
390 u32 tx_blocks_freed[NUM_TX_QUEUES]; 421 u32 tx_blocks_freed[NUM_TX_QUEUES];
391 u32 tx_blocks_available; 422 u32 tx_blocks_available;
423 u32 tx_allocated_blocks;
392 u32 tx_results_count; 424 u32 tx_results_count;
393 425
394 /* Transmitted TX packets counter for chipset interface */ 426 /* Transmitted TX packets counter for chipset interface */
@@ -430,6 +462,9 @@ struct wl1271 {
430 /* Intermediate buffer, used for packet aggregation */ 462 /* Intermediate buffer, used for packet aggregation */
431 u8 *aggr_buf; 463 u8 *aggr_buf;
432 464
465 /* Reusable dummy packet template */
466 struct sk_buff *dummy_packet;
467
433 /* Network stack work */ 468 /* Network stack work */
434 struct work_struct netstack_work; 469 struct work_struct netstack_work;
435 470
@@ -446,6 +481,8 @@ struct wl1271 {
446 struct wl1271_scan scan; 481 struct wl1271_scan scan;
447 struct delayed_work scan_complete_work; 482 struct delayed_work scan_complete_work;
448 483
484 bool sched_scanning;
485
449 /* probe-req template for the current AP */ 486 /* probe-req template for the current AP */
450 struct sk_buff *probereq; 487 struct sk_buff *probereq;
451 488
@@ -476,6 +513,7 @@ struct wl1271 {
476 unsigned int rx_filter; 513 unsigned int rx_filter;
477 514
478 struct completion *elp_compl; 515 struct completion *elp_compl;
516 struct completion *ps_compl;
479 struct delayed_work elp_work; 517 struct delayed_work elp_work;
480 struct delayed_work pspoll_work; 518 struct delayed_work pspoll_work;
481 519
@@ -527,6 +565,14 @@ struct wl1271 {
527 bool ba_support; 565 bool ba_support;
528 u8 ba_rx_bitmap; 566 u8 ba_rx_bitmap;
529 567
568 int tcxo_clock;
569
570 /*
571 * wowlan trigger was configured during suspend.
572 * (currently, only "ANY" trigger is supported)
573 */
574 bool wow_enabled;
575
530 /* 576 /*
531 * AP-mode - links indexed by HLID. The global and broadcast links 577 * AP-mode - links indexed by HLID. The global and broadcast links
532 * are always active. 578 * are always active.
@@ -544,6 +590,9 @@ struct wl1271 {
544 590
545 /* Quirks of specific hardware revisions */ 591 /* Quirks of specific hardware revisions */
546 unsigned int quirks; 592 unsigned int quirks;
593
594 /* Platform limitations */
595 unsigned int platform_quirks;
547}; 596};
548 597
549struct wl1271_station { 598struct wl1271_station {
@@ -576,6 +625,15 @@ int wl1271_plt_stop(struct wl1271 *wl);
576/* Quirks */ 625/* Quirks */
577 626
578/* Each RX/TX transaction requires an end-of-transaction transfer */ 627/* Each RX/TX transaction requires an end-of-transaction transfer */
579#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0) 628#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
629
630/*
631 * Older firmwares use 2 spare TX blocks
632 * (for STA < 6.1.3.50.58 or for AP < 6.2.0.0.47)
633 */
634#define WL12XX_QUIRK_USE_2_SPARE_BLOCKS BIT(1)
635
636/* WL128X requires aggregated packets to be aligned to the SDIO block size */
637#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
580 638
581#endif 639#endif
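The two new quirk bits above gate the spare-block count and the aggregation alignment at run time. A small sketch of the usual consume-a-quirk pattern, with the bit positions taken from the defines above and everything else invented for illustration:

#include <stdio.h>

#define QUIRK_END_OF_TRANSACTION   (1u << 0)
#define QUIRK_USE_2_SPARE_BLOCKS   (1u << 1)
#define QUIRK_BLOCKSIZE_ALIGNMENT  (1u << 2)

int main(void)
{
    /* set once at probe time from the detected chip and firmware */
    unsigned int quirks = QUIRK_USE_2_SPARE_BLOCKS;

    /* older firmware: 2 spare TX blocks; otherwise assume 1 */
    unsigned int spare = (quirks & QUIRK_USE_2_SPARE_BLOCKS) ? 2 : 1;
    int align = !!(quirks & QUIRK_BLOCKSIZE_ALIGNMENT);

    printf("spare TX blocks: %u, SDIO alignment: %s\n",
           spare, align ? "on" : "off");
    return 0;
}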
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index a73a305d3cba..ff306d763e37 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -557,7 +557,7 @@ int zd_chip_unlock_phy_regs(struct zd_chip *chip)
557 return r; 557 return r;
558} 558}
559 559
560/* CR157 can be optionally patched by the EEPROM for original ZD1211 */ 560/* ZD_CR157 can be optionally patched by the EEPROM for original ZD1211 */
561static int patch_cr157(struct zd_chip *chip) 561static int patch_cr157(struct zd_chip *chip)
562{ 562{
563 int r; 563 int r;
@@ -571,7 +571,7 @@ static int patch_cr157(struct zd_chip *chip)
571 return r; 571 return r;
572 572
573 dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value >> 8); 573 dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value >> 8);
574 return zd_iowrite32_locked(chip, value >> 8, CR157); 574 return zd_iowrite32_locked(chip, value >> 8, ZD_CR157);
575} 575}
576 576
577/* 577/*
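patch_cr157() above is an instance of a recurring zd1211rw pattern: read a calibration word that the EEPROM may override, then write the relevant byte back to a PHY register. A standalone sketch of just that data flow; the register writer here is a stub, not the driver's locked I/O helpers:

#include <stdio.h>
#include <stdint.h>

/* Stub standing in for a locked 8-bit register write. */
static int write_phy_reg(uint16_t addr, uint8_t val)
{
    printf("PHY reg 0x%04x <- 0x%02x\n", addr, val);
    return 0;
}

int main(void)
{
    uint32_t eeprom_word = 0x1f40;   /* pretend EEPROM readout */
    uint16_t zd_cr157 = 0x0274;      /* offset used by ZD_CR157 above */

    /* only the upper byte of the word is the patch value */
    return write_phy_reg(zd_cr157, (eeprom_word >> 8) & 0xff);
}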
@@ -593,8 +593,8 @@ static int patch_6m_band_edge(struct zd_chip *chip, u8 channel)
593int zd_chip_generic_patch_6m_band(struct zd_chip *chip, int channel) 593int zd_chip_generic_patch_6m_band(struct zd_chip *chip, int channel)
594{ 594{
595 struct zd_ioreq16 ioreqs[] = { 595 struct zd_ioreq16 ioreqs[] = {
596 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, 596 { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
597 { CR47, 0x1e }, 597 { ZD_CR47, 0x1e },
598 }; 598 };
599 599
600 /* FIXME: Channel 11 is not the edge for all regulatory domains. */ 600 /* FIXME: Channel 11 is not the edge for all regulatory domains. */
@@ -608,69 +608,69 @@ int zd_chip_generic_patch_6m_band(struct zd_chip *chip, int channel)
608static int zd1211_hw_reset_phy(struct zd_chip *chip) 608static int zd1211_hw_reset_phy(struct zd_chip *chip)
609{ 609{
610 static const struct zd_ioreq16 ioreqs[] = { 610 static const struct zd_ioreq16 ioreqs[] = {
611 { CR0, 0x0a }, { CR1, 0x06 }, { CR2, 0x26 }, 611 { ZD_CR0, 0x0a }, { ZD_CR1, 0x06 }, { ZD_CR2, 0x26 },
612 { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xa0 }, 612 { ZD_CR3, 0x38 }, { ZD_CR4, 0x80 }, { ZD_CR9, 0xa0 },
613 { CR10, 0x81 }, { CR11, 0x00 }, { CR12, 0x7f }, 613 { ZD_CR10, 0x81 }, { ZD_CR11, 0x00 }, { ZD_CR12, 0x7f },
614 { CR13, 0x8c }, { CR14, 0x80 }, { CR15, 0x3d }, 614 { ZD_CR13, 0x8c }, { ZD_CR14, 0x80 }, { ZD_CR15, 0x3d },
615 { CR16, 0x20 }, { CR17, 0x1e }, { CR18, 0x0a }, 615 { ZD_CR16, 0x20 }, { ZD_CR17, 0x1e }, { ZD_CR18, 0x0a },
616 { CR19, 0x48 }, { CR20, 0x0c }, { CR21, 0x0c }, 616 { ZD_CR19, 0x48 }, { ZD_CR20, 0x0c }, { ZD_CR21, 0x0c },
617 { CR22, 0x23 }, { CR23, 0x90 }, { CR24, 0x14 }, 617 { ZD_CR22, 0x23 }, { ZD_CR23, 0x90 }, { ZD_CR24, 0x14 },
618 { CR25, 0x40 }, { CR26, 0x10 }, { CR27, 0x19 }, 618 { ZD_CR25, 0x40 }, { ZD_CR26, 0x10 }, { ZD_CR27, 0x19 },
619 { CR28, 0x7f }, { CR29, 0x80 }, { CR30, 0x4b }, 619 { ZD_CR28, 0x7f }, { ZD_CR29, 0x80 }, { ZD_CR30, 0x4b },
620 { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 }, 620 { ZD_CR31, 0x60 }, { ZD_CR32, 0x43 }, { ZD_CR33, 0x08 },
621 { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 }, 621 { ZD_CR34, 0x06 }, { ZD_CR35, 0x0a }, { ZD_CR36, 0x00 },
622 { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c }, 622 { ZD_CR37, 0x00 }, { ZD_CR38, 0x38 }, { ZD_CR39, 0x0c },
623 { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 }, 623 { ZD_CR40, 0x84 }, { ZD_CR41, 0x2a }, { ZD_CR42, 0x80 },
624 { CR43, 0x10 }, { CR44, 0x12 }, { CR46, 0xff }, 624 { ZD_CR43, 0x10 }, { ZD_CR44, 0x12 }, { ZD_CR46, 0xff },
625 { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b }, 625 { ZD_CR47, 0x1E }, { ZD_CR48, 0x26 }, { ZD_CR49, 0x5b },
626 { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 }, 626 { ZD_CR64, 0xd0 }, { ZD_CR65, 0x04 }, { ZD_CR66, 0x58 },
627 { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 }, 627 { ZD_CR67, 0xc9 }, { ZD_CR68, 0x88 }, { ZD_CR69, 0x41 },
628 { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff }, 628 { ZD_CR70, 0x23 }, { ZD_CR71, 0x10 }, { ZD_CR72, 0xff },
629 { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 }, 629 { ZD_CR73, 0x32 }, { ZD_CR74, 0x30 }, { ZD_CR75, 0x65 },
630 { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 }, 630 { ZD_CR76, 0x41 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x30 },
631 { CR79, 0x68 }, { CR80, 0x64 }, { CR81, 0x64 }, 631 { ZD_CR79, 0x68 }, { ZD_CR80, 0x64 }, { ZD_CR81, 0x64 },
632 { CR82, 0x00 }, { CR83, 0x00 }, { CR84, 0x00 }, 632 { ZD_CR82, 0x00 }, { ZD_CR83, 0x00 }, { ZD_CR84, 0x00 },
633 { CR85, 0x02 }, { CR86, 0x00 }, { CR87, 0x00 }, 633 { ZD_CR85, 0x02 }, { ZD_CR86, 0x00 }, { ZD_CR87, 0x00 },
634 { CR88, 0xff }, { CR89, 0xfc }, { CR90, 0x00 }, 634 { ZD_CR88, 0xff }, { ZD_CR89, 0xfc }, { ZD_CR90, 0x00 },
635 { CR91, 0x00 }, { CR92, 0x00 }, { CR93, 0x08 }, 635 { ZD_CR91, 0x00 }, { ZD_CR92, 0x00 }, { ZD_CR93, 0x08 },
636 { CR94, 0x00 }, { CR95, 0x00 }, { CR96, 0xff }, 636 { ZD_CR94, 0x00 }, { ZD_CR95, 0x00 }, { ZD_CR96, 0xff },
637 { CR97, 0xe7 }, { CR98, 0x00 }, { CR99, 0x00 }, 637 { ZD_CR97, 0xe7 }, { ZD_CR98, 0x00 }, { ZD_CR99, 0x00 },
638 { CR100, 0x00 }, { CR101, 0xae }, { CR102, 0x02 }, 638 { ZD_CR100, 0x00 }, { ZD_CR101, 0xae }, { ZD_CR102, 0x02 },
639 { CR103, 0x00 }, { CR104, 0x03 }, { CR105, 0x65 }, 639 { ZD_CR103, 0x00 }, { ZD_CR104, 0x03 }, { ZD_CR105, 0x65 },
640 { CR106, 0x04 }, { CR107, 0x00 }, { CR108, 0x0a }, 640 { ZD_CR106, 0x04 }, { ZD_CR107, 0x00 }, { ZD_CR108, 0x0a },
641 { CR109, 0xaa }, { CR110, 0xaa }, { CR111, 0x25 }, 641 { ZD_CR109, 0xaa }, { ZD_CR110, 0xaa }, { ZD_CR111, 0x25 },
642 { CR112, 0x25 }, { CR113, 0x00 }, { CR119, 0x1e }, 642 { ZD_CR112, 0x25 }, { ZD_CR113, 0x00 }, { ZD_CR119, 0x1e },
643 { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 }, 643 { ZD_CR125, 0x90 }, { ZD_CR126, 0x00 }, { ZD_CR127, 0x00 },
644 { }, 644 { },
645 { CR5, 0x00 }, { CR6, 0x00 }, { CR7, 0x00 }, 645 { ZD_CR5, 0x00 }, { ZD_CR6, 0x00 }, { ZD_CR7, 0x00 },
646 { CR8, 0x00 }, { CR9, 0x20 }, { CR12, 0xf0 }, 646 { ZD_CR8, 0x00 }, { ZD_CR9, 0x20 }, { ZD_CR12, 0xf0 },
647 { CR20, 0x0e }, { CR21, 0x0e }, { CR27, 0x10 }, 647 { ZD_CR20, 0x0e }, { ZD_CR21, 0x0e }, { ZD_CR27, 0x10 },
648 { CR44, 0x33 }, { CR47, 0x1E }, { CR83, 0x24 }, 648 { ZD_CR44, 0x33 }, { ZD_CR47, 0x1E }, { ZD_CR83, 0x24 },
649 { CR84, 0x04 }, { CR85, 0x00 }, { CR86, 0x0C }, 649 { ZD_CR84, 0x04 }, { ZD_CR85, 0x00 }, { ZD_CR86, 0x0C },
650 { CR87, 0x12 }, { CR88, 0x0C }, { CR89, 0x00 }, 650 { ZD_CR87, 0x12 }, { ZD_CR88, 0x0C }, { ZD_CR89, 0x00 },
651 { CR90, 0x10 }, { CR91, 0x08 }, { CR93, 0x00 }, 651 { ZD_CR90, 0x10 }, { ZD_CR91, 0x08 }, { ZD_CR93, 0x00 },
652 { CR94, 0x01 }, { CR95, 0x00 }, { CR96, 0x50 }, 652 { ZD_CR94, 0x01 }, { ZD_CR95, 0x00 }, { ZD_CR96, 0x50 },
653 { CR97, 0x37 }, { CR98, 0x35 }, { CR101, 0x13 }, 653 { ZD_CR97, 0x37 }, { ZD_CR98, 0x35 }, { ZD_CR101, 0x13 },
654 { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 }, 654 { ZD_CR102, 0x27 }, { ZD_CR103, 0x27 }, { ZD_CR104, 0x18 },
655 { CR105, 0x12 }, { CR109, 0x27 }, { CR110, 0x27 }, 655 { ZD_CR105, 0x12 }, { ZD_CR109, 0x27 }, { ZD_CR110, 0x27 },
656 { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 }, 656 { ZD_CR111, 0x27 }, { ZD_CR112, 0x27 }, { ZD_CR113, 0x27 },
657 { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 }, 657 { ZD_CR114, 0x27 }, { ZD_CR115, 0x26 }, { ZD_CR116, 0x24 },
658 { CR117, 0xfc }, { CR118, 0xfa }, { CR120, 0x4f }, 658 { ZD_CR117, 0xfc }, { ZD_CR118, 0xfa }, { ZD_CR120, 0x4f },
659 { CR125, 0xaa }, { CR127, 0x03 }, { CR128, 0x14 }, 659 { ZD_CR125, 0xaa }, { ZD_CR127, 0x03 }, { ZD_CR128, 0x14 },
660 { CR129, 0x12 }, { CR130, 0x10 }, { CR131, 0x0C }, 660 { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 }, { ZD_CR131, 0x0C },
661 { CR136, 0xdf }, { CR137, 0x40 }, { CR138, 0xa0 }, 661 { ZD_CR136, 0xdf }, { ZD_CR137, 0x40 }, { ZD_CR138, 0xa0 },
662 { CR139, 0xb0 }, { CR140, 0x99 }, { CR141, 0x82 }, 662 { ZD_CR139, 0xb0 }, { ZD_CR140, 0x99 }, { ZD_CR141, 0x82 },
663 { CR142, 0x54 }, { CR143, 0x1c }, { CR144, 0x6c }, 663 { ZD_CR142, 0x54 }, { ZD_CR143, 0x1c }, { ZD_CR144, 0x6c },
664 { CR147, 0x07 }, { CR148, 0x4c }, { CR149, 0x50 }, 664 { ZD_CR147, 0x07 }, { ZD_CR148, 0x4c }, { ZD_CR149, 0x50 },
665 { CR150, 0x0e }, { CR151, 0x18 }, { CR160, 0xfe }, 665 { ZD_CR150, 0x0e }, { ZD_CR151, 0x18 }, { ZD_CR160, 0xfe },
666 { CR161, 0xee }, { CR162, 0xaa }, { CR163, 0xfa }, 666 { ZD_CR161, 0xee }, { ZD_CR162, 0xaa }, { ZD_CR163, 0xfa },
667 { CR164, 0xfa }, { CR165, 0xea }, { CR166, 0xbe }, 667 { ZD_CR164, 0xfa }, { ZD_CR165, 0xea }, { ZD_CR166, 0xbe },
668 { CR167, 0xbe }, { CR168, 0x6a }, { CR169, 0xba }, 668 { ZD_CR167, 0xbe }, { ZD_CR168, 0x6a }, { ZD_CR169, 0xba },
669 { CR170, 0xba }, { CR171, 0xba }, 669 { ZD_CR170, 0xba }, { ZD_CR171, 0xba },
670 /* Note: CR204 must lead the CR203 */ 670 /* Note: ZD_CR204 must lead the ZD_CR203 */
671 { CR204, 0x7d }, 671 { ZD_CR204, 0x7d },
672 { }, 672 { },
673 { CR203, 0x30 }, 673 { ZD_CR203, 0x30 },
674 }; 674 };
675 675
676 int r, t; 676 int r, t;
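The reset table above keeps the note that ZD_CR204 must be written before ZD_CR203, and an empty { } entry separates the two. Below is a rough standalone sketch of a batched write loop that treats an empty entry as a flush point; whether zd_iowrite16a_locked() works exactly this way is an assumption for illustration, not something this diff shows:

#include <stdio.h>
#include <stdint.h>

struct ioreq16 {
    uint16_t addr;
    uint16_t value;
};

/* Stub: pretend to push one chunk of register writes to the device. */
static void flush_chunk(const struct ioreq16 *chunk, int n)
{
    for (int i = 0; i < n; i++)
        printf("  write 0x%04x <- 0x%02x\n", chunk[i].addr, chunk[i].value);
    printf("  -- chunk of %d flushed --\n", n);
}

static void write_table(const struct ioreq16 *t, int count)
{
    int start = 0;

    for (int i = 0; i < count; i++) {
        if (!t[i].addr && !t[i].value) {   /* empty entry: ordering barrier */
            flush_chunk(&t[start], i - start);
            start = i + 1;
        }
    }
    if (count > start)
        flush_chunk(&t[start], count - start);
}

int main(void)
{
    static const struct ioreq16 regs[] = {
        { 0x0330, 0x7d },   /* ZD_CR204 */
        { 0, 0 },           /* keep ordering across chunks */
        { 0x032C, 0x30 },   /* ZD_CR203 */
    };

    write_table(regs, 3);
    return 0;
}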
@@ -697,62 +697,62 @@ out:
697static int zd1211b_hw_reset_phy(struct zd_chip *chip) 697static int zd1211b_hw_reset_phy(struct zd_chip *chip)
698{ 698{
699 static const struct zd_ioreq16 ioreqs[] = { 699 static const struct zd_ioreq16 ioreqs[] = {
700 { CR0, 0x14 }, { CR1, 0x06 }, { CR2, 0x26 }, 700 { ZD_CR0, 0x14 }, { ZD_CR1, 0x06 }, { ZD_CR2, 0x26 },
701 { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xe0 }, 701 { ZD_CR3, 0x38 }, { ZD_CR4, 0x80 }, { ZD_CR9, 0xe0 },
702 { CR10, 0x81 }, 702 { ZD_CR10, 0x81 },
703 /* power control { { CR11, 1 << 6 }, */ 703 /* power control { { ZD_CR11, 1 << 6 }, */
704 { CR11, 0x00 }, 704 { ZD_CR11, 0x00 },
705 { CR12, 0xf0 }, { CR13, 0x8c }, { CR14, 0x80 }, 705 { ZD_CR12, 0xf0 }, { ZD_CR13, 0x8c }, { ZD_CR14, 0x80 },
706 { CR15, 0x3d }, { CR16, 0x20 }, { CR17, 0x1e }, 706 { ZD_CR15, 0x3d }, { ZD_CR16, 0x20 }, { ZD_CR17, 0x1e },
707 { CR18, 0x0a }, { CR19, 0x48 }, 707 { ZD_CR18, 0x0a }, { ZD_CR19, 0x48 },
708 { CR20, 0x10 }, /* Org:0x0E, ComTrend:RalLink AP */ 708 { ZD_CR20, 0x10 }, /* Org:0x0E, ComTrend:RalLink AP */
709 { CR21, 0x0e }, { CR22, 0x23 }, { CR23, 0x90 }, 709 { ZD_CR21, 0x0e }, { ZD_CR22, 0x23 }, { ZD_CR23, 0x90 },
710 { CR24, 0x14 }, { CR25, 0x40 }, { CR26, 0x10 }, 710 { ZD_CR24, 0x14 }, { ZD_CR25, 0x40 }, { ZD_CR26, 0x10 },
711 { CR27, 0x10 }, { CR28, 0x7f }, { CR29, 0x80 }, 711 { ZD_CR27, 0x10 }, { ZD_CR28, 0x7f }, { ZD_CR29, 0x80 },
712 { CR30, 0x4b }, /* ASIC/FWT, no jointly decoder */ 712 { ZD_CR30, 0x4b }, /* ASIC/FWT, no jointly decoder */
713 { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 }, 713 { ZD_CR31, 0x60 }, { ZD_CR32, 0x43 }, { ZD_CR33, 0x08 },
714 { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 }, 714 { ZD_CR34, 0x06 }, { ZD_CR35, 0x0a }, { ZD_CR36, 0x00 },
715 { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c }, 715 { ZD_CR37, 0x00 }, { ZD_CR38, 0x38 }, { ZD_CR39, 0x0c },
716 { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 }, 716 { ZD_CR40, 0x84 }, { ZD_CR41, 0x2a }, { ZD_CR42, 0x80 },
717 { CR43, 0x10 }, { CR44, 0x33 }, { CR46, 0xff }, 717 { ZD_CR43, 0x10 }, { ZD_CR44, 0x33 }, { ZD_CR46, 0xff },
718 { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b }, 718 { ZD_CR47, 0x1E }, { ZD_CR48, 0x26 }, { ZD_CR49, 0x5b },
719 { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 }, 719 { ZD_CR64, 0xd0 }, { ZD_CR65, 0x04 }, { ZD_CR66, 0x58 },
720 { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 }, 720 { ZD_CR67, 0xc9 }, { ZD_CR68, 0x88 }, { ZD_CR69, 0x41 },
721 { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff }, 721 { ZD_CR70, 0x23 }, { ZD_CR71, 0x10 }, { ZD_CR72, 0xff },
722 { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 }, 722 { ZD_CR73, 0x32 }, { ZD_CR74, 0x30 }, { ZD_CR75, 0x65 },
723 { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 }, 723 { ZD_CR76, 0x41 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x30 },
724 { CR79, 0xf0 }, { CR80, 0x64 }, { CR81, 0x64 }, 724 { ZD_CR79, 0xf0 }, { ZD_CR80, 0x64 }, { ZD_CR81, 0x64 },
725 { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 }, 725 { ZD_CR82, 0x00 }, { ZD_CR83, 0x24 }, { ZD_CR84, 0x04 },
726 { CR85, 0x00 }, { CR86, 0x0c }, { CR87, 0x12 }, 726 { ZD_CR85, 0x00 }, { ZD_CR86, 0x0c }, { ZD_CR87, 0x12 },
727 { CR88, 0x0c }, { CR89, 0x00 }, { CR90, 0x58 }, 727 { ZD_CR88, 0x0c }, { ZD_CR89, 0x00 }, { ZD_CR90, 0x58 },
728 { CR91, 0x04 }, { CR92, 0x00 }, { CR93, 0x00 }, 728 { ZD_CR91, 0x04 }, { ZD_CR92, 0x00 }, { ZD_CR93, 0x00 },
729 { CR94, 0x01 }, 729 { ZD_CR94, 0x01 },
730 { CR95, 0x20 }, /* ZD1211B */ 730 { ZD_CR95, 0x20 }, /* ZD1211B */
731 { CR96, 0x50 }, { CR97, 0x37 }, { CR98, 0x35 }, 731 { ZD_CR96, 0x50 }, { ZD_CR97, 0x37 }, { ZD_CR98, 0x35 },
732 { CR99, 0x00 }, { CR100, 0x01 }, { CR101, 0x13 }, 732 { ZD_CR99, 0x00 }, { ZD_CR100, 0x01 }, { ZD_CR101, 0x13 },
733 { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 }, 733 { ZD_CR102, 0x27 }, { ZD_CR103, 0x27 }, { ZD_CR104, 0x18 },
734 { CR105, 0x12 }, { CR106, 0x04 }, { CR107, 0x00 }, 734 { ZD_CR105, 0x12 }, { ZD_CR106, 0x04 }, { ZD_CR107, 0x00 },
735 { CR108, 0x0a }, { CR109, 0x27 }, { CR110, 0x27 }, 735 { ZD_CR108, 0x0a }, { ZD_CR109, 0x27 }, { ZD_CR110, 0x27 },
736 { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 }, 736 { ZD_CR111, 0x27 }, { ZD_CR112, 0x27 }, { ZD_CR113, 0x27 },
737 { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 }, 737 { ZD_CR114, 0x27 }, { ZD_CR115, 0x26 }, { ZD_CR116, 0x24 },
738 { CR117, 0xfc }, { CR118, 0xfa }, { CR119, 0x1e }, 738 { ZD_CR117, 0xfc }, { ZD_CR118, 0xfa }, { ZD_CR119, 0x1e },
739 { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 }, 739 { ZD_CR125, 0x90 }, { ZD_CR126, 0x00 }, { ZD_CR127, 0x00 },
740 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, 740 { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
741 { CR131, 0x0c }, { CR136, 0xdf }, { CR137, 0xa0 }, 741 { ZD_CR131, 0x0c }, { ZD_CR136, 0xdf }, { ZD_CR137, 0xa0 },
742 { CR138, 0xa8 }, { CR139, 0xb4 }, { CR140, 0x98 }, 742 { ZD_CR138, 0xa8 }, { ZD_CR139, 0xb4 }, { ZD_CR140, 0x98 },
743 { CR141, 0x82 }, { CR142, 0x53 }, { CR143, 0x1c }, 743 { ZD_CR141, 0x82 }, { ZD_CR142, 0x53 }, { ZD_CR143, 0x1c },
744 { CR144, 0x6c }, { CR147, 0x07 }, { CR148, 0x40 }, 744 { ZD_CR144, 0x6c }, { ZD_CR147, 0x07 }, { ZD_CR148, 0x40 },
745 { CR149, 0x40 }, /* Org:0x50 ComTrend:RalLink AP */ 745 { ZD_CR149, 0x40 }, /* Org:0x50 ComTrend:RalLink AP */
746 { CR150, 0x14 }, /* Org:0x0E ComTrend:RalLink AP */ 746 { ZD_CR150, 0x14 }, /* Org:0x0E ComTrend:RalLink AP */
747 { CR151, 0x18 }, { CR159, 0x70 }, { CR160, 0xfe }, 747 { ZD_CR151, 0x18 }, { ZD_CR159, 0x70 }, { ZD_CR160, 0xfe },
748 { CR161, 0xee }, { CR162, 0xaa }, { CR163, 0xfa }, 748 { ZD_CR161, 0xee }, { ZD_CR162, 0xaa }, { ZD_CR163, 0xfa },
749 { CR164, 0xfa }, { CR165, 0xea }, { CR166, 0xbe }, 749 { ZD_CR164, 0xfa }, { ZD_CR165, 0xea }, { ZD_CR166, 0xbe },
750 { CR167, 0xbe }, { CR168, 0x6a }, { CR169, 0xba }, 750 { ZD_CR167, 0xbe }, { ZD_CR168, 0x6a }, { ZD_CR169, 0xba },
751 { CR170, 0xba }, { CR171, 0xba }, 751 { ZD_CR170, 0xba }, { ZD_CR171, 0xba },
752 /* Note: CR204 must lead the CR203 */ 752 /* Note: ZD_CR204 must lead the ZD_CR203 */
753 { CR204, 0x7d }, 753 { ZD_CR204, 0x7d },
754 {}, 754 {},
755 { CR203, 0x30 }, 755 { ZD_CR203, 0x30 },
756 }; 756 };
757 757
758 int r, t; 758 int r, t;
@@ -1200,24 +1200,24 @@ out:
1200static int update_pwr_int(struct zd_chip *chip, u8 channel) 1200static int update_pwr_int(struct zd_chip *chip, u8 channel)
1201{ 1201{
1202 u8 value = chip->pwr_int_values[channel - 1]; 1202 u8 value = chip->pwr_int_values[channel - 1];
1203 return zd_iowrite16_locked(chip, value, CR31); 1203 return zd_iowrite16_locked(chip, value, ZD_CR31);
1204} 1204}
1205 1205
1206static int update_pwr_cal(struct zd_chip *chip, u8 channel) 1206static int update_pwr_cal(struct zd_chip *chip, u8 channel)
1207{ 1207{
1208 u8 value = chip->pwr_cal_values[channel-1]; 1208 u8 value = chip->pwr_cal_values[channel-1];
1209 return zd_iowrite16_locked(chip, value, CR68); 1209 return zd_iowrite16_locked(chip, value, ZD_CR68);
1210} 1210}
1211 1211
1212static int update_ofdm_cal(struct zd_chip *chip, u8 channel) 1212static int update_ofdm_cal(struct zd_chip *chip, u8 channel)
1213{ 1213{
1214 struct zd_ioreq16 ioreqs[3]; 1214 struct zd_ioreq16 ioreqs[3];
1215 1215
1216 ioreqs[0].addr = CR67; 1216 ioreqs[0].addr = ZD_CR67;
1217 ioreqs[0].value = chip->ofdm_cal_values[OFDM_36M_INDEX][channel-1]; 1217 ioreqs[0].value = chip->ofdm_cal_values[OFDM_36M_INDEX][channel-1];
1218 ioreqs[1].addr = CR66; 1218 ioreqs[1].addr = ZD_CR66;
1219 ioreqs[1].value = chip->ofdm_cal_values[OFDM_48M_INDEX][channel-1]; 1219 ioreqs[1].value = chip->ofdm_cal_values[OFDM_48M_INDEX][channel-1];
1220 ioreqs[2].addr = CR65; 1220 ioreqs[2].addr = ZD_CR65;
1221 ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1]; 1221 ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1];
1222 1222
1223 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 1223 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
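update_ofdm_cal() above writes one calibration byte per OFDM rate group, indexed by (channel - 1). A toy version of that table lookup; the table contents are placeholders rather than the chip's EEPROM-derived values:

#include <stdio.h>
#include <stdint.h>

#define NUM_CHANNELS 14
enum { OFDM_36M_INDEX, OFDM_48M_INDEX, OFDM_54M_INDEX, OFDM_GROUPS };

/* Placeholder values for illustration only. */
static const uint8_t ofdm_cal[OFDM_GROUPS][NUM_CHANNELS] = {
    [OFDM_36M_INDEX] = { 0x65, 0x65, 0x64 },
    [OFDM_48M_INDEX] = { 0x58, 0x57, 0x56 },
    [OFDM_54M_INDEX] = { 0x50, 0x4f, 0x4e },
};

int main(void)
{
    unsigned int channel = 3;   /* channels are 1-based */

    for (int g = 0; g < OFDM_GROUPS; g++)
        printf("group %d -> 0x%02x\n", g, ofdm_cal[g][channel - 1]);
    return 0;
}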
@@ -1236,9 +1236,9 @@ static int update_channel_integration_and_calibration(struct zd_chip *chip,
1236 return r; 1236 return r;
1237 if (zd_chip_is_zd1211b(chip)) { 1237 if (zd_chip_is_zd1211b(chip)) {
1238 static const struct zd_ioreq16 ioreqs[] = { 1238 static const struct zd_ioreq16 ioreqs[] = {
1239 { CR69, 0x28 }, 1239 { ZD_CR69, 0x28 },
1240 {}, 1240 {},
1241 { CR69, 0x2a }, 1241 { ZD_CR69, 0x2a },
1242 }; 1242 };
1243 1243
1244 r = update_ofdm_cal(chip, channel); 1244 r = update_ofdm_cal(chip, channel);
@@ -1269,7 +1269,7 @@ static int patch_cck_gain(struct zd_chip *chip)
1269 if (r) 1269 if (r)
1270 return r; 1270 return r;
1271 dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value & 0xff); 1271 dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value & 0xff);
1272 return zd_iowrite16_locked(chip, value & 0xff, CR47); 1272 return zd_iowrite16_locked(chip, value & 0xff, ZD_CR47);
1273} 1273}
1274 1274
1275int zd_chip_set_channel(struct zd_chip *chip, u8 channel) 1275int zd_chip_set_channel(struct zd_chip *chip, u8 channel)
@@ -1505,9 +1505,9 @@ int zd_rfwritev_locked(struct zd_chip *chip,
1505int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value) 1505int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value)
1506{ 1506{
1507 const struct zd_ioreq16 ioreqs[] = { 1507 const struct zd_ioreq16 ioreqs[] = {
1508 { CR244, (value >> 16) & 0xff }, 1508 { ZD_CR244, (value >> 16) & 0xff },
1509 { CR243, (value >> 8) & 0xff }, 1509 { ZD_CR243, (value >> 8) & 0xff },
1510 { CR242, value & 0xff }, 1510 { ZD_CR242, value & 0xff },
1511 }; 1511 };
1512 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 1512 ZD_ASSERT(mutex_is_locked(&chip->mutex));
1513 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 1513 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
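zd_rfwrite_cr_locked() above pushes a 24-bit RF word out through three 8-bit registers, high byte first. The same split in standalone form, using an RF word that appears in the AL2230 tables later in this diff:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t value = 0x0403b9;            /* "external control TX power (ZD_CR31)" */
    uint8_t hi  = (value >> 16) & 0xff;   /* -> ZD_CR244 */
    uint8_t mid = (value >> 8) & 0xff;    /* -> ZD_CR243 */
    uint8_t lo  = value & 0xff;           /* -> ZD_CR242 */

    printf("0x%06x -> %02x %02x %02x\n", value, hi, mid, lo);
    return 0;
}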
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 14e4402a6111..4be7c3b5b265 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -61,277 +61,288 @@ enum {
61#define FWRAW_DATA(offset) ((zd_addr_t)(FW_START + (offset))) 61#define FWRAW_DATA(offset) ((zd_addr_t)(FW_START + (offset)))
62 62
63/* 8-bit hardware registers */ 63/* 8-bit hardware registers */
64#define CR0 CTL_REG(0x0000) 64#define ZD_CR0 CTL_REG(0x0000)
65#define CR1 CTL_REG(0x0004) 65#define ZD_CR1 CTL_REG(0x0004)
66#define CR2 CTL_REG(0x0008) 66#define ZD_CR2 CTL_REG(0x0008)
67#define CR3 CTL_REG(0x000C) 67#define ZD_CR3 CTL_REG(0x000C)
68 68
69#define CR5 CTL_REG(0x0010) 69#define ZD_CR5 CTL_REG(0x0010)
70/* bit 5: if set short preamble used 70/* bit 5: if set short preamble used
71 * bit 6: filter band - Japan channel 14 on, else off 71 * bit 6: filter band - Japan channel 14 on, else off
72 */ 72 */
73#define CR6 CTL_REG(0x0014) 73#define ZD_CR6 CTL_REG(0x0014)
74#define CR7 CTL_REG(0x0018) 74#define ZD_CR7 CTL_REG(0x0018)
75#define CR8 CTL_REG(0x001C) 75#define ZD_CR8 CTL_REG(0x001C)
76 76
77#define CR4 CTL_REG(0x0020) 77#define ZD_CR4 CTL_REG(0x0020)
78 78
79#define CR9 CTL_REG(0x0024) 79#define ZD_CR9 CTL_REG(0x0024)
80/* bit 2: antenna switch (together with CR10) */ 80/* bit 2: antenna switch (together with ZD_CR10) */
81#define CR10 CTL_REG(0x0028) 81#define ZD_CR10 CTL_REG(0x0028)
82/* bit 1: antenna switch (together with CR9) 82/* bit 1: antenna switch (together with ZD_CR9)
83 * RF2959 controls with CR11 radion on and off 83 * RF2959 controls with ZD_CR11 radion on and off
84 */ 84 */
85#define CR11 CTL_REG(0x002C) 85#define ZD_CR11 CTL_REG(0x002C)
86/* bit 6: TX power control for OFDM 86/* bit 6: TX power control for OFDM
87 * RF2959 controls with CR10 radio on and off 87 * RF2959 controls with ZD_CR10 radio on and off
88 */ 88 */
89#define CR12 CTL_REG(0x0030) 89#define ZD_CR12 CTL_REG(0x0030)
90#define CR13 CTL_REG(0x0034) 90#define ZD_CR13 CTL_REG(0x0034)
91#define CR14 CTL_REG(0x0038) 91#define ZD_CR14 CTL_REG(0x0038)
92#define CR15 CTL_REG(0x003C) 92#define ZD_CR15 CTL_REG(0x003C)
93#define CR16 CTL_REG(0x0040) 93#define ZD_CR16 CTL_REG(0x0040)
94#define CR17 CTL_REG(0x0044) 94#define ZD_CR17 CTL_REG(0x0044)
95#define CR18 CTL_REG(0x0048) 95#define ZD_CR18 CTL_REG(0x0048)
96#define CR19 CTL_REG(0x004C) 96#define ZD_CR19 CTL_REG(0x004C)
97#define CR20 CTL_REG(0x0050) 97#define ZD_CR20 CTL_REG(0x0050)
98#define CR21 CTL_REG(0x0054) 98#define ZD_CR21 CTL_REG(0x0054)
99#define CR22 CTL_REG(0x0058) 99#define ZD_CR22 CTL_REG(0x0058)
100#define CR23 CTL_REG(0x005C) 100#define ZD_CR23 CTL_REG(0x005C)
101#define CR24 CTL_REG(0x0060) /* CCA threshold */ 101#define ZD_CR24 CTL_REG(0x0060) /* CCA threshold */
102#define CR25 CTL_REG(0x0064) 102#define ZD_CR25 CTL_REG(0x0064)
103#define CR26 CTL_REG(0x0068) 103#define ZD_CR26 CTL_REG(0x0068)
104#define CR27 CTL_REG(0x006C) 104#define ZD_CR27 CTL_REG(0x006C)
105#define CR28 CTL_REG(0x0070) 105#define ZD_CR28 CTL_REG(0x0070)
106#define CR29 CTL_REG(0x0074) 106#define ZD_CR29 CTL_REG(0x0074)
107#define CR30 CTL_REG(0x0078) 107#define ZD_CR30 CTL_REG(0x0078)
108#define CR31 CTL_REG(0x007C) /* TX power control for RF in CCK mode */ 108#define ZD_CR31 CTL_REG(0x007C) /* TX power control for RF in
109#define CR32 CTL_REG(0x0080) 109 * CCK mode
110#define CR33 CTL_REG(0x0084) 110 */
111#define CR34 CTL_REG(0x0088) 111#define ZD_CR32 CTL_REG(0x0080)
112#define CR35 CTL_REG(0x008C) 112#define ZD_CR33 CTL_REG(0x0084)
113#define CR36 CTL_REG(0x0090) 113#define ZD_CR34 CTL_REG(0x0088)
114#define CR37 CTL_REG(0x0094) 114#define ZD_CR35 CTL_REG(0x008C)
115#define CR38 CTL_REG(0x0098) 115#define ZD_CR36 CTL_REG(0x0090)
116#define CR39 CTL_REG(0x009C) 116#define ZD_CR37 CTL_REG(0x0094)
117#define CR40 CTL_REG(0x00A0) 117#define ZD_CR38 CTL_REG(0x0098)
118#define CR41 CTL_REG(0x00A4) 118#define ZD_CR39 CTL_REG(0x009C)
119#define CR42 CTL_REG(0x00A8) 119#define ZD_CR40 CTL_REG(0x00A0)
120#define CR43 CTL_REG(0x00AC) 120#define ZD_CR41 CTL_REG(0x00A4)
121#define CR44 CTL_REG(0x00B0) 121#define ZD_CR42 CTL_REG(0x00A8)
122#define CR45 CTL_REG(0x00B4) 122#define ZD_CR43 CTL_REG(0x00AC)
123#define CR46 CTL_REG(0x00B8) 123#define ZD_CR44 CTL_REG(0x00B0)
124#define CR47 CTL_REG(0x00BC) /* CCK baseband gain 124#define ZD_CR45 CTL_REG(0x00B4)
125 * (patch value might be in EEPROM) 125#define ZD_CR46 CTL_REG(0x00B8)
126 */ 126#define ZD_CR47 CTL_REG(0x00BC) /* CCK baseband gain
127#define CR48 CTL_REG(0x00C0) 127 * (patch value might be in EEPROM)
128#define CR49 CTL_REG(0x00C4) 128 */
129#define CR50 CTL_REG(0x00C8) 129#define ZD_CR48 CTL_REG(0x00C0)
130#define CR51 CTL_REG(0x00CC) /* TX power control for RF in 6-36M modes */ 130#define ZD_CR49 CTL_REG(0x00C4)
131#define CR52 CTL_REG(0x00D0) /* TX power control for RF in 48M mode */ 131#define ZD_CR50 CTL_REG(0x00C8)
132#define CR53 CTL_REG(0x00D4) /* TX power control for RF in 54M mode */ 132#define ZD_CR51 CTL_REG(0x00CC) /* TX power control for RF in
133#define CR54 CTL_REG(0x00D8) 133 * 6-36M modes
134#define CR55 CTL_REG(0x00DC) 134 */
135#define CR56 CTL_REG(0x00E0) 135#define ZD_CR52 CTL_REG(0x00D0) /* TX power control for RF in
136#define CR57 CTL_REG(0x00E4) 136 * 48M mode
137#define CR58 CTL_REG(0x00E8) 137 */
138#define CR59 CTL_REG(0x00EC) 138#define ZD_CR53 CTL_REG(0x00D4) /* TX power control for RF in
139#define CR60 CTL_REG(0x00F0) 139 * 54M mode
140#define CR61 CTL_REG(0x00F4) 140 */
141#define CR62 CTL_REG(0x00F8) 141#define ZD_CR54 CTL_REG(0x00D8)
142#define CR63 CTL_REG(0x00FC) 142#define ZD_CR55 CTL_REG(0x00DC)
143#define CR64 CTL_REG(0x0100) 143#define ZD_CR56 CTL_REG(0x00E0)
144#define CR65 CTL_REG(0x0104) /* OFDM 54M calibration */ 144#define ZD_CR57 CTL_REG(0x00E4)
145#define CR66 CTL_REG(0x0108) /* OFDM 48M calibration */ 145#define ZD_CR58 CTL_REG(0x00E8)
146#define CR67 CTL_REG(0x010C) /* OFDM 36M calibration */ 146#define ZD_CR59 CTL_REG(0x00EC)
147#define CR68 CTL_REG(0x0110) /* CCK calibration */ 147#define ZD_CR60 CTL_REG(0x00F0)
148#define CR69 CTL_REG(0x0114) 148#define ZD_CR61 CTL_REG(0x00F4)
149#define CR70 CTL_REG(0x0118) 149#define ZD_CR62 CTL_REG(0x00F8)
150#define CR71 CTL_REG(0x011C) 150#define ZD_CR63 CTL_REG(0x00FC)
151#define CR72 CTL_REG(0x0120) 151#define ZD_CR64 CTL_REG(0x0100)
152#define CR73 CTL_REG(0x0124) 152#define ZD_CR65 CTL_REG(0x0104) /* OFDM 54M calibration */
153#define CR74 CTL_REG(0x0128) 153#define ZD_CR66 CTL_REG(0x0108) /* OFDM 48M calibration */
154#define CR75 CTL_REG(0x012C) 154#define ZD_CR67 CTL_REG(0x010C) /* OFDM 36M calibration */
155#define CR76 CTL_REG(0x0130) 155#define ZD_CR68 CTL_REG(0x0110) /* CCK calibration */
156#define CR77 CTL_REG(0x0134) 156#define ZD_CR69 CTL_REG(0x0114)
157#define CR78 CTL_REG(0x0138) 157#define ZD_CR70 CTL_REG(0x0118)
158#define CR79 CTL_REG(0x013C) 158#define ZD_CR71 CTL_REG(0x011C)
159#define CR80 CTL_REG(0x0140) 159#define ZD_CR72 CTL_REG(0x0120)
160#define CR81 CTL_REG(0x0144) 160#define ZD_CR73 CTL_REG(0x0124)
161#define CR82 CTL_REG(0x0148) 161#define ZD_CR74 CTL_REG(0x0128)
162#define CR83 CTL_REG(0x014C) 162#define ZD_CR75 CTL_REG(0x012C)
163#define CR84 CTL_REG(0x0150) 163#define ZD_CR76 CTL_REG(0x0130)
164#define CR85 CTL_REG(0x0154) 164#define ZD_CR77 CTL_REG(0x0134)
165#define CR86 CTL_REG(0x0158) 165#define ZD_CR78 CTL_REG(0x0138)
166#define CR87 CTL_REG(0x015C) 166#define ZD_CR79 CTL_REG(0x013C)
167#define CR88 CTL_REG(0x0160) 167#define ZD_CR80 CTL_REG(0x0140)
168#define CR89 CTL_REG(0x0164) 168#define ZD_CR81 CTL_REG(0x0144)
169#define CR90 CTL_REG(0x0168) 169#define ZD_CR82 CTL_REG(0x0148)
170#define CR91 CTL_REG(0x016C) 170#define ZD_CR83 CTL_REG(0x014C)
171#define CR92 CTL_REG(0x0170) 171#define ZD_CR84 CTL_REG(0x0150)
172#define CR93 CTL_REG(0x0174) 172#define ZD_CR85 CTL_REG(0x0154)
173#define CR94 CTL_REG(0x0178) 173#define ZD_CR86 CTL_REG(0x0158)
174#define CR95 CTL_REG(0x017C) 174#define ZD_CR87 CTL_REG(0x015C)
175#define CR96 CTL_REG(0x0180) 175#define ZD_CR88 CTL_REG(0x0160)
176#define CR97 CTL_REG(0x0184) 176#define ZD_CR89 CTL_REG(0x0164)
177#define CR98 CTL_REG(0x0188) 177#define ZD_CR90 CTL_REG(0x0168)
178#define CR99 CTL_REG(0x018C) 178#define ZD_CR91 CTL_REG(0x016C)
179#define CR100 CTL_REG(0x0190) 179#define ZD_CR92 CTL_REG(0x0170)
180#define CR101 CTL_REG(0x0194) 180#define ZD_CR93 CTL_REG(0x0174)
181#define CR102 CTL_REG(0x0198) 181#define ZD_CR94 CTL_REG(0x0178)
182#define CR103 CTL_REG(0x019C) 182#define ZD_CR95 CTL_REG(0x017C)
183#define CR104 CTL_REG(0x01A0) 183#define ZD_CR96 CTL_REG(0x0180)
184#define CR105 CTL_REG(0x01A4) 184#define ZD_CR97 CTL_REG(0x0184)
185#define CR106 CTL_REG(0x01A8) 185#define ZD_CR98 CTL_REG(0x0188)
186#define CR107 CTL_REG(0x01AC) 186#define ZD_CR99 CTL_REG(0x018C)
187#define CR108 CTL_REG(0x01B0) 187#define ZD_CR100 CTL_REG(0x0190)
188#define CR109 CTL_REG(0x01B4) 188#define ZD_CR101 CTL_REG(0x0194)
189#define CR110 CTL_REG(0x01B8) 189#define ZD_CR102 CTL_REG(0x0198)
190#define CR111 CTL_REG(0x01BC) 190#define ZD_CR103 CTL_REG(0x019C)
191#define CR112 CTL_REG(0x01C0) 191#define ZD_CR104 CTL_REG(0x01A0)
192#define CR113 CTL_REG(0x01C4) 192#define ZD_CR105 CTL_REG(0x01A4)
193#define CR114 CTL_REG(0x01C8) 193#define ZD_CR106 CTL_REG(0x01A8)
194#define CR115 CTL_REG(0x01CC) 194#define ZD_CR107 CTL_REG(0x01AC)
195#define CR116 CTL_REG(0x01D0) 195#define ZD_CR108 CTL_REG(0x01B0)
196#define CR117 CTL_REG(0x01D4) 196#define ZD_CR109 CTL_REG(0x01B4)
197#define CR118 CTL_REG(0x01D8) 197#define ZD_CR110 CTL_REG(0x01B8)
198#define CR119 CTL_REG(0x01DC) 198#define ZD_CR111 CTL_REG(0x01BC)
199#define CR120 CTL_REG(0x01E0) 199#define ZD_CR112 CTL_REG(0x01C0)
200#define CR121 CTL_REG(0x01E4) 200#define ZD_CR113 CTL_REG(0x01C4)
201#define CR122 CTL_REG(0x01E8) 201#define ZD_CR114 CTL_REG(0x01C8)
202#define CR123 CTL_REG(0x01EC) 202#define ZD_CR115 CTL_REG(0x01CC)
203#define CR124 CTL_REG(0x01F0) 203#define ZD_CR116 CTL_REG(0x01D0)
204#define CR125 CTL_REG(0x01F4) 204#define ZD_CR117 CTL_REG(0x01D4)
205#define CR126 CTL_REG(0x01F8) 205#define ZD_CR118 CTL_REG(0x01D8)
206#define CR127 CTL_REG(0x01FC) 206#define ZD_CR119 CTL_REG(0x01DC)
207#define CR128 CTL_REG(0x0200) 207#define ZD_CR120 CTL_REG(0x01E0)
208#define CR129 CTL_REG(0x0204) 208#define ZD_CR121 CTL_REG(0x01E4)
209#define CR130 CTL_REG(0x0208) 209#define ZD_CR122 CTL_REG(0x01E8)
210#define CR131 CTL_REG(0x020C) 210#define ZD_CR123 CTL_REG(0x01EC)
211#define CR132 CTL_REG(0x0210) 211#define ZD_CR124 CTL_REG(0x01F0)
212#define CR133 CTL_REG(0x0214) 212#define ZD_CR125 CTL_REG(0x01F4)
213#define CR134 CTL_REG(0x0218) 213#define ZD_CR126 CTL_REG(0x01F8)
214#define CR135 CTL_REG(0x021C) 214#define ZD_CR127 CTL_REG(0x01FC)
215#define CR136 CTL_REG(0x0220) 215#define ZD_CR128 CTL_REG(0x0200)
216#define CR137 CTL_REG(0x0224) 216#define ZD_CR129 CTL_REG(0x0204)
217#define CR138 CTL_REG(0x0228) 217#define ZD_CR130 CTL_REG(0x0208)
218#define CR139 CTL_REG(0x022C) 218#define ZD_CR131 CTL_REG(0x020C)
219#define CR140 CTL_REG(0x0230) 219#define ZD_CR132 CTL_REG(0x0210)
220#define CR141 CTL_REG(0x0234) 220#define ZD_CR133 CTL_REG(0x0214)
221#define CR142 CTL_REG(0x0238) 221#define ZD_CR134 CTL_REG(0x0218)
222#define CR143 CTL_REG(0x023C) 222#define ZD_CR135 CTL_REG(0x021C)
223#define CR144 CTL_REG(0x0240) 223#define ZD_CR136 CTL_REG(0x0220)
224#define CR145 CTL_REG(0x0244) 224#define ZD_CR137 CTL_REG(0x0224)
225#define CR146 CTL_REG(0x0248) 225#define ZD_CR138 CTL_REG(0x0228)
226#define CR147 CTL_REG(0x024C) 226#define ZD_CR139 CTL_REG(0x022C)
227#define CR148 CTL_REG(0x0250) 227#define ZD_CR140 CTL_REG(0x0230)
228#define CR149 CTL_REG(0x0254) 228#define ZD_CR141 CTL_REG(0x0234)
229#define CR150 CTL_REG(0x0258) 229#define ZD_CR142 CTL_REG(0x0238)
230#define CR151 CTL_REG(0x025C) 230#define ZD_CR143 CTL_REG(0x023C)
231#define CR152 CTL_REG(0x0260) 231#define ZD_CR144 CTL_REG(0x0240)
232#define CR153 CTL_REG(0x0264) 232#define ZD_CR145 CTL_REG(0x0244)
233#define CR154 CTL_REG(0x0268) 233#define ZD_CR146 CTL_REG(0x0248)
234#define CR155 CTL_REG(0x026C) 234#define ZD_CR147 CTL_REG(0x024C)
235#define CR156 CTL_REG(0x0270) 235#define ZD_CR148 CTL_REG(0x0250)
236#define CR157 CTL_REG(0x0274) 236#define ZD_CR149 CTL_REG(0x0254)
237#define CR158 CTL_REG(0x0278) 237#define ZD_CR150 CTL_REG(0x0258)
238#define CR159 CTL_REG(0x027C) 238#define ZD_CR151 CTL_REG(0x025C)
239#define CR160 CTL_REG(0x0280) 239#define ZD_CR152 CTL_REG(0x0260)
240#define CR161 CTL_REG(0x0284) 240#define ZD_CR153 CTL_REG(0x0264)
241#define CR162 CTL_REG(0x0288) 241#define ZD_CR154 CTL_REG(0x0268)
242#define CR163 CTL_REG(0x028C) 242#define ZD_CR155 CTL_REG(0x026C)
243#define CR164 CTL_REG(0x0290) 243#define ZD_CR156 CTL_REG(0x0270)
244#define CR165 CTL_REG(0x0294) 244#define ZD_CR157 CTL_REG(0x0274)
245#define CR166 CTL_REG(0x0298) 245#define ZD_CR158 CTL_REG(0x0278)
246#define CR167 CTL_REG(0x029C) 246#define ZD_CR159 CTL_REG(0x027C)
247#define CR168 CTL_REG(0x02A0) 247#define ZD_CR160 CTL_REG(0x0280)
248#define CR169 CTL_REG(0x02A4) 248#define ZD_CR161 CTL_REG(0x0284)
249#define CR170 CTL_REG(0x02A8) 249#define ZD_CR162 CTL_REG(0x0288)
250#define CR171 CTL_REG(0x02AC) 250#define ZD_CR163 CTL_REG(0x028C)
251#define CR172 CTL_REG(0x02B0) 251#define ZD_CR164 CTL_REG(0x0290)
252#define CR173 CTL_REG(0x02B4) 252#define ZD_CR165 CTL_REG(0x0294)
253#define CR174 CTL_REG(0x02B8) 253#define ZD_CR166 CTL_REG(0x0298)
254#define CR175 CTL_REG(0x02BC) 254#define ZD_CR167 CTL_REG(0x029C)
255#define CR176 CTL_REG(0x02C0) 255#define ZD_CR168 CTL_REG(0x02A0)
256#define CR177 CTL_REG(0x02C4) 256#define ZD_CR169 CTL_REG(0x02A4)
257#define CR178 CTL_REG(0x02C8) 257#define ZD_CR170 CTL_REG(0x02A8)
258#define CR179 CTL_REG(0x02CC) 258#define ZD_CR171 CTL_REG(0x02AC)
259#define CR180 CTL_REG(0x02D0) 259#define ZD_CR172 CTL_REG(0x02B0)
260#define CR181 CTL_REG(0x02D4) 260#define ZD_CR173 CTL_REG(0x02B4)
261#define CR182 CTL_REG(0x02D8) 261#define ZD_CR174 CTL_REG(0x02B8)
262#define CR183 CTL_REG(0x02DC) 262#define ZD_CR175 CTL_REG(0x02BC)
263#define CR184 CTL_REG(0x02E0) 263#define ZD_CR176 CTL_REG(0x02C0)
264#define CR185 CTL_REG(0x02E4) 264#define ZD_CR177 CTL_REG(0x02C4)
265#define CR186 CTL_REG(0x02E8) 265#define ZD_CR178 CTL_REG(0x02C8)
266#define CR187 CTL_REG(0x02EC) 266#define ZD_CR179 CTL_REG(0x02CC)
267#define CR188 CTL_REG(0x02F0) 267#define ZD_CR180 CTL_REG(0x02D0)
268#define CR189 CTL_REG(0x02F4) 268#define ZD_CR181 CTL_REG(0x02D4)
269#define CR190 CTL_REG(0x02F8) 269#define ZD_CR182 CTL_REG(0x02D8)
270#define CR191 CTL_REG(0x02FC) 270#define ZD_CR183 CTL_REG(0x02DC)
271#define CR192 CTL_REG(0x0300) 271#define ZD_CR184 CTL_REG(0x02E0)
272#define CR193 CTL_REG(0x0304) 272#define ZD_CR185 CTL_REG(0x02E4)
273#define CR194 CTL_REG(0x0308) 273#define ZD_CR186 CTL_REG(0x02E8)
274#define CR195 CTL_REG(0x030C) 274#define ZD_CR187 CTL_REG(0x02EC)
275#define CR196 CTL_REG(0x0310) 275#define ZD_CR188 CTL_REG(0x02F0)
276#define CR197 CTL_REG(0x0314) 276#define ZD_CR189 CTL_REG(0x02F4)
277#define CR198 CTL_REG(0x0318) 277#define ZD_CR190 CTL_REG(0x02F8)
278#define CR199 CTL_REG(0x031C) 278#define ZD_CR191 CTL_REG(0x02FC)
279#define CR200 CTL_REG(0x0320) 279#define ZD_CR192 CTL_REG(0x0300)
280#define CR201 CTL_REG(0x0324) 280#define ZD_CR193 CTL_REG(0x0304)
281#define CR202 CTL_REG(0x0328) 281#define ZD_CR194 CTL_REG(0x0308)
282#define CR203 CTL_REG(0x032C) /* I2C bus template value & flash control */ 282#define ZD_CR195 CTL_REG(0x030C)
283#define CR204 CTL_REG(0x0330) 283#define ZD_CR196 CTL_REG(0x0310)
284#define CR205 CTL_REG(0x0334) 284#define ZD_CR197 CTL_REG(0x0314)
285#define CR206 CTL_REG(0x0338) 285#define ZD_CR198 CTL_REG(0x0318)
286#define CR207 CTL_REG(0x033C) 286#define ZD_CR199 CTL_REG(0x031C)
287#define CR208 CTL_REG(0x0340) 287#define ZD_CR200 CTL_REG(0x0320)
288#define CR209 CTL_REG(0x0344) 288#define ZD_CR201 CTL_REG(0x0324)
289#define CR210 CTL_REG(0x0348) 289#define ZD_CR202 CTL_REG(0x0328)
290#define CR211 CTL_REG(0x034C) 290#define ZD_CR203 CTL_REG(0x032C) /* I2C bus template value & flash
291#define CR212 CTL_REG(0x0350) 291 * control
292#define CR213 CTL_REG(0x0354) 292 */
293#define CR214 CTL_REG(0x0358) 293#define ZD_CR204 CTL_REG(0x0330)
294#define CR215 CTL_REG(0x035C) 294#define ZD_CR205 CTL_REG(0x0334)
295#define CR216 CTL_REG(0x0360) 295#define ZD_CR206 CTL_REG(0x0338)
296#define CR217 CTL_REG(0x0364) 296#define ZD_CR207 CTL_REG(0x033C)
297#define CR218 CTL_REG(0x0368) 297#define ZD_CR208 CTL_REG(0x0340)
298#define CR219 CTL_REG(0x036C) 298#define ZD_CR209 CTL_REG(0x0344)
299#define CR220 CTL_REG(0x0370) 299#define ZD_CR210 CTL_REG(0x0348)
300#define CR221 CTL_REG(0x0374) 300#define ZD_CR211 CTL_REG(0x034C)
301#define CR222 CTL_REG(0x0378) 301#define ZD_CR212 CTL_REG(0x0350)
302#define CR223 CTL_REG(0x037C) 302#define ZD_CR213 CTL_REG(0x0354)
303#define CR224 CTL_REG(0x0380) 303#define ZD_CR214 CTL_REG(0x0358)
304#define CR225 CTL_REG(0x0384) 304#define ZD_CR215 CTL_REG(0x035C)
305#define CR226 CTL_REG(0x0388) 305#define ZD_CR216 CTL_REG(0x0360)
306#define CR227 CTL_REG(0x038C) 306#define ZD_CR217 CTL_REG(0x0364)
307#define CR228 CTL_REG(0x0390) 307#define ZD_CR218 CTL_REG(0x0368)
308#define CR229 CTL_REG(0x0394) 308#define ZD_CR219 CTL_REG(0x036C)
309#define CR230 CTL_REG(0x0398) 309#define ZD_CR220 CTL_REG(0x0370)
310#define CR231 CTL_REG(0x039C) 310#define ZD_CR221 CTL_REG(0x0374)
311#define CR232 CTL_REG(0x03A0) 311#define ZD_CR222 CTL_REG(0x0378)
312#define CR233 CTL_REG(0x03A4) 312#define ZD_CR223 CTL_REG(0x037C)
313#define CR234 CTL_REG(0x03A8) 313#define ZD_CR224 CTL_REG(0x0380)
314#define CR235 CTL_REG(0x03AC) 314#define ZD_CR225 CTL_REG(0x0384)
315#define CR236 CTL_REG(0x03B0) 315#define ZD_CR226 CTL_REG(0x0388)
316 316#define ZD_CR227 CTL_REG(0x038C)
317#define CR240 CTL_REG(0x03C0) 317#define ZD_CR228 CTL_REG(0x0390)
318/* bit 7: host-controlled RF register writes 318#define ZD_CR229 CTL_REG(0x0394)
319 * CR241-CR245: for hardware controlled writing of RF bits, not needed for 319#define ZD_CR230 CTL_REG(0x0398)
320 * USB 320#define ZD_CR231 CTL_REG(0x039C)
321#define ZD_CR232 CTL_REG(0x03A0)
322#define ZD_CR233 CTL_REG(0x03A4)
323#define ZD_CR234 CTL_REG(0x03A8)
324#define ZD_CR235 CTL_REG(0x03AC)
325#define ZD_CR236 CTL_REG(0x03B0)
326
327#define ZD_CR240 CTL_REG(0x03C0)
328/* bit 7: host-controlled RF register writes
329 * ZD_CR241-ZD_CR245: for hardware controlled writing of RF bits, not needed for
330 * USB
321 */ 331 */
322#define CR241 CTL_REG(0x03C4) 332#define ZD_CR241 CTL_REG(0x03C4)
323#define CR242 CTL_REG(0x03C8) 333#define ZD_CR242 CTL_REG(0x03C8)
324#define CR243 CTL_REG(0x03CC) 334#define ZD_CR243 CTL_REG(0x03CC)
325#define CR244 CTL_REG(0x03D0) 335#define ZD_CR244 CTL_REG(0x03D0)
326#define CR245 CTL_REG(0x03D4) 336#define ZD_CR245 CTL_REG(0x03D4)
327 337
328#define CR251 CTL_REG(0x03EC) /* only used for activation and deactivation of 338#define ZD_CR251 CTL_REG(0x03EC) /* only used for activation and
329 * Airoha RFs AL2230 and AL7230B 339 * deactivation of Airoha RFs AL2230
330 */ 340 * and AL7230B
331#define CR252 CTL_REG(0x03F0) 341 */
332#define CR253 CTL_REG(0x03F4) 342#define ZD_CR252 CTL_REG(0x03F0)
333#define CR254 CTL_REG(0x03F8) 343#define ZD_CR253 CTL_REG(0x03F4)
334#define CR255 CTL_REG(0x03FC) 344#define ZD_CR254 CTL_REG(0x03F8)
345#define ZD_CR255 CTL_REG(0x03FC)
335 346
336#define CR_MAX_PHY_REG 255 347#define CR_MAX_PHY_REG 255
337 348
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index 79dc1035592d..725b7c99b23d 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -55,7 +55,7 @@ struct zd_rf {
55 * defaults to 1 (yes) */ 55 * defaults to 1 (yes) */
56 u8 update_channel_int:1; 56 u8 update_channel_int:1;
57 57
58 /* whether CR47 should be patched from the EEPROM, if the appropriate 58 /* whether ZD_CR47 should be patched from the EEPROM, if the appropriate
59 * flag is set in the POD. The vendor driver suggests that this should 59 * flag is set in the POD. The vendor driver suggests that this should
60 * be done for all RF's, but a bug in their code prevents but their 60 * be done for all RF's, but a bug in their code prevents but their
61 * HW_OverWritePhyRegFromE2P() routine from ever taking effect. */ 61 * HW_OverWritePhyRegFromE2P() routine from ever taking effect. */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index 74a8f7a55591..12babcb633c3 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -61,31 +61,31 @@ static const u32 zd1211b_al2230_table[][3] = {
61}; 61};
62 62
63static const struct zd_ioreq16 zd1211b_ioreqs_shared_1[] = { 63static const struct zd_ioreq16 zd1211b_ioreqs_shared_1[] = {
64 { CR240, 0x57 }, { CR9, 0xe0 }, 64 { ZD_CR240, 0x57 }, { ZD_CR9, 0xe0 },
65}; 65};
66 66
67static const struct zd_ioreq16 ioreqs_init_al2230s[] = { 67static const struct zd_ioreq16 ioreqs_init_al2230s[] = {
68 { CR47, 0x1e }, /* MARK_002 */ 68 { ZD_CR47, 0x1e }, /* MARK_002 */
69 { CR106, 0x22 }, 69 { ZD_CR106, 0x22 },
70 { CR107, 0x2a }, /* MARK_002 */ 70 { ZD_CR107, 0x2a }, /* MARK_002 */
71 { CR109, 0x13 }, /* MARK_002 */ 71 { ZD_CR109, 0x13 }, /* MARK_002 */
72 { CR118, 0xf8 }, /* MARK_002 */ 72 { ZD_CR118, 0xf8 }, /* MARK_002 */
73 { CR119, 0x12 }, { CR122, 0xe0 }, 73 { ZD_CR119, 0x12 }, { ZD_CR122, 0xe0 },
74 { CR128, 0x10 }, /* MARK_001 from 0xe->0x10 */ 74 { ZD_CR128, 0x10 }, /* MARK_001 from 0xe->0x10 */
75 { CR129, 0x0e }, /* MARK_001 from 0xd->0x0e */ 75 { ZD_CR129, 0x0e }, /* MARK_001 from 0xd->0x0e */
76 { CR130, 0x10 }, /* MARK_001 from 0xb->0x0d */ 76 { ZD_CR130, 0x10 }, /* MARK_001 from 0xb->0x0d */
77}; 77};
78 78
79static int zd1211b_al2230_finalize_rf(struct zd_chip *chip) 79static int zd1211b_al2230_finalize_rf(struct zd_chip *chip)
80{ 80{
81 int r; 81 int r;
82 static const struct zd_ioreq16 ioreqs[] = { 82 static const struct zd_ioreq16 ioreqs[] = {
83 { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 }, 83 { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 },
84 { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 }, 84 { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 },
85 { CR203, 0x06 }, 85 { ZD_CR203, 0x06 },
86 { }, 86 { },
87 87
88 { CR240, 0x80 }, 88 { ZD_CR240, 0x80 },
89 }; 89 };
90 90
91 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 91 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
@@ -94,12 +94,12 @@ static int zd1211b_al2230_finalize_rf(struct zd_chip *chip)
94 94
95 /* related to antenna selection? */ 95 /* related to antenna selection? */
96 if (chip->new_phy_layout) { 96 if (chip->new_phy_layout) {
97 r = zd_iowrite16_locked(chip, 0xe1, CR9); 97 r = zd_iowrite16_locked(chip, 0xe1, ZD_CR9);
98 if (r) 98 if (r)
99 return r; 99 return r;
100 } 100 }
101 101
102 return zd_iowrite16_locked(chip, 0x06, CR203); 102 return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
103} 103}
104 104
105static int zd1211_al2230_init_hw(struct zd_rf *rf) 105static int zd1211_al2230_init_hw(struct zd_rf *rf)
@@ -108,40 +108,40 @@ static int zd1211_al2230_init_hw(struct zd_rf *rf)
108 struct zd_chip *chip = zd_rf_to_chip(rf); 108 struct zd_chip *chip = zd_rf_to_chip(rf);
109 109
110 static const struct zd_ioreq16 ioreqs_init[] = { 110 static const struct zd_ioreq16 ioreqs_init[] = {
111 { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 }, 111 { ZD_CR15, 0x20 }, { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 },
112 { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 }, 112 { ZD_CR26, 0x11 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 },
113 { CR44, 0x33 }, { CR106, 0x2a }, { CR107, 0x1a }, 113 { ZD_CR44, 0x33 }, { ZD_CR106, 0x2a }, { ZD_CR107, 0x1a },
114 { CR109, 0x09 }, { CR110, 0x27 }, { CR111, 0x2b }, 114 { ZD_CR109, 0x09 }, { ZD_CR110, 0x27 }, { ZD_CR111, 0x2b },
115 { CR112, 0x2b }, { CR119, 0x0a }, { CR10, 0x89 }, 115 { ZD_CR112, 0x2b }, { ZD_CR119, 0x0a }, { ZD_CR10, 0x89 },
116 /* for newest (3rd cut) AL2300 */ 116 /* for newest (3rd cut) AL2300 */
117 { CR17, 0x28 }, 117 { ZD_CR17, 0x28 },
118 { CR26, 0x93 }, { CR34, 0x30 }, 118 { ZD_CR26, 0x93 }, { ZD_CR34, 0x30 },
119 /* for newest (3rd cut) AL2300 */ 119 /* for newest (3rd cut) AL2300 */
120 { CR35, 0x3e }, 120 { ZD_CR35, 0x3e },
121 { CR41, 0x24 }, { CR44, 0x32 }, 121 { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 },
122 /* for newest (3rd cut) AL2300 */ 122 /* for newest (3rd cut) AL2300 */
123 { CR46, 0x96 }, 123 { ZD_CR46, 0x96 },
124 { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 }, 124 { ZD_CR47, 0x1e }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 },
125 { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 }, 125 { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 },
126 { CR92, 0x0a }, { CR99, 0x28 }, { CR100, 0x00 }, 126 { ZD_CR92, 0x0a }, { ZD_CR99, 0x28 }, { ZD_CR100, 0x00 },
127 { CR101, 0x13 }, { CR102, 0x27 }, { CR106, 0x24 }, 127 { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, { ZD_CR106, 0x24 },
128 { CR107, 0x2a }, { CR109, 0x09 }, { CR110, 0x13 }, 128 { ZD_CR107, 0x2a }, { ZD_CR109, 0x09 }, { ZD_CR110, 0x13 },
129 { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, 129 { ZD_CR111, 0x1f }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 },
130 { CR114, 0x27 }, 130 { ZD_CR114, 0x27 },
131 /* for newest (3rd cut) AL2300 */ 131 /* for newest (3rd cut) AL2300 */
132 { CR115, 0x24 }, 132 { ZD_CR115, 0x24 },
133 { CR116, 0x24 }, { CR117, 0xf4 }, { CR118, 0xfc }, 133 { ZD_CR116, 0x24 }, { ZD_CR117, 0xf4 }, { ZD_CR118, 0xfc },
134 { CR119, 0x10 }, { CR120, 0x4f }, { CR121, 0x77 }, 134 { ZD_CR119, 0x10 }, { ZD_CR120, 0x4f }, { ZD_CR121, 0x77 },
135 { CR122, 0xe0 }, { CR137, 0x88 }, { CR252, 0xff }, 135 { ZD_CR122, 0xe0 }, { ZD_CR137, 0x88 }, { ZD_CR252, 0xff },
136 { CR253, 0xff }, 136 { ZD_CR253, 0xff },
137 }; 137 };
138 138
139 static const struct zd_ioreq16 ioreqs_pll[] = { 139 static const struct zd_ioreq16 ioreqs_pll[] = {
140 /* shdnb(PLL_ON)=0 */ 140 /* shdnb(PLL_ON)=0 */
141 { CR251, 0x2f }, 141 { ZD_CR251, 0x2f },
142 /* shdnb(PLL_ON)=1 */ 142 /* shdnb(PLL_ON)=1 */
143 { CR251, 0x3f }, 143 { ZD_CR251, 0x3f },
144 { CR138, 0x28 }, { CR203, 0x06 }, 144 { ZD_CR138, 0x28 }, { ZD_CR203, 0x06 },
145 }; 145 };
146 146
147 static const u32 rv1[] = { 147 static const u32 rv1[] = {
@@ -161,7 +161,7 @@ static int zd1211_al2230_init_hw(struct zd_rf *rf)
161 0x0805b6, 161 0x0805b6,
162 0x011687, 162 0x011687,
163 0x000688, 163 0x000688,
164 0x0403b9, /* external control TX power (CR31) */ 164 0x0403b9, /* external control TX power (ZD_CR31) */
165 0x00dbba, 165 0x00dbba,
166 0x00099b, 166 0x00099b,
167 0x0bdffc, 167 0x0bdffc,
@@ -221,52 +221,54 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
221 struct zd_chip *chip = zd_rf_to_chip(rf); 221 struct zd_chip *chip = zd_rf_to_chip(rf);
222 222
223 static const struct zd_ioreq16 ioreqs1[] = { 223 static const struct zd_ioreq16 ioreqs1[] = {
224 { CR10, 0x89 }, { CR15, 0x20 }, 224 { ZD_CR10, 0x89 }, { ZD_CR15, 0x20 },
225 { CR17, 0x2B }, /* for newest(3rd cut) AL2230 */ 225 { ZD_CR17, 0x2B }, /* for newest(3rd cut) AL2230 */
226 { CR23, 0x40 }, { CR24, 0x20 }, { CR26, 0x93 }, 226 { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 },
227 { CR28, 0x3e }, { CR29, 0x00 }, 227 { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 },
228 { CR33, 0x28 }, /* 5621 */ 228 { ZD_CR33, 0x28 }, /* 5621 */
229 { CR34, 0x30 }, 229 { ZD_CR34, 0x30 },
230 { CR35, 0x3e }, /* for newest(3rd cut) AL2230 */ 230 { ZD_CR35, 0x3e }, /* for newest(3rd cut) AL2230 */
231 { CR41, 0x24 }, { CR44, 0x32 }, 231 { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 },
232 { CR46, 0x99 }, /* for newest(3rd cut) AL2230 */ 232 { ZD_CR46, 0x99 }, /* for newest(3rd cut) AL2230 */
233 { CR47, 0x1e }, 233 { ZD_CR47, 0x1e },
234 234
235 /* ZD1211B 05.06.10 */ 235 /* ZD1211B 05.06.10 */
236 { CR48, 0x06 }, { CR49, 0xf9 }, { CR51, 0x01 }, 236 { ZD_CR48, 0x06 }, { ZD_CR49, 0xf9 }, { ZD_CR51, 0x01 },
237 { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 }, 237 { ZD_CR52, 0x80 }, { ZD_CR53, 0x7e }, { ZD_CR65, 0x00 },
238 { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 }, 238 { ZD_CR66, 0x00 }, { ZD_CR67, 0x00 }, { ZD_CR68, 0x00 },
239 { CR69, 0x28 }, 239 { ZD_CR69, 0x28 },
240 240
241 { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, 241 { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 },
242 { CR87, 0x0a }, { CR89, 0x04 }, 242 { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 },
243 { CR91, 0x00 }, /* 5621 */ 243 { ZD_CR91, 0x00 }, /* 5621 */
244 { CR92, 0x0a }, 244 { ZD_CR92, 0x0a },
245 { CR98, 0x8d }, /* 4804, for 1212 new algorithm */ 245 { ZD_CR98, 0x8d }, /* 4804, for 1212 new algorithm */
246 { CR99, 0x00 }, /* 5621 */ 246 { ZD_CR99, 0x00 }, /* 5621 */
247 { CR101, 0x13 }, { CR102, 0x27 }, 247 { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 },
248 { CR106, 0x24 }, /* for newest(3rd cut) AL2230 */ 248 { ZD_CR106, 0x24 }, /* for newest(3rd cut) AL2230 */
249 { CR107, 0x2a }, 249 { ZD_CR107, 0x2a },
250 { CR109, 0x13 }, /* 4804, for 1212 new algorithm */ 250 { ZD_CR109, 0x13 }, /* 4804, for 1212 new algorithm */
251 { CR110, 0x1f }, /* 4804, for 1212 new algorithm */ 251 { ZD_CR110, 0x1f }, /* 4804, for 1212 new algorithm */
252 { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, 252 { ZD_CR111, 0x1f }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 },
253 { CR114, 0x27 }, 253 { ZD_CR114, 0x27 },
254 { CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut) AL2230 */ 254 { ZD_CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut)
255 { CR116, 0x24 }, 255 * AL2230
256 { CR117, 0xfa }, /* for 1211b */ 256 */
257 { CR118, 0xfa }, /* for 1211b */ 257 { ZD_CR116, 0x24 },
258 { CR119, 0x10 }, 258 { ZD_CR117, 0xfa }, /* for 1211b */
259 { CR120, 0x4f }, 259 { ZD_CR118, 0xfa }, /* for 1211b */
260 { CR121, 0x6c }, /* for 1211b */ 260 { ZD_CR119, 0x10 },
261 { CR122, 0xfc }, /* E0->FC at 4902 */ 261 { ZD_CR120, 0x4f },
262 { CR123, 0x57 }, /* 5623 */ 262 { ZD_CR121, 0x6c }, /* for 1211b */
263 { CR125, 0xad }, /* 4804, for 1212 new algorithm */ 263 { ZD_CR122, 0xfc }, /* E0->FC at 4902 */
264 { CR126, 0x6c }, /* 5614 */ 264 { ZD_CR123, 0x57 }, /* 5623 */
265 { CR127, 0x03 }, /* 4804, for 1212 new algorithm */ 265 { ZD_CR125, 0xad }, /* 4804, for 1212 new algorithm */
266 { CR137, 0x50 }, /* 5614 */ 266 { ZD_CR126, 0x6c }, /* 5614 */
267 { CR138, 0xa8 }, 267 { ZD_CR127, 0x03 }, /* 4804, for 1212 new algorithm */
268 { CR144, 0xac }, /* 5621 */ 268 { ZD_CR137, 0x50 }, /* 5614 */
269 { CR150, 0x0d }, { CR252, 0x34 }, { CR253, 0x34 }, 269 { ZD_CR138, 0xa8 },
270 { ZD_CR144, 0xac }, /* 5621 */
271 { ZD_CR150, 0x0d }, { ZD_CR252, 0x34 }, { ZD_CR253, 0x34 },
270 }; 272 };
271 273
272 static const u32 rv1[] = { 274 static const u32 rv1[] = {
@@ -284,7 +286,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
284 0x6da010, /* Reg6 update for MP version */ 286 0x6da010, /* Reg6 update for MP version */
285 0xe36280, /* Modified by jxiao for Bor-Chin on 2004/08/02 */ 287 0xe36280, /* Modified by jxiao for Bor-Chin on 2004/08/02 */
286 0x116000, 288 0x116000,
287 0x9dc020, /* External control TX power (CR31) */ 289 0x9dc020, /* External control TX power (ZD_CR31) */
288 0x5ddb00, /* RegA update for MP version */ 290 0x5ddb00, /* RegA update for MP version */
289 0xd99000, /* RegB update for MP version */ 291 0xd99000, /* RegB update for MP version */
290 0x3ffbd0, /* RegC update for MP version */ 292 0x3ffbd0, /* RegC update for MP version */
@@ -295,8 +297,8 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
295 }; 297 };
296 298
297 static const struct zd_ioreq16 ioreqs2[] = { 299 static const struct zd_ioreq16 ioreqs2[] = {
298 { CR251, 0x2f }, /* shdnb(PLL_ON)=0 */ 300 { ZD_CR251, 0x2f }, /* shdnb(PLL_ON)=0 */
299 { CR251, 0x7f }, /* shdnb(PLL_ON)=1 */ 301 { ZD_CR251, 0x7f }, /* shdnb(PLL_ON)=1 */
300 }; 302 };
301 303
302 static const u32 rv3[] = { 304 static const u32 rv3[] = {
@@ -308,7 +310,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
308 310
309 static const struct zd_ioreq16 ioreqs3[] = { 311 static const struct zd_ioreq16 ioreqs3[] = {
310 /* related to 6M band edge patching, happens unconditionally */ 312 /* related to 6M band edge patching, happens unconditionally */
311 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, 313 { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
312 }; 314 };
313 315
314 r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1, 316 r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1,
@@ -361,8 +363,8 @@ static int zd1211_al2230_set_channel(struct zd_rf *rf, u8 channel)
361 const u32 *rv = zd1211_al2230_table[channel-1]; 363 const u32 *rv = zd1211_al2230_table[channel-1];
362 struct zd_chip *chip = zd_rf_to_chip(rf); 364 struct zd_chip *chip = zd_rf_to_chip(rf);
363 static const struct zd_ioreq16 ioreqs[] = { 365 static const struct zd_ioreq16 ioreqs[] = {
364 { CR138, 0x28 }, 366 { ZD_CR138, 0x28 },
365 { CR203, 0x06 }, 367 { ZD_CR203, 0x06 },
366 }; 368 };
367 369
368 r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS); 370 r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS);
@@ -393,8 +395,8 @@ static int zd1211_al2230_switch_radio_on(struct zd_rf *rf)
393{ 395{
394 struct zd_chip *chip = zd_rf_to_chip(rf); 396 struct zd_chip *chip = zd_rf_to_chip(rf);
395 static const struct zd_ioreq16 ioreqs[] = { 397 static const struct zd_ioreq16 ioreqs[] = {
396 { CR11, 0x00 }, 398 { ZD_CR11, 0x00 },
397 { CR251, 0x3f }, 399 { ZD_CR251, 0x3f },
398 }; 400 };
399 401
400 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 402 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
@@ -404,8 +406,8 @@ static int zd1211b_al2230_switch_radio_on(struct zd_rf *rf)
404{ 406{
405 struct zd_chip *chip = zd_rf_to_chip(rf); 407 struct zd_chip *chip = zd_rf_to_chip(rf);
406 static const struct zd_ioreq16 ioreqs[] = { 408 static const struct zd_ioreq16 ioreqs[] = {
407 { CR11, 0x00 }, 409 { ZD_CR11, 0x00 },
408 { CR251, 0x7f }, 410 { ZD_CR251, 0x7f },
409 }; 411 };
410 412
411 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 413 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
@@ -415,8 +417,8 @@ static int al2230_switch_radio_off(struct zd_rf *rf)
415{ 417{
416 struct zd_chip *chip = zd_rf_to_chip(rf); 418 struct zd_chip *chip = zd_rf_to_chip(rf);
417 static const struct zd_ioreq16 ioreqs[] = { 419 static const struct zd_ioreq16 ioreqs[] = {
418 { CR11, 0x04 }, 420 { ZD_CR11, 0x04 },
419 { CR251, 0x2f }, 421 { ZD_CR251, 0x2f },
420 }; 422 };
421 423
422 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 424 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
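The hunks above are a mechanical rename of the zd1211rw control-register macros from CRnn to ZD_CRnn; register numbers and values are untouched, so the generated I/O tables stay byte-for-byte identical, and the ZD_ prefix presumably just keeps these names out of the rather generic CRnn namespace used elsewhere in the tree. For orientation, a minimal sketch of how such a table is consumed (illustrative only, assuming the usual zd_ioreq16 address/value layout; example_write_regs is not part of the patch):

static int example_write_regs(struct zd_rf *rf)
{
	struct zd_chip *chip = zd_rf_to_chip(rf);
	static const struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR11,  0x00 },	/* radio on */
		{ ZD_CR251, 0x3f },	/* PLL_ON */
	};

	/* the whole table goes out in one locked transaction */
	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}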
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index 65095d661e6b..385c670d1293 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -68,19 +68,19 @@ static const u32 rv_init2[] = {
68}; 68};
69 69
70static const struct zd_ioreq16 ioreqs_sw[] = { 70static const struct zd_ioreq16 ioreqs_sw[] = {
71 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, 71 { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
72 { CR38, 0x38 }, { CR136, 0xdf }, 72 { ZD_CR38, 0x38 }, { ZD_CR136, 0xdf },
73}; 73};
74 74
75static int zd1211b_al7230b_finalize(struct zd_chip *chip) 75static int zd1211b_al7230b_finalize(struct zd_chip *chip)
76{ 76{
77 int r; 77 int r;
78 static const struct zd_ioreq16 ioreqs[] = { 78 static const struct zd_ioreq16 ioreqs[] = {
79 { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 }, 79 { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 },
80 { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 }, 80 { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 },
81 { CR203, 0x04 }, 81 { ZD_CR203, 0x04 },
82 { }, 82 { },
83 { CR240, 0x80 }, 83 { ZD_CR240, 0x80 },
84 }; 84 };
85 85
86 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 86 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
@@ -89,12 +89,12 @@ static int zd1211b_al7230b_finalize(struct zd_chip *chip)
89 89
90 if (chip->new_phy_layout) { 90 if (chip->new_phy_layout) {
91 /* antenna selection? */ 91 /* antenna selection? */
92 r = zd_iowrite16_locked(chip, 0xe5, CR9); 92 r = zd_iowrite16_locked(chip, 0xe5, ZD_CR9);
93 if (r) 93 if (r)
94 return r; 94 return r;
95 } 95 }
96 96
97 return zd_iowrite16_locked(chip, 0x04, CR203); 97 return zd_iowrite16_locked(chip, 0x04, ZD_CR203);
98} 98}
99 99
100static int zd1211_al7230b_init_hw(struct zd_rf *rf) 100static int zd1211_al7230b_init_hw(struct zd_rf *rf)
@@ -106,66 +106,66 @@ static int zd1211_al7230b_init_hw(struct zd_rf *rf)
106 * specified */ 106 * specified */
107 static const struct zd_ioreq16 ioreqs_1[] = { 107 static const struct zd_ioreq16 ioreqs_1[] = {
108 /* This one is 7230-specific, and happens before the rest */ 108 /* This one is 7230-specific, and happens before the rest */
109 { CR240, 0x57 }, 109 { ZD_CR240, 0x57 },
110 { }, 110 { },
111 111
112 { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 }, 112 { ZD_CR15, 0x20 }, { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 },
113 { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 }, 113 { ZD_CR26, 0x11 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 },
114 { CR44, 0x33 }, 114 { ZD_CR44, 0x33 },
115 /* This value is different for 7230 (was: 0x2a) */ 115 /* This value is different for 7230 (was: 0x2a) */
116 { CR106, 0x22 }, 116 { ZD_CR106, 0x22 },
117 { CR107, 0x1a }, { CR109, 0x09 }, { CR110, 0x27 }, 117 { ZD_CR107, 0x1a }, { ZD_CR109, 0x09 }, { ZD_CR110, 0x27 },
118 { CR111, 0x2b }, { CR112, 0x2b }, { CR119, 0x0a }, 118 { ZD_CR111, 0x2b }, { ZD_CR112, 0x2b }, { ZD_CR119, 0x0a },
119 /* This happened further down in AL2230, 119 /* This happened further down in AL2230,
120 * and the value changed (was: 0xe0) */ 120 * and the value changed (was: 0xe0) */
121 { CR122, 0xfc }, 121 { ZD_CR122, 0xfc },
122 { CR10, 0x89 }, 122 { ZD_CR10, 0x89 },
123 /* for newest (3rd cut) AL2300 */ 123 /* for newest (3rd cut) AL2300 */
124 { CR17, 0x28 }, 124 { ZD_CR17, 0x28 },
125 { CR26, 0x93 }, { CR34, 0x30 }, 125 { ZD_CR26, 0x93 }, { ZD_CR34, 0x30 },
126 /* for newest (3rd cut) AL2300 */ 126 /* for newest (3rd cut) AL2300 */
127 { CR35, 0x3e }, 127 { ZD_CR35, 0x3e },
128 { CR41, 0x24 }, { CR44, 0x32 }, 128 { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 },
129 /* for newest (3rd cut) AL2300 */ 129 /* for newest (3rd cut) AL2300 */
130 { CR46, 0x96 }, 130 { ZD_CR46, 0x96 },
131 { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 }, 131 { ZD_CR47, 0x1e }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 },
132 { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 }, 132 { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 },
133 { CR92, 0x0a }, { CR99, 0x28 }, 133 { ZD_CR92, 0x0a }, { ZD_CR99, 0x28 },
134 /* This value is different for 7230 (was: 0x00) */ 134 /* This value is different for 7230 (was: 0x00) */
135 { CR100, 0x02 }, 135 { ZD_CR100, 0x02 },
136 { CR101, 0x13 }, { CR102, 0x27 }, 136 { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 },
137 /* This value is different for 7230 (was: 0x24) */ 137 /* This value is different for 7230 (was: 0x24) */
138 { CR106, 0x22 }, 138 { ZD_CR106, 0x22 },
139 /* This value is different for 7230 (was: 0x2a) */ 139 /* This value is different for 7230 (was: 0x2a) */
140 { CR107, 0x3f }, 140 { ZD_CR107, 0x3f },
141 { CR109, 0x09 }, 141 { ZD_CR109, 0x09 },
142 /* This value is different for 7230 (was: 0x13) */ 142 /* This value is different for 7230 (was: 0x13) */
143 { CR110, 0x1f }, 143 { ZD_CR110, 0x1f },
144 { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, 144 { ZD_CR111, 0x1f }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 },
145 { CR114, 0x27 }, 145 { ZD_CR114, 0x27 },
146 /* for newest (3rd cut) AL2300 */ 146 /* for newest (3rd cut) AL2300 */
147 { CR115, 0x24 }, 147 { ZD_CR115, 0x24 },
148 /* This value is different for 7230 (was: 0x24) */ 148 /* This value is different for 7230 (was: 0x24) */
149 { CR116, 0x3f }, 149 { ZD_CR116, 0x3f },
150 /* This value is different for 7230 (was: 0xf4) */ 150 /* This value is different for 7230 (was: 0xf4) */
151 { CR117, 0xfa }, 151 { ZD_CR117, 0xfa },
152 { CR118, 0xfc }, { CR119, 0x10 }, { CR120, 0x4f }, 152 { ZD_CR118, 0xfc }, { ZD_CR119, 0x10 }, { ZD_CR120, 0x4f },
153 { CR121, 0x77 }, { CR137, 0x88 }, 153 { ZD_CR121, 0x77 }, { ZD_CR137, 0x88 },
154 /* This one is 7230-specific */ 154 /* This one is 7230-specific */
155 { CR138, 0xa8 }, 155 { ZD_CR138, 0xa8 },
156 /* This value is different for 7230 (was: 0xff) */ 156 /* This value is different for 7230 (was: 0xff) */
157 { CR252, 0x34 }, 157 { ZD_CR252, 0x34 },
158 /* This value is different for 7230 (was: 0xff) */ 158 /* This value is different for 7230 (was: 0xff) */
159 { CR253, 0x34 }, 159 { ZD_CR253, 0x34 },
160 160
161 /* PLL_OFF */ 161 /* PLL_OFF */
162 { CR251, 0x2f }, 162 { ZD_CR251, 0x2f },
163 }; 163 };
164 164
165 static const struct zd_ioreq16 ioreqs_2[] = { 165 static const struct zd_ioreq16 ioreqs_2[] = {
166 { CR251, 0x3f }, /* PLL_ON */ 166 { ZD_CR251, 0x3f }, /* PLL_ON */
167 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, 167 { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
168 { CR38, 0x38 }, { CR136, 0xdf }, 168 { ZD_CR38, 0x38 }, { ZD_CR136, 0xdf },
169 }; 169 };
170 170
171 r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1)); 171 r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1));
@@ -192,10 +192,10 @@ static int zd1211_al7230b_init_hw(struct zd_rf *rf)
192 if (r) 192 if (r)
193 return r; 193 return r;
194 194
195 r = zd_iowrite16_locked(chip, 0x06, CR203); 195 r = zd_iowrite16_locked(chip, 0x06, ZD_CR203);
196 if (r) 196 if (r)
197 return r; 197 return r;
198 r = zd_iowrite16_locked(chip, 0x80, CR240); 198 r = zd_iowrite16_locked(chip, 0x80, ZD_CR240);
199 if (r) 199 if (r)
200 return r; 200 return r;
201 201
@@ -208,79 +208,79 @@ static int zd1211b_al7230b_init_hw(struct zd_rf *rf)
208 struct zd_chip *chip = zd_rf_to_chip(rf); 208 struct zd_chip *chip = zd_rf_to_chip(rf);
209 209
210 static const struct zd_ioreq16 ioreqs_1[] = { 210 static const struct zd_ioreq16 ioreqs_1[] = {
211 { CR240, 0x57 }, { CR9, 0x9 }, 211 { ZD_CR240, 0x57 }, { ZD_CR9, 0x9 },
212 { }, 212 { },
213 { CR10, 0x8b }, { CR15, 0x20 }, 213 { ZD_CR10, 0x8b }, { ZD_CR15, 0x20 },
214 { CR17, 0x2B }, /* for newest (3rd cut) AL2230 */ 214 { ZD_CR17, 0x2B }, /* for newest (3rd cut) AL2230 */
215 { CR20, 0x10 }, /* 4N25->Stone Request */ 215 { ZD_CR20, 0x10 }, /* 4N25->Stone Request */
216 { CR23, 0x40 }, { CR24, 0x20 }, { CR26, 0x93 }, 216 { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 },
217 { CR28, 0x3e }, { CR29, 0x00 }, 217 { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 },
218 { CR33, 0x28 }, /* 5613 */ 218 { ZD_CR33, 0x28 }, /* 5613 */
219 { CR34, 0x30 }, 219 { ZD_CR34, 0x30 },
220 { CR35, 0x3e }, /* for newest (3rd cut) AL2230 */ 220 { ZD_CR35, 0x3e }, /* for newest (3rd cut) AL2230 */
221 { CR41, 0x24 }, { CR44, 0x32 }, 221 { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 },
222 { CR46, 0x99 }, /* for newest (3rd cut) AL2230 */ 222 { ZD_CR46, 0x99 }, /* for newest (3rd cut) AL2230 */
223 { CR47, 0x1e }, 223 { ZD_CR47, 0x1e },
224 224
225 /* ZD1215 5610 */ 225 /* ZD1215 5610 */
226 { CR48, 0x00 }, { CR49, 0x00 }, { CR51, 0x01 }, 226 { ZD_CR48, 0x00 }, { ZD_CR49, 0x00 }, { ZD_CR51, 0x01 },
227 { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 }, 227 { ZD_CR52, 0x80 }, { ZD_CR53, 0x7e }, { ZD_CR65, 0x00 },
228 { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 }, 228 { ZD_CR66, 0x00 }, { ZD_CR67, 0x00 }, { ZD_CR68, 0x00 },
229 { CR69, 0x28 }, 229 { ZD_CR69, 0x28 },
230 230
231 { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, 231 { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 },
232 { CR87, 0x0A }, { CR89, 0x04 }, 232 { ZD_CR87, 0x0A }, { ZD_CR89, 0x04 },
233 { CR90, 0x58 }, /* 5112 */ 233 { ZD_CR90, 0x58 }, /* 5112 */
234 { CR91, 0x00 }, /* 5613 */ 234 { ZD_CR91, 0x00 }, /* 5613 */
235 { CR92, 0x0a }, 235 { ZD_CR92, 0x0a },
236 { CR98, 0x8d }, /* 4804, for 1212 new algorithm */ 236 { ZD_CR98, 0x8d }, /* 4804, for 1212 new algorithm */
237 { CR99, 0x00 }, { CR100, 0x02 }, { CR101, 0x13 }, 237 { ZD_CR99, 0x00 }, { ZD_CR100, 0x02 }, { ZD_CR101, 0x13 },
238 { CR102, 0x27 }, 238 { ZD_CR102, 0x27 },
239 { CR106, 0x20 }, /* change to 0x24 for AL7230B */ 239 { ZD_CR106, 0x20 }, /* change to 0x24 for AL7230B */
240 { CR109, 0x13 }, /* 4804, for 1212 new algorithm */ 240 { ZD_CR109, 0x13 }, /* 4804, for 1212 new algorithm */
241 { CR112, 0x1f }, 241 { ZD_CR112, 0x1f },
242 }; 242 };
243 243
244 static const struct zd_ioreq16 ioreqs_new_phy[] = { 244 static const struct zd_ioreq16 ioreqs_new_phy[] = {
245 { CR107, 0x28 }, 245 { ZD_CR107, 0x28 },
246 { CR110, 0x1f }, /* 5127, 0x13->0x1f */ 246 { ZD_CR110, 0x1f }, /* 5127, 0x13->0x1f */
247 { CR111, 0x1f }, /* 0x13 to 0x1f for AL7230B */ 247 { ZD_CR111, 0x1f }, /* 0x13 to 0x1f for AL7230B */
248 { CR116, 0x2a }, { CR118, 0xfa }, { CR119, 0x12 }, 248 { ZD_CR116, 0x2a }, { ZD_CR118, 0xfa }, { ZD_CR119, 0x12 },
249 { CR121, 0x6c }, /* 5613 */ 249 { ZD_CR121, 0x6c }, /* 5613 */
250 }; 250 };
251 251
252 static const struct zd_ioreq16 ioreqs_old_phy[] = { 252 static const struct zd_ioreq16 ioreqs_old_phy[] = {
253 { CR107, 0x24 }, 253 { ZD_CR107, 0x24 },
254 { CR110, 0x13 }, /* 5127, 0x13->0x1f */ 254 { ZD_CR110, 0x13 }, /* 5127, 0x13->0x1f */
255 { CR111, 0x13 }, /* 0x13 to 0x1f for AL7230B */ 255 { ZD_CR111, 0x13 }, /* 0x13 to 0x1f for AL7230B */
256 { CR116, 0x24 }, { CR118, 0xfc }, { CR119, 0x11 }, 256 { ZD_CR116, 0x24 }, { ZD_CR118, 0xfc }, { ZD_CR119, 0x11 },
257 { CR121, 0x6a }, /* 5613 */ 257 { ZD_CR121, 0x6a }, /* 5613 */
258 }; 258 };
259 259
260 static const struct zd_ioreq16 ioreqs_2[] = { 260 static const struct zd_ioreq16 ioreqs_2[] = {
261 { CR113, 0x27 }, { CR114, 0x27 }, { CR115, 0x24 }, 261 { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, { ZD_CR115, 0x24 },
262 { CR117, 0xfa }, { CR120, 0x4f }, 262 { ZD_CR117, 0xfa }, { ZD_CR120, 0x4f },
263 { CR122, 0xfc }, /* E0->FCh at 4901 */ 263 { ZD_CR122, 0xfc }, /* E0->FCh at 4901 */
264 { CR123, 0x57 }, /* 5613 */ 264 { ZD_CR123, 0x57 }, /* 5613 */
265 { CR125, 0xad }, /* 4804, for 1212 new algorithm */ 265 { ZD_CR125, 0xad }, /* 4804, for 1212 new algorithm */
266 { CR126, 0x6c }, /* 5613 */ 266 { ZD_CR126, 0x6c }, /* 5613 */
267 { CR127, 0x03 }, /* 4804, for 1212 new algorithm */ 267 { ZD_CR127, 0x03 }, /* 4804, for 1212 new algorithm */
268 { CR130, 0x10 }, 268 { ZD_CR130, 0x10 },
269 { CR131, 0x00 }, /* 5112 */ 269 { ZD_CR131, 0x00 }, /* 5112 */
270 { CR137, 0x50 }, /* 5613 */ 270 { ZD_CR137, 0x50 }, /* 5613 */
271 { CR138, 0xa8 }, /* 5112 */ 271 { ZD_CR138, 0xa8 }, /* 5112 */
272 { CR144, 0xac }, /* 5613 */ 272 { ZD_CR144, 0xac }, /* 5613 */
273 { CR148, 0x40 }, /* 5112 */ 273 { ZD_CR148, 0x40 }, /* 5112 */
274 { CR149, 0x40 }, /* 4O07, 50->40 */ 274 { ZD_CR149, 0x40 }, /* 4O07, 50->40 */
275 { CR150, 0x1a }, /* 5112, 0C->1A */ 275 { ZD_CR150, 0x1a }, /* 5112, 0C->1A */
276 { CR252, 0x34 }, { CR253, 0x34 }, 276 { ZD_CR252, 0x34 }, { ZD_CR253, 0x34 },
277 { CR251, 0x2f }, /* PLL_OFF */ 277 { ZD_CR251, 0x2f }, /* PLL_OFF */
278 }; 278 };
279 279
280 static const struct zd_ioreq16 ioreqs_3[] = { 280 static const struct zd_ioreq16 ioreqs_3[] = {
281 { CR251, 0x7f }, /* PLL_ON */ 281 { ZD_CR251, 0x7f }, /* PLL_ON */
282 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, 282 { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 },
283 { CR38, 0x38 }, { CR136, 0xdf }, 283 { ZD_CR38, 0x38 }, { ZD_CR136, 0xdf },
284 }; 284 };
285 285
286 r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1)); 286 r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1));
@@ -331,16 +331,16 @@ static int zd1211_al7230b_set_channel(struct zd_rf *rf, u8 channel)
331 331
332 static const struct zd_ioreq16 ioreqs[] = { 332 static const struct zd_ioreq16 ioreqs[] = {
333 /* PLL_ON */ 333 /* PLL_ON */
334 { CR251, 0x3f }, 334 { ZD_CR251, 0x3f },
335 { CR203, 0x06 }, { CR240, 0x08 }, 335 { ZD_CR203, 0x06 }, { ZD_CR240, 0x08 },
336 }; 336 };
337 337
338 r = zd_iowrite16_locked(chip, 0x57, CR240); 338 r = zd_iowrite16_locked(chip, 0x57, ZD_CR240);
339 if (r) 339 if (r)
340 return r; 340 return r;
341 341
342 /* PLL_OFF */ 342 /* PLL_OFF */
343 r = zd_iowrite16_locked(chip, 0x2f, CR251); 343 r = zd_iowrite16_locked(chip, 0x2f, ZD_CR251);
344 if (r) 344 if (r)
345 return r; 345 return r;
346 346
@@ -376,15 +376,15 @@ static int zd1211b_al7230b_set_channel(struct zd_rf *rf, u8 channel)
376 const u32 *rv = chan_rv[channel-1]; 376 const u32 *rv = chan_rv[channel-1];
377 struct zd_chip *chip = zd_rf_to_chip(rf); 377 struct zd_chip *chip = zd_rf_to_chip(rf);
378 378
379 r = zd_iowrite16_locked(chip, 0x57, CR240); 379 r = zd_iowrite16_locked(chip, 0x57, ZD_CR240);
380 if (r) 380 if (r)
381 return r; 381 return r;
382 r = zd_iowrite16_locked(chip, 0xe4, CR9); 382 r = zd_iowrite16_locked(chip, 0xe4, ZD_CR9);
383 if (r) 383 if (r)
384 return r; 384 return r;
385 385
386 /* PLL_OFF */ 386 /* PLL_OFF */
387 r = zd_iowrite16_locked(chip, 0x2f, CR251); 387 r = zd_iowrite16_locked(chip, 0x2f, ZD_CR251);
388 if (r) 388 if (r)
389 return r; 389 return r;
390 r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); 390 r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv));
@@ -410,7 +410,7 @@ static int zd1211b_al7230b_set_channel(struct zd_rf *rf, u8 channel)
410 if (r) 410 if (r)
411 return r; 411 return r;
412 412
413 r = zd_iowrite16_locked(chip, 0x7f, CR251); 413 r = zd_iowrite16_locked(chip, 0x7f, ZD_CR251);
414 if (r) 414 if (r)
415 return r; 415 return r;
416 416
@@ -421,8 +421,8 @@ static int zd1211_al7230b_switch_radio_on(struct zd_rf *rf)
421{ 421{
422 struct zd_chip *chip = zd_rf_to_chip(rf); 422 struct zd_chip *chip = zd_rf_to_chip(rf);
423 static const struct zd_ioreq16 ioreqs[] = { 423 static const struct zd_ioreq16 ioreqs[] = {
424 { CR11, 0x00 }, 424 { ZD_CR11, 0x00 },
425 { CR251, 0x3f }, 425 { ZD_CR251, 0x3f },
426 }; 426 };
427 427
428 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 428 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
@@ -432,8 +432,8 @@ static int zd1211b_al7230b_switch_radio_on(struct zd_rf *rf)
432{ 432{
433 struct zd_chip *chip = zd_rf_to_chip(rf); 433 struct zd_chip *chip = zd_rf_to_chip(rf);
434 static const struct zd_ioreq16 ioreqs[] = { 434 static const struct zd_ioreq16 ioreqs[] = {
435 { CR11, 0x00 }, 435 { ZD_CR11, 0x00 },
436 { CR251, 0x7f }, 436 { ZD_CR251, 0x7f },
437 }; 437 };
438 438
439 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 439 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
@@ -443,8 +443,8 @@ static int al7230b_switch_radio_off(struct zd_rf *rf)
443{ 443{
444 struct zd_chip *chip = zd_rf_to_chip(rf); 444 struct zd_chip *chip = zd_rf_to_chip(rf);
445 static const struct zd_ioreq16 ioreqs[] = { 445 static const struct zd_ioreq16 ioreqs[] = {
446 { CR11, 0x04 }, 446 { ZD_CR11, 0x04 },
447 { CR251, 0x2f }, 447 { ZD_CR251, 0x2f },
448 }; 448 };
449 449
450 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 450 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
@@ -456,7 +456,7 @@ static int zd1211b_al7230b_patch_6m(struct zd_rf *rf, u8 channel)
456{ 456{
457 struct zd_chip *chip = zd_rf_to_chip(rf); 457 struct zd_chip *chip = zd_rf_to_chip(rf);
458 struct zd_ioreq16 ioreqs[] = { 458 struct zd_ioreq16 ioreqs[] = {
459 { CR128, 0x14 }, { CR129, 0x12 }, 459 { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 },
460 }; 460 };
461 461
462 /* FIXME: Channel 11 is not the edge for all regulatory domains. */ 462 /* FIXME: Channel 11 is not the edge for all regulatory domains. */
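One detail worth noting in tables such as ioreqs_1 above: the bare { } entry after ZD_CR240 is not padding. Reading the driver's I/O helpers (an inference, not something this patch states), an entry with a zero address acts as a separator, so the writes before and after it are sent as distinct batches, which is how the "happens before the rest" ordering in the comments is enforced. Sketched usage:

static const struct zd_ioreq16 ioreqs[] = {
	{ ZD_CR240, 0x57 },	/* must reach the chip first */
	{ },			/* zero address: start a new write batch (assumed semantics) */
	{ ZD_CR15, 0x20 },	/* remaining setup follows separately */
};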
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
index e36117486c91..784d9ccb8fef 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -152,44 +152,44 @@ static int rf2959_init_hw(struct zd_rf *rf)
152 struct zd_chip *chip = zd_rf_to_chip(rf); 152 struct zd_chip *chip = zd_rf_to_chip(rf);
153 153
154 static const struct zd_ioreq16 ioreqs[] = { 154 static const struct zd_ioreq16 ioreqs[] = {
155 { CR2, 0x1E }, { CR9, 0x20 }, { CR10, 0x89 }, 155 { ZD_CR2, 0x1E }, { ZD_CR9, 0x20 }, { ZD_CR10, 0x89 },
156 { CR11, 0x00 }, { CR15, 0xD0 }, { CR17, 0x68 }, 156 { ZD_CR11, 0x00 }, { ZD_CR15, 0xD0 }, { ZD_CR17, 0x68 },
157 { CR19, 0x4a }, { CR20, 0x0c }, { CR21, 0x0E }, 157 { ZD_CR19, 0x4a }, { ZD_CR20, 0x0c }, { ZD_CR21, 0x0E },
158 { CR23, 0x48 }, 158 { ZD_CR23, 0x48 },
159 /* normal size for cca threshold */ 159 /* normal size for cca threshold */
160 { CR24, 0x14 }, 160 { ZD_CR24, 0x14 },
161 /* { CR24, 0x20 }, */ 161 /* { ZD_CR24, 0x20 }, */
162 { CR26, 0x90 }, { CR27, 0x30 }, { CR29, 0x20 }, 162 { ZD_CR26, 0x90 }, { ZD_CR27, 0x30 }, { ZD_CR29, 0x20 },
163 { CR31, 0xb2 }, { CR32, 0x43 }, { CR33, 0x28 }, 163 { ZD_CR31, 0xb2 }, { ZD_CR32, 0x43 }, { ZD_CR33, 0x28 },
164 { CR38, 0x30 }, { CR34, 0x0f }, { CR35, 0xF0 }, 164 { ZD_CR38, 0x30 }, { ZD_CR34, 0x0f }, { ZD_CR35, 0xF0 },
165 { CR41, 0x2a }, { CR46, 0x7F }, { CR47, 0x1E }, 165 { ZD_CR41, 0x2a }, { ZD_CR46, 0x7F }, { ZD_CR47, 0x1E },
166 { CR51, 0xc5 }, { CR52, 0xc5 }, { CR53, 0xc5 }, 166 { ZD_CR51, 0xc5 }, { ZD_CR52, 0xc5 }, { ZD_CR53, 0xc5 },
167 { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, 167 { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 },
168 { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 }, 168 { ZD_CR82, 0x00 }, { ZD_CR83, 0x24 }, { ZD_CR84, 0x04 },
169 { CR85, 0x00 }, { CR86, 0x10 }, { CR87, 0x2A }, 169 { ZD_CR85, 0x00 }, { ZD_CR86, 0x10 }, { ZD_CR87, 0x2A },
170 { CR88, 0x10 }, { CR89, 0x24 }, { CR90, 0x18 }, 170 { ZD_CR88, 0x10 }, { ZD_CR89, 0x24 }, { ZD_CR90, 0x18 },
171 /* { CR91, 0x18 }, */ 171 /* { ZD_CR91, 0x18 }, */
172 /* should solve continuous CTS frame problems */ 172 /* should solve continuous CTS frame problems */
173 { CR91, 0x00 }, 173 { ZD_CR91, 0x00 },
174 { CR92, 0x0a }, { CR93, 0x00 }, { CR94, 0x01 }, 174 { ZD_CR92, 0x0a }, { ZD_CR93, 0x00 }, { ZD_CR94, 0x01 },
175 { CR95, 0x00 }, { CR96, 0x40 }, { CR97, 0x37 }, 175 { ZD_CR95, 0x00 }, { ZD_CR96, 0x40 }, { ZD_CR97, 0x37 },
176 { CR98, 0x05 }, { CR99, 0x28 }, { CR100, 0x00 }, 176 { ZD_CR98, 0x05 }, { ZD_CR99, 0x28 }, { ZD_CR100, 0x00 },
177 { CR101, 0x13 }, { CR102, 0x27 }, { CR103, 0x27 }, 177 { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, { ZD_CR103, 0x27 },
178 { CR104, 0x18 }, { CR105, 0x12 }, 178 { ZD_CR104, 0x18 }, { ZD_CR105, 0x12 },
179 /* normal size */ 179 /* normal size */
180 { CR106, 0x1a }, 180 { ZD_CR106, 0x1a },
181 /* { CR106, 0x22 }, */ 181 /* { ZD_CR106, 0x22 }, */
182 { CR107, 0x24 }, { CR108, 0x0a }, { CR109, 0x13 }, 182 { ZD_CR107, 0x24 }, { ZD_CR108, 0x0a }, { ZD_CR109, 0x13 },
183 { CR110, 0x2F }, { CR111, 0x27 }, { CR112, 0x27 }, 183 { ZD_CR110, 0x2F }, { ZD_CR111, 0x27 }, { ZD_CR112, 0x27 },
184 { CR113, 0x27 }, { CR114, 0x27 }, { CR115, 0x40 }, 184 { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, { ZD_CR115, 0x40 },
185 { CR116, 0x40 }, { CR117, 0xF0 }, { CR118, 0xF0 }, 185 { ZD_CR116, 0x40 }, { ZD_CR117, 0xF0 }, { ZD_CR118, 0xF0 },
186 { CR119, 0x16 }, 186 { ZD_CR119, 0x16 },
187 /* no TX continuation */ 187 /* no TX continuation */
188 { CR122, 0x00 }, 188 { ZD_CR122, 0x00 },
189 /* { CR122, 0xff }, */ 189 /* { ZD_CR122, 0xff }, */
190 { CR127, 0x03 }, { CR131, 0x08 }, { CR138, 0x28 }, 190 { ZD_CR127, 0x03 }, { ZD_CR131, 0x08 }, { ZD_CR138, 0x28 },
191 { CR148, 0x44 }, { CR150, 0x10 }, { CR169, 0xBB }, 191 { ZD_CR148, 0x44 }, { ZD_CR150, 0x10 }, { ZD_CR169, 0xBB },
192 { CR170, 0xBB }, 192 { ZD_CR170, 0xBB },
193 }; 193 };
194 194
195 static const u32 rv[] = { 195 static const u32 rv[] = {
@@ -210,7 +210,7 @@ static int rf2959_init_hw(struct zd_rf *rf)
210 */ 210 */
211 0x294128, /* internal power */ 211 0x294128, /* internal power */
212 /* 0x28252c, */ /* External control TX power */ 212 /* 0x28252c, */ /* External control TX power */
213 /* CR31_CCK, CR51_6-36M, CR52_48M, CR53_54M */ 213 /* ZD_CR31_CCK, ZD_CR51_6-36M, ZD_CR52_48M, ZD_CR53_54M */
214 0x2c0000, 214 0x2c0000,
215 0x300000, 215 0x300000,
216 0x340000, /* REG13(0xD) */ 216 0x340000, /* REG13(0xD) */
@@ -245,8 +245,8 @@ static int rf2959_set_channel(struct zd_rf *rf, u8 channel)
245static int rf2959_switch_radio_on(struct zd_rf *rf) 245static int rf2959_switch_radio_on(struct zd_rf *rf)
246{ 246{
247 static const struct zd_ioreq16 ioreqs[] = { 247 static const struct zd_ioreq16 ioreqs[] = {
248 { CR10, 0x89 }, 248 { ZD_CR10, 0x89 },
249 { CR11, 0x00 }, 249 { ZD_CR11, 0x00 },
250 }; 250 };
251 struct zd_chip *chip = zd_rf_to_chip(rf); 251 struct zd_chip *chip = zd_rf_to_chip(rf);
252 252
@@ -256,8 +256,8 @@ static int rf2959_switch_radio_on(struct zd_rf *rf)
256static int rf2959_switch_radio_off(struct zd_rf *rf) 256static int rf2959_switch_radio_off(struct zd_rf *rf)
257{ 257{
258 static const struct zd_ioreq16 ioreqs[] = { 258 static const struct zd_ioreq16 ioreqs[] = {
259 { CR10, 0x15 }, 259 { ZD_CR10, 0x15 },
260 { CR11, 0x81 }, 260 { ZD_CR11, 0x81 },
261 }; 261 };
262 struct zd_chip *chip = zd_rf_to_chip(rf); 262 struct zd_chip *chip = zd_rf_to_chip(rf);
263 263
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index ba0a0ccb1fa0..c4d324e19c24 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -314,42 +314,44 @@ static int uw2453_init_hw(struct zd_rf *rf)
314 struct zd_chip *chip = zd_rf_to_chip(rf); 314 struct zd_chip *chip = zd_rf_to_chip(rf);
315 315
316 static const struct zd_ioreq16 ioreqs[] = { 316 static const struct zd_ioreq16 ioreqs[] = {
317 { CR10, 0x89 }, { CR15, 0x20 }, 317 { ZD_CR10, 0x89 }, { ZD_CR15, 0x20 },
318 { CR17, 0x28 }, /* 6112 no change */ 318 { ZD_CR17, 0x28 }, /* 6112 no change */
319 { CR23, 0x38 }, { CR24, 0x20 }, { CR26, 0x93 }, 319 { ZD_CR23, 0x38 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 },
320 { CR27, 0x15 }, { CR28, 0x3e }, { CR29, 0x00 }, 320 { ZD_CR27, 0x15 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 },
321 { CR33, 0x28 }, { CR34, 0x30 }, 321 { ZD_CR33, 0x28 }, { ZD_CR34, 0x30 },
322 { CR35, 0x43 }, /* 6112 3e->43 */ 322 { ZD_CR35, 0x43 }, /* 6112 3e->43 */
323 { CR41, 0x24 }, { CR44, 0x32 }, 323 { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 },
324 { CR46, 0x92 }, /* 6112 96->92 */ 324 { ZD_CR46, 0x92 }, /* 6112 96->92 */
325 { CR47, 0x1e }, 325 { ZD_CR47, 0x1e },
326 { CR48, 0x04 }, /* 5602 Roger */ 326 { ZD_CR48, 0x04 }, /* 5602 Roger */
327 { CR49, 0xfa }, { CR79, 0x58 }, { CR80, 0x30 }, 327 { ZD_CR49, 0xfa }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 },
328 { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 }, 328 { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 },
329 { CR91, 0x00 }, { CR92, 0x0a }, { CR98, 0x8d }, 329 { ZD_CR91, 0x00 }, { ZD_CR92, 0x0a }, { ZD_CR98, 0x8d },
330 { CR99, 0x28 }, { CR100, 0x02 }, 330 { ZD_CR99, 0x28 }, { ZD_CR100, 0x02 },
331 { CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */ 331 { ZD_CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */
332 { CR102, 0x27 }, 332 { ZD_CR102, 0x27 },
333 { CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f 6221 1f->1c */ 333 { ZD_CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f
334 { CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */ 334 * 6221 1f->1c
335 { CR109, 0x13 }, 335 */
336 { CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */ 336 { ZD_CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */
337 { CR111, 0x13 }, { CR112, 0x1f }, { CR113, 0x27 }, 337 { ZD_CR109, 0x13 },
338 { CR114, 0x23 }, /* 6221 27->23 */ 338 { ZD_CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */
339 { CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */ 339 { ZD_CR111, 0x13 }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 },
340 { CR116, 0x24 }, /* 6220 1c->24 */ 340 { ZD_CR114, 0x23 }, /* 6221 27->23 */
341 { CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */ 341 { ZD_CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */
342 { CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */ 342 { ZD_CR116, 0x24 }, /* 6220 1c->24 */
343 { CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */ 343 { ZD_CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */
344 { CR120, 0x4f }, 344 { ZD_CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */
345 { CR121, 0x1f }, /* 6220 4f->1f */ 345 { ZD_CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */
346 { CR122, 0xf0 }, { CR123, 0x57 }, { CR125, 0xad }, 346 { ZD_CR120, 0x4f },
347 { CR126, 0x6c }, { CR127, 0x03 }, 347 { ZD_CR121, 0x1f }, /* 6220 4f->1f */
348 { CR128, 0x14 }, /* 6302 12->11 */ 348 { ZD_CR122, 0xf0 }, { ZD_CR123, 0x57 }, { ZD_CR125, 0xad },
349 { CR129, 0x12 }, /* 6301 10->0f */ 349 { ZD_CR126, 0x6c }, { ZD_CR127, 0x03 },
350 { CR130, 0x10 }, { CR137, 0x50 }, { CR138, 0xa8 }, 350 { ZD_CR128, 0x14 }, /* 6302 12->11 */
351 { CR144, 0xac }, { CR146, 0x20 }, { CR252, 0xff }, 351 { ZD_CR129, 0x12 }, /* 6301 10->0f */
352 { CR253, 0xff }, 352 { ZD_CR130, 0x10 }, { ZD_CR137, 0x50 }, { ZD_CR138, 0xa8 },
353 { ZD_CR144, 0xac }, { ZD_CR146, 0x20 }, { ZD_CR252, 0xff },
354 { ZD_CR253, 0xff },
353 }; 355 };
354 356
355 static const u32 rv[] = { 357 static const u32 rv[] = {
@@ -433,7 +435,7 @@ static int uw2453_init_hw(struct zd_rf *rf)
433 * the one that produced a lock. */ 435 * the one that produced a lock. */
434 UW2453_PRIV(rf)->config = found_config + 1; 436 UW2453_PRIV(rf)->config = found_config + 1;
435 437
436 return zd_iowrite16_locked(chip, 0x06, CR203); 438 return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
437} 439}
438 440
439static int uw2453_set_channel(struct zd_rf *rf, u8 channel) 441static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
@@ -445,8 +447,8 @@ static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
445 struct zd_chip *chip = zd_rf_to_chip(rf); 447 struct zd_chip *chip = zd_rf_to_chip(rf);
446 448
447 static const struct zd_ioreq16 ioreqs[] = { 449 static const struct zd_ioreq16 ioreqs[] = {
448 { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 }, 450 { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 },
449 { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 }, 451 { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 },
450 }; 452 };
451 453
452 r = uw2453_synth_set_channel(chip, channel, autocal); 454 r = uw2453_synth_set_channel(chip, channel, autocal);
@@ -474,7 +476,7 @@ static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
474 if (r) 476 if (r)
475 return r; 477 return r;
476 478
477 return zd_iowrite16_locked(chip, 0x06, CR203); 479 return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
478} 480}
479 481
480static int uw2453_switch_radio_on(struct zd_rf *rf) 482static int uw2453_switch_radio_on(struct zd_rf *rf)
@@ -482,7 +484,7 @@ static int uw2453_switch_radio_on(struct zd_rf *rf)
482 int r; 484 int r;
483 struct zd_chip *chip = zd_rf_to_chip(rf); 485 struct zd_chip *chip = zd_rf_to_chip(rf);
484 struct zd_ioreq16 ioreqs[] = { 486 struct zd_ioreq16 ioreqs[] = {
485 { CR11, 0x00 }, { CR251, 0x3f }, 487 { ZD_CR11, 0x00 }, { ZD_CR251, 0x3f },
486 }; 488 };
487 489
488 /* enter RXTX mode */ 490 /* enter RXTX mode */
@@ -501,7 +503,7 @@ static int uw2453_switch_radio_off(struct zd_rf *rf)
501 int r; 503 int r;
502 struct zd_chip *chip = zd_rf_to_chip(rf); 504 struct zd_chip *chip = zd_rf_to_chip(rf);
503 static const struct zd_ioreq16 ioreqs[] = { 505 static const struct zd_ioreq16 ioreqs[] = {
504 { CR11, 0x04 }, { CR251, 0x2f }, 506 { ZD_CR11, 0x04 }, { ZD_CR251, 0x2f },
505 }; 507 };
506 508
507 /* enter IDLE mode */ 509 /* enter IDLE mode */
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ab607bbd6291..0e819943b9e4 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1893,10 +1893,10 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1893 1893
1894 dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits); 1894 dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits);
1895 1895
1896 r = zd_usb_ioread16(usb, &bit_value_template, CR203); 1896 r = zd_usb_ioread16(usb, &bit_value_template, ZD_CR203);
1897 if (r) { 1897 if (r) {
1898 dev_dbg_f(zd_usb_dev(usb), 1898 dev_dbg_f(zd_usb_dev(usb),
1899 "error %d: Couldn't read CR203\n", r); 1899 "error %d: Couldn't read ZD_CR203\n", r);
1900 return r; 1900 return r;
1901 } 1901 }
1902 bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA); 1902 bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 325d0f989257..bf942843b733 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -109,7 +109,7 @@ struct usb_req_rfwrite {
109 __le16 bits; 109 __le16 bits;
110 /* RF2595: 24 */ 110 /* RF2595: 24 */
111 __le16 bit_values[0]; 111 __le16 bit_values[0];
112 /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */ 112 /* (ZD_CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
113} __packed; 113} __packed;
114 114
115/* USB interrupt */ 115/* USB interrupt */
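The comment above documents how each serial word for an RF write is built: start from the cached ZD_CR203 contents, mask out the three RF serial lines, then set RF_DATA according to the bit being shifted out. Expressed as a hypothetical helper (not driver code, just a restatement of that comment):

static inline u16 rf_bit_value(u16 cr203_template, int bit)
{
	u16 v = cr203_template & ~(RF_IF_LE | RF_CLK | RF_DATA);

	return bit ? (v | RF_DATA) : v;
}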
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5d7bbf2b2ee7..161f207786a4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -73,9 +73,6 @@ struct xenvif {
73 struct vm_struct *tx_comms_area; 73 struct vm_struct *tx_comms_area;
74 struct vm_struct *rx_comms_area; 74 struct vm_struct *rx_comms_area;
75 75
76 /* Flags that must not be set in dev->features */
77 u32 features_disabled;
78
79 /* Frontend feature information. */ 76 /* Frontend feature information. */
80 u8 can_sg:1; 77 u8 can_sg:1;
81 u8 gso:1; 78 u8 gso:1;
@@ -109,8 +106,8 @@ struct xenvif {
109 wait_queue_head_t waiting_to_free; 106 wait_queue_head_t waiting_to_free;
110}; 107};
111 108
112#define XEN_NETIF_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) 109#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
113#define XEN_NETIF_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) 110#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
114 111
115struct xenvif *xenvif_alloc(struct device *parent, 112struct xenvif *xenvif_alloc(struct device *parent,
116 domid_t domid, 113 domid_t domid,
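Switching from __RING_SIZE with a casted NULL pointer to __CONST_RING_SIZE yields the same ring capacity, but as a genuine compile-time constant, avoiding the questionable (struct ... *)0 idiom and making the macro usable wherever an integer constant expression is required. For example (illustrative only, not code from this patch):

#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)

/* usable as an array bound because the macro expands to a constant */
static struct sk_buff *rx_skbs[XEN_NETIF_RX_RING_SIZE];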
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index de569cc19da4..0ca86f9ec4ed 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,69 +165,18 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
165 return 0; 165 return 0;
166} 166}
167 167
168static void xenvif_set_features(struct xenvif *vif) 168static u32 xenvif_fix_features(struct net_device *dev, u32 features)
169{
170 struct net_device *dev = vif->dev;
171 u32 features = dev->features;
172
173 if (vif->can_sg)
174 features |= NETIF_F_SG;
175 if (vif->gso || vif->gso_prefix)
176 features |= NETIF_F_TSO;
177 if (vif->csum)
178 features |= NETIF_F_IP_CSUM;
179
180 features &= ~(vif->features_disabled);
181
182 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
183 dev->mtu = ETH_DATA_LEN;
184
185 dev->features = features;
186}
187
188static int xenvif_set_tx_csum(struct net_device *dev, u32 data)
189{
190 struct xenvif *vif = netdev_priv(dev);
191 if (data) {
192 if (!vif->csum)
193 return -EOPNOTSUPP;
194 vif->features_disabled &= ~NETIF_F_IP_CSUM;
195 } else {
196 vif->features_disabled |= NETIF_F_IP_CSUM;
197 }
198
199 xenvif_set_features(vif);
200 return 0;
201}
202
203static int xenvif_set_sg(struct net_device *dev, u32 data)
204{ 169{
205 struct xenvif *vif = netdev_priv(dev); 170 struct xenvif *vif = netdev_priv(dev);
206 if (data) {
207 if (!vif->can_sg)
208 return -EOPNOTSUPP;
209 vif->features_disabled &= ~NETIF_F_SG;
210 } else {
211 vif->features_disabled |= NETIF_F_SG;
212 }
213 171
214 xenvif_set_features(vif); 172 if (!vif->can_sg)
215 return 0; 173 features &= ~NETIF_F_SG;
216} 174 if (!vif->gso && !vif->gso_prefix)
175 features &= ~NETIF_F_TSO;
176 if (!vif->csum)
177 features &= ~NETIF_F_IP_CSUM;
217 178
218static int xenvif_set_tso(struct net_device *dev, u32 data) 179 return features;
219{
220 struct xenvif *vif = netdev_priv(dev);
221 if (data) {
222 if (!vif->gso && !vif->gso_prefix)
223 return -EOPNOTSUPP;
224 vif->features_disabled &= ~NETIF_F_TSO;
225 } else {
226 vif->features_disabled |= NETIF_F_TSO;
227 }
228
229 xenvif_set_features(vif);
230 return 0;
231} 180}
232 181
233static const struct xenvif_stat { 182static const struct xenvif_stat {
@@ -274,12 +223,6 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
274} 223}
275 224
276static struct ethtool_ops xenvif_ethtool_ops = { 225static struct ethtool_ops xenvif_ethtool_ops = {
277 .get_tx_csum = ethtool_op_get_tx_csum,
278 .set_tx_csum = xenvif_set_tx_csum,
279 .get_sg = ethtool_op_get_sg,
280 .set_sg = xenvif_set_sg,
281 .get_tso = ethtool_op_get_tso,
282 .set_tso = xenvif_set_tso,
283 .get_link = ethtool_op_get_link, 226 .get_link = ethtool_op_get_link,
284 227
285 .get_sset_count = xenvif_get_sset_count, 228 .get_sset_count = xenvif_get_sset_count,
@@ -293,6 +236,7 @@ static struct net_device_ops xenvif_netdev_ops = {
293 .ndo_open = xenvif_open, 236 .ndo_open = xenvif_open,
294 .ndo_stop = xenvif_close, 237 .ndo_stop = xenvif_close,
295 .ndo_change_mtu = xenvif_change_mtu, 238 .ndo_change_mtu = xenvif_change_mtu,
239 .ndo_fix_features = xenvif_fix_features,
296}; 240};
297 241
298struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, 242struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -331,7 +275,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
331 vif->credit_timeout.expires = jiffies; 275 vif->credit_timeout.expires = jiffies;
332 276
333 dev->netdev_ops = &xenvif_netdev_ops; 277 dev->netdev_ops = &xenvif_netdev_ops;
334 xenvif_set_features(vif); 278 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
279 dev->features = dev->hw_features;
335 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); 280 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
336 281
337 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 282 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
@@ -367,8 +312,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
367 if (vif->irq) 312 if (vif->irq)
368 return 0; 313 return 0;
369 314
370 xenvif_set_features(vif);
371
372 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 315 err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
373 if (err < 0) 316 if (err < 0)
374 goto err; 317 goto err;
@@ -384,9 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
384 xenvif_get(vif); 327 xenvif_get(vif);
385 328
386 rtnl_lock(); 329 rtnl_lock();
387 netif_carrier_on(vif->dev);
388 if (netif_running(vif->dev)) 330 if (netif_running(vif->dev))
389 xenvif_up(vif); 331 xenvif_up(vif);
332 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
333 dev_set_mtu(vif->dev, ETH_DATA_LEN);
334 netdev_update_features(vif->dev);
335 netif_carrier_on(vif->dev);
390 rtnl_unlock(); 336 rtnl_unlock();
391 337
392 return 0; 338 return 0;
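The interface.c changes drop the per-offload ethtool callbacks and the driver-private features_disabled mask in favour of the core feature machinery: the driver advertises everything it could do in dev->hw_features, vetoes unsupported bits in ndo_fix_features, and the MTU clamp for the no-SG case moves to connect time; the ethtool toggles are then handled entirely by the core. The shape of that pattern, reduced to a sketch (all example_ names are placeholders, not this driver's symbols):

struct example_priv {
	u8 can_sg:1;
	u8 csum:1;
};

static u32 example_fix_features(struct net_device *dev, u32 features)
{
	struct example_priv *priv = netdev_priv(dev);

	if (!priv->can_sg)		/* backend cannot do scatter-gather */
		features &= ~NETIF_F_SG;
	if (!priv->csum)		/* backend cannot do checksum offload */
		features &= ~NETIF_F_IP_CSUM;

	return features;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_fix_features = example_fix_features,
};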
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 22b8c3505991..1ce729d6af75 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -26,7 +26,7 @@ struct backend_info {
26 struct xenvif *vif; 26 struct xenvif *vif;
27 enum xenbus_state frontend_state; 27 enum xenbus_state frontend_state;
28 struct xenbus_watch hotplug_status_watch; 28 struct xenbus_watch hotplug_status_watch;
29 int have_hotplug_status_watch:1; 29 u8 have_hotplug_status_watch:1;
30}; 30};
31 31
32static int connect_rings(struct backend_info *); 32static int connect_rings(struct backend_info *);
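The one-line xenbus.c change is subtle: int have_hotplug_status_watch:1 is a signed one-bit bitfield, which on common compilers can only represent 0 and -1, so storing 1 reads back as -1; that is confusing at best and draws sparse warnings, while a u8 bitfield holds 0 and 1 as expected. A tiny illustration (not from the patch; behaviour described is GCC's, since the signedness of a plain int bitfield is implementation-defined):

struct demo {
	int s:1;	/* signed: representable values are 0 and -1 */
	u8  u:1;	/* unsigned: representable values are 0 and 1 */
};

/*
 * With GCC, after d.s = 1 the field reads back as -1, so a test like
 * (d.s == 1) is false even though (d.s != 0) still holds; d.u = 1
 * reads back as 1.
 */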
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5c8d9c385be0..db9a763aaa7f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1140,6 +1140,42 @@ static void xennet_uninit(struct net_device *dev)
1140 gnttab_free_grant_references(np->gref_rx_head); 1140 gnttab_free_grant_references(np->gref_rx_head);
1141} 1141}
1142 1142
1143static u32 xennet_fix_features(struct net_device *dev, u32 features)
1144{
1145 struct netfront_info *np = netdev_priv(dev);
1146 int val;
1147
1148 if (features & NETIF_F_SG) {
1149 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1150 "%d", &val) < 0)
1151 val = 0;
1152
1153 if (!val)
1154 features &= ~NETIF_F_SG;
1155 }
1156
1157 if (features & NETIF_F_TSO) {
1158 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1159 "feature-gso-tcpv4", "%d", &val) < 0)
1160 val = 0;
1161
1162 if (!val)
1163 features &= ~NETIF_F_TSO;
1164 }
1165
1166 return features;
1167}
1168
1169static int xennet_set_features(struct net_device *dev, u32 features)
1170{
1171 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1172 netdev_info(dev, "Reducing MTU because no SG offload");
1173 dev->mtu = ETH_DATA_LEN;
1174 }
1175
1176 return 0;
1177}
1178
1143static const struct net_device_ops xennet_netdev_ops = { 1179static const struct net_device_ops xennet_netdev_ops = {
1144 .ndo_open = xennet_open, 1180 .ndo_open = xennet_open,
1145 .ndo_uninit = xennet_uninit, 1181 .ndo_uninit = xennet_uninit,
@@ -1148,6 +1184,8 @@ static const struct net_device_ops xennet_netdev_ops = {
1148 .ndo_change_mtu = xennet_change_mtu, 1184 .ndo_change_mtu = xennet_change_mtu,
1149 .ndo_set_mac_address = eth_mac_addr, 1185 .ndo_set_mac_address = eth_mac_addr,
1150 .ndo_validate_addr = eth_validate_addr, 1186 .ndo_validate_addr = eth_validate_addr,
1187 .ndo_fix_features = xennet_fix_features,
1188 .ndo_set_features = xennet_set_features,
1151}; 1189};
1152 1190
1153static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) 1191static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
@@ -1209,7 +1247,17 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
1209 netdev->netdev_ops = &xennet_netdev_ops; 1247 netdev->netdev_ops = &xennet_netdev_ops;
1210 1248
1211 netif_napi_add(netdev, &np->napi, xennet_poll, 64); 1249 netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1212 netdev->features = NETIF_F_IP_CSUM; 1250 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1251 NETIF_F_GSO_ROBUST;
1252 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
1253
1254 /*
1255 * Assume that all hw features are available for now. This set
1256 * will be adjusted by the call to netdev_update_features() in
1257 * xennet_connect() which is the earliest point where we can
1258 * negotiate with the backend regarding supported features.
1259 */
1260 netdev->features |= netdev->hw_features;
1213 1261
1214 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); 1262 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1215 SET_NETDEV_DEV(netdev, &dev->dev); 1263 SET_NETDEV_DEV(netdev, &dev->dev);
@@ -1416,8 +1464,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1416 goto fail; 1464 goto fail;
1417 1465
1418 err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, 1466 err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
1419 IRQF_SAMPLE_RANDOM, netdev->name, 1467 0, netdev->name, netdev);
1420 netdev);
1421 if (err < 0) 1468 if (err < 0)
1422 goto fail; 1469 goto fail;
1423 netdev->irq = err; 1470 netdev->irq = err;
@@ -1510,54 +1557,6 @@ again:
1510 return err; 1557 return err;
1511} 1558}
1512 1559
1513static int xennet_set_sg(struct net_device *dev, u32 data)
1514{
1515 if (data) {
1516 struct netfront_info *np = netdev_priv(dev);
1517 int val;
1518
1519 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1520 "%d", &val) < 0)
1521 val = 0;
1522 if (!val)
1523 return -ENOSYS;
1524 } else if (dev->mtu > ETH_DATA_LEN)
1525 dev->mtu = ETH_DATA_LEN;
1526
1527 return ethtool_op_set_sg(dev, data);
1528}
1529
1530static int xennet_set_tso(struct net_device *dev, u32 data)
1531{
1532 if (data) {
1533 struct netfront_info *np = netdev_priv(dev);
1534 int val;
1535
1536 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1537 "feature-gso-tcpv4", "%d", &val) < 0)
1538 val = 0;
1539 if (!val)
1540 return -ENOSYS;
1541 }
1542
1543 return ethtool_op_set_tso(dev, data);
1544}
1545
1546static void xennet_set_features(struct net_device *dev)
1547{
1548 /* Turn off all GSO bits except ROBUST. */
1549 dev->features &= ~NETIF_F_GSO_MASK;
1550 dev->features |= NETIF_F_GSO_ROBUST;
1551 xennet_set_sg(dev, 0);
1552
1553 /* We need checksum offload to enable scatter/gather and TSO. */
1554 if (!(dev->features & NETIF_F_IP_CSUM))
1555 return;
1556
1557 if (!xennet_set_sg(dev, 1))
1558 xennet_set_tso(dev, 1);
1559}
1560
1561static int xennet_connect(struct net_device *dev) 1560static int xennet_connect(struct net_device *dev)
1562{ 1561{
1563 struct netfront_info *np = netdev_priv(dev); 1562 struct netfront_info *np = netdev_priv(dev);
@@ -1582,7 +1581,7 @@ static int xennet_connect(struct net_device *dev)
1582 if (err) 1581 if (err)
1583 return err; 1582 return err;
1584 1583
1585 xennet_set_features(dev); 1584 netdev_update_features(dev);
1586 1585
1587 spin_lock_bh(&np->rx_lock); 1586 spin_lock_bh(&np->rx_lock);
1588 spin_lock_irq(&np->tx_lock); 1587 spin_lock_irq(&np->tx_lock);
@@ -1710,9 +1709,6 @@ static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1710 1709
1711static const struct ethtool_ops xennet_ethtool_ops = 1710static const struct ethtool_ops xennet_ethtool_ops =
1712{ 1711{
1713 .set_tx_csum = ethtool_op_set_tx_csum,
1714 .set_sg = xennet_set_sg,
1715 .set_tso = xennet_set_tso,
1716 .get_link = ethtool_op_get_link, 1712 .get_link = ethtool_op_get_link,
1717 1713
1718 .get_sset_count = xennet_get_sset_count, 1714 .get_sset_count = xennet_get_sset_count,
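netfront follows the same conversion as netback: the set_sg/set_tso ethtool hooks and the hand-rolled xennet_set_features() go away in favour of hw_features plus ndo_fix_features/ndo_set_features, with xennet_fix_features() querying the backend's feature-sg and feature-gso-tcpv4 keys, and xennet_connect() simply re-running the negotiation. A reduced sketch of that reconnect step (example_talk_to_backend is a stand-in for the real handshake, not a function in this driver):

static int example_talk_to_backend(struct net_device *dev);	/* hypothetical handshake */

static int example_connect(struct net_device *dev)
{
	int err = example_talk_to_backend(dev);

	if (err)
		return err;

	/*
	 * Re-run negotiation: ndo_fix_features consults the backend's
	 * feature keys and strips whatever the new backend lacks.
	 */
	netdev_update_features(dev);
	return 0;
}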
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index da8aa75bb20b..f1fa2483ae6b 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -845,12 +845,10 @@ claw_irq_tasklet ( unsigned long data )
845{ 845{
846 struct chbk * p_ch; 846 struct chbk * p_ch;
847 struct net_device *dev; 847 struct net_device *dev;
848 struct claw_privbk * privptr;
849 848
850 p_ch = (struct chbk *) data; 849 p_ch = (struct chbk *) data;
851 dev = (struct net_device *)p_ch->ndev; 850 dev = (struct net_device *)p_ch->ndev;
852 CLAW_DBF_TEXT(4, trace, "IRQtask"); 851 CLAW_DBF_TEXT(4, trace, "IRQtask");
853 privptr = (struct claw_privbk *)dev->ml_priv;
854 unpack_read(dev); 852 unpack_read(dev);
855 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); 853 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
856 CLAW_DBF_TEXT(4, trace, "TskletXt"); 854 CLAW_DBF_TEXT(4, trace, "TskletXt");
@@ -1026,7 +1024,6 @@ claw_write_next ( struct chbk * p_ch )
1026 struct net_device *dev; 1024 struct net_device *dev;
1027 struct claw_privbk *privptr=NULL; 1025 struct claw_privbk *privptr=NULL;
1028 struct sk_buff *pk_skb; 1026 struct sk_buff *pk_skb;
1029 int rc;
1030 1027
1031 CLAW_DBF_TEXT(4, trace, "claw_wrt"); 1028 CLAW_DBF_TEXT(4, trace, "claw_wrt");
1032 if (p_ch->claw_state == CLAW_STOP) 1029 if (p_ch->claw_state == CLAW_STOP)
@@ -1038,7 +1035,7 @@ claw_write_next ( struct chbk * p_ch )
1038 !skb_queue_empty(&p_ch->collect_queue)) { 1035 !skb_queue_empty(&p_ch->collect_queue)) {
1039 pk_skb = claw_pack_skb(privptr); 1036 pk_skb = claw_pack_skb(privptr);
1040 while (pk_skb != NULL) { 1037 while (pk_skb != NULL) {
1041 rc = claw_hw_tx( pk_skb, dev,1); 1038 claw_hw_tx(pk_skb, dev, 1);
1042 if (privptr->write_free_count > 0) { 1039 if (privptr->write_free_count > 0) {
1043 pk_skb = claw_pack_skb(privptr); 1040 pk_skb = claw_pack_skb(privptr);
1044 } else 1041 } else
@@ -1322,15 +1319,12 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1322 unsigned char *pDataAddress; 1319 unsigned char *pDataAddress;
1323 struct endccw *pEnd; 1320 struct endccw *pEnd;
1324 struct ccw1 tempCCW; 1321 struct ccw1 tempCCW;
1325 struct chbk *p_ch;
1326 struct claw_env *p_env; 1322 struct claw_env *p_env;
1327 int lock;
1328 struct clawph *pk_head; 1323 struct clawph *pk_head;
1329 struct chbk *ch; 1324 struct chbk *ch;
1330 1325
1331 CLAW_DBF_TEXT(4, trace, "hw_tx"); 1326 CLAW_DBF_TEXT(4, trace, "hw_tx");
1332 privptr = (struct claw_privbk *)(dev->ml_priv); 1327 privptr = (struct claw_privbk *)(dev->ml_priv);
1333 p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
1334 p_env =privptr->p_env; 1328 p_env =privptr->p_env;
1335 claw_free_wrt_buf(dev); /* Clean up free chain if possible */ 1329 claw_free_wrt_buf(dev); /* Clean up free chain if possible */
1336 /* scan the write queue to free any completed write packets */ 1330 /* scan the write queue to free any completed write packets */
@@ -1511,12 +1505,6 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1511 1505
1512 } /* endif (p_first_ccw!=NULL) */ 1506 } /* endif (p_first_ccw!=NULL) */
1513 dev_kfree_skb_any(skb); 1507 dev_kfree_skb_any(skb);
1514 if (linkid==0) {
1515 lock=LOCK_NO;
1516 }
1517 else {
1518 lock=LOCK_YES;
1519 }
1520 claw_strt_out_IO(dev ); 1508 claw_strt_out_IO(dev );
1521 /* if write free count is zero , set NOBUFFER */ 1509 /* if write free count is zero , set NOBUFFER */
1522 if (privptr->write_free_count==0) { 1510 if (privptr->write_free_count==0) {
@@ -2821,15 +2809,11 @@ claw_free_wrt_buf( struct net_device *dev )
2821{ 2809{
2822 2810
2823 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv; 2811 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2824 struct ccwbk*p_first_ccw;
2825 struct ccwbk*p_last_ccw;
2826 struct ccwbk*p_this_ccw; 2812 struct ccwbk*p_this_ccw;
2827 struct ccwbk*p_next_ccw; 2813 struct ccwbk*p_next_ccw;
2828 2814
2829 CLAW_DBF_TEXT(4, trace, "freewrtb"); 2815 CLAW_DBF_TEXT(4, trace, "freewrtb");
2830 /* scan the write queue to free any completed write packets */ 2816 /* scan the write queue to free any completed write packets */
2831 p_first_ccw=NULL;
2832 p_last_ccw=NULL;
2833 p_this_ccw=privptr->p_write_active_first; 2817 p_this_ccw=privptr->p_write_active_first;
2834 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) 2818 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2835 { 2819 {
@@ -3072,7 +3056,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
3072{ 3056{
3073 struct claw_privbk *priv; 3057 struct claw_privbk *priv;
3074 struct net_device *ndev; 3058 struct net_device *ndev;
3075 int ret; 3059 int ret = 0;
3076 3060
3077 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); 3061 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3078 priv = dev_get_drvdata(&cgdev->dev); 3062 priv = dev_get_drvdata(&cgdev->dev);
@@ -3095,7 +3079,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
3095 } 3079 }
3096 ccw_device_set_offline(cgdev->cdev[1]); 3080 ccw_device_set_offline(cgdev->cdev[1]);
3097 ccw_device_set_offline(cgdev->cdev[0]); 3081 ccw_device_set_offline(cgdev->cdev[0]);
3098 return 0; 3082 return ret;
3099} 3083}
3100 3084
3101static void 3085static void
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c189296763a4..426787efc492 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -672,7 +672,6 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
672 int ccw_idx; 672 int ccw_idx;
673 unsigned long hi; 673 unsigned long hi;
674 unsigned long saveflags = 0; /* avoids compiler warning */ 674 unsigned long saveflags = 0; /* avoids compiler warning */
675 __u16 block_len;
676 675
677 CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n", 676 CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
678 __func__, dev->name, smp_processor_id(), ch, 677 __func__, dev->name, smp_processor_id(), ch,
@@ -719,7 +718,6 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
719 */ 718 */
720 atomic_inc(&skb->users); 719 atomic_inc(&skb->users);
721 720
722 block_len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
723 /* 721 /*
724 * IDAL support in CTCM is broken, so we have to 722 * IDAL support in CTCM is broken, so we have to
725 * care about skb's above 2G ourselves. 723 * care about skb's above 2G ourselves.
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index b64881f33f23..da4c747335e7 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -653,7 +653,6 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
653 struct net_device *dev = rch->netdev; 653 struct net_device *dev = rch->netdev;
654 struct ctcm_priv *priv = dev->ml_priv; 654 struct ctcm_priv *priv = dev->ml_priv;
655 struct mpc_group *grp = priv->mpcg; 655 struct mpc_group *grp = priv->mpcg;
656 int rc = 0;
657 struct th_sweep *header; 656 struct th_sweep *header;
658 struct sk_buff *sweep_skb; 657 struct sk_buff *sweep_skb;
659 struct channel *ch = priv->channel[CTCM_WRITE]; 658 struct channel *ch = priv->channel[CTCM_WRITE];
@@ -665,16 +664,14 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
665 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, 664 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
666 "%s(%s): sweep_skb allocation ERROR\n", 665 "%s(%s): sweep_skb allocation ERROR\n",
667 CTCM_FUNTAIL, rch->id); 666 CTCM_FUNTAIL, rch->id);
668 rc = -ENOMEM; 667 goto done;
669 goto done;
670 } 668 }
671 669
672 header = kmalloc(sizeof(struct th_sweep), gfp_type()); 670 header = kmalloc(sizeof(struct th_sweep), gfp_type());
673 671
674 if (!header) { 672 if (!header) {
675 dev_kfree_skb_any(sweep_skb); 673 dev_kfree_skb_any(sweep_skb);
676 rc = -ENOMEM; 674 goto done;
677 goto done;
678 } 675 }
679 676
680 header->th.th_seg = 0x00 ; 677 header->th.th_seg = 0x00 ;
@@ -1370,8 +1367,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1370 struct net_device *dev = arg; 1367 struct net_device *dev = arg;
1371 struct ctcm_priv *priv; 1368 struct ctcm_priv *priv;
1372 struct mpc_group *grp; 1369 struct mpc_group *grp;
1373 int rc = 0; 1370 struct channel *wch;
1374 struct channel *wch, *rch;
1375 1371
1376 BUG_ON(dev == NULL); 1372 BUG_ON(dev == NULL);
1377 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); 1373 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
@@ -1396,7 +1392,6 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1396 fsm_deltimer(&priv->restart_timer); 1392 fsm_deltimer(&priv->restart_timer);
1397 1393
1398 wch = priv->channel[CTCM_WRITE]; 1394 wch = priv->channel[CTCM_WRITE];
1399 rch = priv->channel[CTCM_READ];
1400 1395
1401 switch (grp->saved_state) { 1396 switch (grp->saved_state) {
1402 case MPCG_STATE_RESET: 1397 case MPCG_STATE_RESET:
@@ -1435,7 +1430,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1435 1430
1436 if (grp->send_qllc_disc == 1) { 1431 if (grp->send_qllc_disc == 1) {
1437 grp->send_qllc_disc = 0; 1432 grp->send_qllc_disc = 0;
1438 rc = mpc_send_qllc_discontact(dev); 1433 mpc_send_qllc_discontact(dev);
1439 } 1434 }
1440 1435
1441 /* DO NOT issue DEV_EVENT_STOP directly out of this code */ 1436 /* DO NOT issue DEV_EVENT_STOP directly out of this code */
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 49d1cfc3217e..c3b8064a102d 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1483,7 +1483,6 @@ lcs_tasklet(unsigned long data)
1483 struct lcs_channel *channel; 1483 struct lcs_channel *channel;
1484 struct lcs_buffer *iob; 1484 struct lcs_buffer *iob;
1485 int buf_idx; 1485 int buf_idx;
1486 int rc;
1487 1486
1488 channel = (struct lcs_channel *) data; 1487 channel = (struct lcs_channel *) data;
1489 LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); 1488 LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
@@ -1500,14 +1499,11 @@ lcs_tasklet(unsigned long data)
1500 channel->buf_idx = buf_idx; 1499 channel->buf_idx = buf_idx;
1501 1500
1502 if (channel->state == LCS_CH_STATE_STOPPED) 1501 if (channel->state == LCS_CH_STATE_STOPPED)
1503 // FIXME: what if rc != 0 ?? 1502 lcs_start_channel(channel);
1504 rc = lcs_start_channel(channel);
1505 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1503 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1506 if (channel->state == LCS_CH_STATE_SUSPENDED && 1504 if (channel->state == LCS_CH_STATE_SUSPENDED &&
1507 channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) { 1505 channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
1508 // FIXME: what if rc != 0 ?? 1506 __lcs_resume_channel(channel);
1509 rc = __lcs_resume_channel(channel);
1510 }
1511 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); 1507 spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
1512 1508
1513 /* Something happened on the channel. Wake up waiters. */ 1509 /* Something happened on the channel. Wake up waiters. */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index b6a6356d09b3..3251333a23df 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1994,8 +1994,6 @@ static struct net_device *netiucv_init_netdevice(char *username)
1994 netiucv_setup_netdevice); 1994 netiucv_setup_netdevice);
1995 if (!dev) 1995 if (!dev)
1996 return NULL; 1996 return NULL;
1997 if (dev_alloc_name(dev, dev->name) < 0)
1998 goto out_netdev;
1999 1997
2000 privptr = netdev_priv(dev); 1998 privptr = netdev_priv(dev);
2001 privptr->fsm = init_fsm("netiucvdev", dev_state_names, 1999 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
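The dropped dev_alloc_name() call above looks redundant rather than lost functionality: register_netdev() resolves a name template containing "%d" itself via dev_get_valid_name(). A minimal sketch of that pattern follows; the "iucv%d" template, ether_setup and the error handling are assumptions for illustration, not the actual netiucv code.

/* Sketch only -- assumes <linux/netdevice.h> and <linux/etherdevice.h>. */
static struct net_device *example_create_netdev(void)
{
        struct net_device *dev;

        dev = alloc_netdev(0, "iucv%d", ether_setup);   /* "%d" left unresolved */
        if (!dev)
                return NULL;

        if (register_netdev(dev)) {     /* picks the first free iucv<n> here */
                free_netdev(dev);
                return NULL;
        }
        return dev;
}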
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index af3f7b095647..55c6aa1c9704 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -407,12 +407,6 @@ struct qeth_qdio_q {
407 int next_buf_to_init; 407 int next_buf_to_init;
408} __attribute__ ((aligned(256))); 408} __attribute__ ((aligned(256)));
409 409
410/* possible types of qeth large_send support */
411enum qeth_large_send_types {
412 QETH_LARGE_SEND_NO,
413 QETH_LARGE_SEND_TSO,
414};
415
416struct qeth_qdio_out_buffer { 410struct qeth_qdio_out_buffer {
417 struct qdio_buffer *buffer; 411 struct qdio_buffer *buffer;
418 atomic_t state; 412 atomic_t state;
@@ -637,6 +631,8 @@ struct qeth_card_info {
637 __u32 csum_mask; 631 __u32 csum_mask;
638 __u32 tx_csum_mask; 632 __u32 tx_csum_mask;
639 enum qeth_ipa_promisc_modes promisc_mode; 633 enum qeth_ipa_promisc_modes promisc_mode;
634 __u32 diagass_support;
635 __u32 hwtrap;
640}; 636};
641 637
642struct qeth_card_options { 638struct qeth_card_options {
@@ -645,13 +641,11 @@ struct qeth_card_options {
645 struct qeth_ipa_info adp; /*Adapter parameters*/ 641 struct qeth_ipa_info adp; /*Adapter parameters*/
646 struct qeth_routing_info route6; 642 struct qeth_routing_info route6;
647 struct qeth_ipa_info ipa6; 643 struct qeth_ipa_info ipa6;
648 enum qeth_checksum_types checksum_type;
649 int broadcast_mode; 644 int broadcast_mode;
650 int macaddr_mode; 645 int macaddr_mode;
651 int fake_broadcast; 646 int fake_broadcast;
652 int add_hhlen; 647 int add_hhlen;
653 int layer2; 648 int layer2;
654 enum qeth_large_send_types large_send;
655 int performance_stats; 649 int performance_stats;
656 int rx_sg_cb; 650 int rx_sg_cb;
657 enum qeth_ipa_isolation_modes isolation; 651 enum qeth_ipa_isolation_modes isolation;
@@ -760,6 +754,14 @@ struct qeth_card_list_struct {
760 rwlock_t rwlock; 754 rwlock_t rwlock;
761}; 755};
762 756
757struct qeth_trap_id {
758 __u16 lparnr;
759 char vmname[8];
760 __u8 chpid;
761 __u8 ssid;
762 __u16 devno;
763} __packed;
764
763/*some helper functions*/ 765/*some helper functions*/
764#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 766#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
765 767
@@ -794,6 +796,12 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
794 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); 796 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
795} 797}
796 798
799static inline int qeth_is_diagass_supported(struct qeth_card *card,
800 enum qeth_diags_cmds cmd)
801{
802 return card->info.diagass_support & (__u32)cmd;
803}
804
797extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 805extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
798extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 806extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
799const char *qeth_get_cardname_short(struct qeth_card *); 807const char *qeth_get_cardname_short(struct qeth_card *);
@@ -879,6 +887,8 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
879int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 887int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
880int qeth_set_access_ctrl_online(struct qeth_card *card); 888int qeth_set_access_ctrl_online(struct qeth_card *card);
881int qeth_hdr_chk_and_bounce(struct sk_buff *, int); 889int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
890int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
891int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
882 892
883/* exports for OSN */ 893/* exports for OSN */
884int qeth_osn_assist(struct net_device *, void *, int); 894int qeth_osn_assist(struct net_device *, void *, int);
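The new qeth_is_diagass_supported() helper is a plain bitmask test against the mask cached by the QUERY subcommand, which works because the diagnostic subcommand codes are single-bit flags. A hedged sketch of how a caller combines it with the newly exported qeth_hw_trap(); the error policy here is an assumption, the real online paths appear further down in this patch.

/* Illustration only, not driver code. */
static void example_arm_hw_trap(struct qeth_card *card)
{
        if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
                card->info.hwtrap = 0;          /* facility not available */
                return;
        }
        if (card->info.hwtrap &&
            qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
                card->info.hwtrap = 0;          /* arming failed */
}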
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 85cc53117ea6..503678a30981 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -24,6 +24,7 @@
24 24
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/sysinfo.h>
27 28
28#include "qeth_core.h" 29#include "qeth_core.h"
29 30
@@ -349,6 +350,8 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
349 card->info.chpid); 350 card->info.chpid);
350 netif_carrier_on(card->dev); 351 netif_carrier_on(card->dev);
351 card->lan_online = 1; 352 card->lan_online = 1;
353 if (card->info.hwtrap)
354 card->info.hwtrap = 2;
352 qeth_schedule_recovery(card); 355 qeth_schedule_recovery(card);
353 return NULL; 356 return NULL;
354 case IPA_CMD_MODCCID: 357 case IPA_CMD_MODCCID:
@@ -1039,7 +1042,6 @@ static void qeth_set_intial_options(struct qeth_card *card)
1039{ 1042{
1040 card->options.route4.type = NO_ROUTER; 1043 card->options.route4.type = NO_ROUTER;
1041 card->options.route6.type = NO_ROUTER; 1044 card->options.route6.type = NO_ROUTER;
1042 card->options.checksum_type = QETH_CHECKSUM_DEFAULT;
1043 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; 1045 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1044 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL; 1046 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1045 card->options.fake_broadcast = 0; 1047 card->options.fake_broadcast = 0;
@@ -2574,6 +2576,142 @@ int qeth_query_setadapterparms(struct qeth_card *card)
2574} 2576}
2575EXPORT_SYMBOL_GPL(qeth_query_setadapterparms); 2577EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
2576 2578
2579static int qeth_query_ipassists_cb(struct qeth_card *card,
2580 struct qeth_reply *reply, unsigned long data)
2581{
2582 struct qeth_ipa_cmd *cmd;
2583
2584 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
2585
2586 cmd = (struct qeth_ipa_cmd *) data;
2587 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2588 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2589 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2590 } else {
2591 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2592 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2593 }
2594 QETH_DBF_TEXT(SETUP, 2, "suppenbl");
2595 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
2596 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
2597 return 0;
2598}
2599
2600int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
2601{
2602 int rc;
2603 struct qeth_cmd_buffer *iob;
2604
2605 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
2606 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
2607 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2608 return rc;
2609}
2610EXPORT_SYMBOL_GPL(qeth_query_ipassists);
2611
2612static int qeth_query_setdiagass_cb(struct qeth_card *card,
2613 struct qeth_reply *reply, unsigned long data)
2614{
2615 struct qeth_ipa_cmd *cmd;
2616 __u16 rc;
2617
2618 cmd = (struct qeth_ipa_cmd *)data;
2619 rc = cmd->hdr.return_code;
2620 if (rc)
2621 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
2622 else
2623 card->info.diagass_support = cmd->data.diagass.ext;
2624 return 0;
2625}
2626
2627static int qeth_query_setdiagass(struct qeth_card *card)
2628{
2629 struct qeth_cmd_buffer *iob;
2630 struct qeth_ipa_cmd *cmd;
2631
2632 QETH_DBF_TEXT(SETUP, 2, "qdiagass");
2633 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
2634 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2635 cmd->data.diagass.subcmd_len = 16;
2636 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
2637 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
2638}
2639
2640static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
2641{
2642 unsigned long info = get_zeroed_page(GFP_KERNEL);
2643 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
2644 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
2645 struct ccw_dev_id ccwid;
2646 int level, rc;
2647
2648 tid->chpid = card->info.chpid;
2649 ccw_device_get_id(CARD_RDEV(card), &ccwid);
2650 tid->ssid = ccwid.ssid;
2651 tid->devno = ccwid.devno;
2652 if (!info)
2653 return;
2654
2655 rc = stsi(NULL, 0, 0, 0);
2656 if (rc == -ENOSYS)
2657 level = rc;
2658 else
2659 level = (((unsigned int) rc) >> 28);
2660
2661 if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
2662 tid->lparnr = info222->lpar_number;
2663
2664 if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
2665 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
2666 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
2667 }
2668 free_page(info);
2669 return;
2670}
2671
2672static int qeth_hw_trap_cb(struct qeth_card *card,
2673 struct qeth_reply *reply, unsigned long data)
2674{
2675 struct qeth_ipa_cmd *cmd;
2676 __u16 rc;
2677
2678 cmd = (struct qeth_ipa_cmd *)data;
2679 rc = cmd->hdr.return_code;
2680 if (rc)
2681 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
2682 return 0;
2683}
2684
2685int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
2686{
2687 struct qeth_cmd_buffer *iob;
2688 struct qeth_ipa_cmd *cmd;
2689
2690 QETH_DBF_TEXT(SETUP, 2, "diagtrap");
2691 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
2692 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
2693 cmd->data.diagass.subcmd_len = 80;
2694 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
2695 cmd->data.diagass.type = 1;
2696 cmd->data.diagass.action = action;
2697 switch (action) {
2698 case QETH_DIAGS_TRAP_ARM:
2699 cmd->data.diagass.options = 0x0003;
2700 cmd->data.diagass.ext = 0x00010000 +
2701 sizeof(struct qeth_trap_id);
2702 qeth_get_trap_id(card,
2703 (struct qeth_trap_id *)cmd->data.diagass.cdata);
2704 break;
2705 case QETH_DIAGS_TRAP_DISARM:
2706 cmd->data.diagass.options = 0x0001;
2707 break;
2708 case QETH_DIAGS_TRAP_CAPTURE:
2709 break;
2710 }
2711 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
2712}
2713EXPORT_SYMBOL_GPL(qeth_hw_trap);
2714
2577int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, 2715int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2578 unsigned int qdio_error, const char *dbftext) 2716 unsigned int qdio_error, const char *dbftext)
2579{ 2717{
@@ -3903,6 +4041,7 @@ MODULE_DEVICE_TABLE(ccw, qeth_ids);
3903 4041
3904static struct ccw_driver qeth_ccw_driver = { 4042static struct ccw_driver qeth_ccw_driver = {
3905 .driver = { 4043 .driver = {
4044 .owner = THIS_MODULE,
3906 .name = "qeth", 4045 .name = "qeth",
3907 }, 4046 },
3908 .ids = qeth_ids, 4047 .ids = qeth_ids,
@@ -3984,6 +4123,15 @@ retriable:
3984 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 4123 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
3985 goto out; 4124 goto out;
3986 } 4125 }
4126
4127 card->options.ipa4.supported_funcs = 0;
4128 card->options.adp.supported_funcs = 0;
4129 card->info.diagass_support = 0;
4130 qeth_query_ipassists(card, QETH_PROT_IPV4);
4131 if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
4132 qeth_query_setadapterparms(card);
4133 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
4134 qeth_query_setdiagass(card);
3987 return 0; 4135 return 0;
3988out: 4136out:
3989 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 4137 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 07d588867b57..e5a9d1c03839 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -80,14 +80,6 @@ enum qeth_tr_broadcast_modes {
80 QETH_TR_BROADCAST_LOCAL = 1, 80 QETH_TR_BROADCAST_LOCAL = 1,
81}; 81};
82 82
83/* these values match CHECKSUM_* in include/linux/skbuff.h */
84enum qeth_checksum_types {
85 SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */
86 HW_CHECKSUMMING = 1,
87 NO_CHECKSUMMING = 2,
88};
89#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING
90
91/* 83/*
92 * Routing stuff 84 * Routing stuff
93 */ 85 */
@@ -456,6 +448,12 @@ enum qeth_diags_trace_cmds {
456 QETH_DIAGS_CMD_TRACE_QUERY = 0x0010, 448 QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
457}; 449};
458 450
451enum qeth_diags_trap_action {
452 QETH_DIAGS_TRAP_ARM = 0x01,
453 QETH_DIAGS_TRAP_DISARM = 0x02,
454 QETH_DIAGS_TRAP_CAPTURE = 0x04,
455};
456
459struct qeth_ipacmd_diagass { 457struct qeth_ipacmd_diagass {
460 __u32 host_tod2; 458 __u32 host_tod2;
461 __u32:32; 459 __u32:32;
@@ -465,7 +463,8 @@ struct qeth_ipacmd_diagass {
465 __u8 type; 463 __u8 type;
466 __u8 action; 464 __u8 action;
467 __u16 options; 465 __u16 options;
468 __u32:32; 466 __u32 ext;
467 __u8 cdata[64];
469} __attribute__ ((packed)); 468} __attribute__ ((packed));
470 469
471/* Header for each IPA command */ 470/* Header for each IPA command */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index b5e967cf7e2d..0a8e86c1b0ea 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -530,6 +530,66 @@ out:
530static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, 530static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
531 qeth_dev_isolation_store); 531 qeth_dev_isolation_store);
532 532
533static ssize_t qeth_hw_trap_show(struct device *dev,
534 struct device_attribute *attr, char *buf)
535{
536 struct qeth_card *card = dev_get_drvdata(dev);
537
538 if (!card)
539 return -EINVAL;
540 if (card->info.hwtrap)
541 return snprintf(buf, 5, "arm\n");
542 else
543 return snprintf(buf, 8, "disarm\n");
544}
545
546static ssize_t qeth_hw_trap_store(struct device *dev,
547 struct device_attribute *attr, const char *buf, size_t count)
548{
549 struct qeth_card *card = dev_get_drvdata(dev);
550 int rc = 0;
551 char *tmp, *curtoken;
552 int state = 0;
553 curtoken = (char *)buf;
554
555 if (!card)
556 return -EINVAL;
557
558 mutex_lock(&card->conf_mutex);
559 if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP)
560 state = 1;
561 tmp = strsep(&curtoken, "\n");
562
563 if (!strcmp(tmp, "arm") && !card->info.hwtrap) {
564 if (state) {
565 if (qeth_is_diagass_supported(card,
566 QETH_DIAGS_CMD_TRAP)) {
567 rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
568 if (!rc)
569 card->info.hwtrap = 1;
570 } else
571 rc = -EINVAL;
572 } else
573 card->info.hwtrap = 1;
574 } else if (!strcmp(tmp, "disarm") && card->info.hwtrap) {
575 if (state) {
576 rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
577 if (!rc)
578 card->info.hwtrap = 0;
579 } else
580 card->info.hwtrap = 0;
581 } else if (!strcmp(tmp, "trap") && state && card->info.hwtrap)
582 rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
583 else
584 rc = -EINVAL;
585
586 mutex_unlock(&card->conf_mutex);
587 return rc ? rc : count;
588}
589
590static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
591 qeth_hw_trap_store);
592
533static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) 593static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
534{ 594{
535 595
@@ -653,6 +713,7 @@ static struct attribute *qeth_device_attrs[] = {
653 &dev_attr_performance_stats.attr, 713 &dev_attr_performance_stats.attr,
654 &dev_attr_layer2.attr, 714 &dev_attr_layer2.attr,
655 &dev_attr_isolation.attr, 715 &dev_attr_isolation.attr,
716 &dev_attr_hw_trap.attr,
656 NULL, 717 NULL,
657}; 718};
658 719
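For completeness, a small user-space illustration of driving the new hw_trap attribute; the ccwgroup bus id in the path is a made-up example and has to be replaced with the id of the qeth device in question.

#include <stdio.h>

int main(void)
{
        /* assumed device id -- substitute your own */
        const char *attr = "/sys/bus/ccwgroup/drivers/qeth/0.0.f5f0/hw_trap";
        FILE *f = fopen(attr, "w");

        if (!f) {
                perror("hw_trap");
                return 1;
        }
        fputs("trap\n", f);     /* "arm" and "disarm" are also accepted */
        return fclose(f) ? 1 : 0;
}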
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6fbaacb21943..b70b47fbd6cd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -420,10 +420,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
420 case QETH_HEADER_TYPE_LAYER2: 420 case QETH_HEADER_TYPE_LAYER2:
421 skb->pkt_type = PACKET_HOST; 421 skb->pkt_type = PACKET_HOST;
422 skb->protocol = eth_type_trans(skb, skb->dev); 422 skb->protocol = eth_type_trans(skb, skb->dev);
423 if (card->options.checksum_type == NO_CHECKSUMMING) 423 skb->ip_summed = CHECKSUM_NONE;
424 skb->ip_summed = CHECKSUM_UNNECESSARY;
425 else
426 skb->ip_summed = CHECKSUM_NONE;
427 if (skb->protocol == htons(ETH_P_802_2)) 424 if (skb->protocol == htons(ETH_P_802_2))
428 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 425 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
429 len = skb->len; 426 len = skb->len;
@@ -879,6 +876,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
879 INIT_LIST_HEAD(&card->vid_list); 876 INIT_LIST_HEAD(&card->vid_list);
880 INIT_LIST_HEAD(&card->mc_list); 877 INIT_LIST_HEAD(&card->mc_list);
881 card->options.layer2 = 1; 878 card->options.layer2 = 1;
879 card->info.hwtrap = 0;
882 card->discipline.start_poll = qeth_qdio_start_poll; 880 card->discipline.start_poll = qeth_qdio_start_poll;
883 card->discipline.input_handler = (qdio_handler_t *) 881 card->discipline.input_handler = (qdio_handler_t *)
884 qeth_qdio_input_handler; 882 qeth_qdio_input_handler;
@@ -997,6 +995,13 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
997 if (card->info.type != QETH_CARD_TYPE_OSN) 995 if (card->info.type != QETH_CARD_TYPE_OSN)
998 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 996 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
999 997
998 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
999 if (card->info.hwtrap &&
1000 qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
1001 card->info.hwtrap = 0;
1002 } else
1003 card->info.hwtrap = 0;
1004
1000 card->state = CARD_STATE_HARDSETUP; 1005 card->state = CARD_STATE_HARDSETUP;
1001 memset(&card->rx, 0, sizeof(struct qeth_rx)); 1006 memset(&card->rx, 0, sizeof(struct qeth_rx));
1002 qeth_print_status_message(card); 1007 qeth_print_status_message(card);
@@ -1095,6 +1100,10 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
1095 if (card->dev && netif_carrier_ok(card->dev)) 1100 if (card->dev && netif_carrier_ok(card->dev))
1096 netif_carrier_off(card->dev); 1101 netif_carrier_off(card->dev);
1097 recover_flag = card->state; 1102 recover_flag = card->state;
1103 if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
1104 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
1105 card->info.hwtrap = 1;
1106 }
1098 qeth_l2_stop_card(card, recovery_mode); 1107 qeth_l2_stop_card(card, recovery_mode);
1099 rc = ccw_device_set_offline(CARD_DDEV(card)); 1108 rc = ccw_device_set_offline(CARD_DDEV(card));
1100 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 1109 rc2 = ccw_device_set_offline(CARD_WDEV(card));
@@ -1160,6 +1169,8 @@ static void __exit qeth_l2_exit(void)
1160static void qeth_l2_shutdown(struct ccwgroup_device *gdev) 1169static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
1161{ 1170{
1162 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 1171 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
1172 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
1173 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
1163 qeth_qdio_clear_card(card, 0); 1174 qeth_qdio_clear_card(card, 0);
1164 qeth_clear_qdio_buffers(card); 1175 qeth_clear_qdio_buffers(card);
1165} 1176}
@@ -1175,6 +1186,8 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
1175 if (gdev->state == CCWGROUP_OFFLINE) 1186 if (gdev->state == CCWGROUP_OFFLINE)
1176 return 0; 1187 return 0;
1177 if (card->state == CARD_STATE_UP) { 1188 if (card->state == CARD_STATE_UP) {
1189 if (card->info.hwtrap)
1190 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
1178 __qeth_l2_set_offline(card->gdev, 1); 1191 __qeth_l2_set_offline(card->gdev, 1);
1179 } else 1192 } else
1180 __qeth_l2_set_offline(card->gdev, 0); 1193 __qeth_l2_set_offline(card->gdev, 0);
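The hunks above use card->info.hwtrap as a small state flag. The patch itself stores bare integers, so the enum below is only one possible reading of the three values, not code from the driver.

enum example_hwtrap_state {
        EXAMPLE_HWTRAP_OFF   = 0,       /* disarmed or not supported */
        EXAMPLE_HWTRAP_ARMED = 1,       /* armed, or re-arm on the next set_online */
        EXAMPLE_HWTRAP_REARM = 2,       /* carrier returned while recovering: disarm
                                           on the recovery-mode offline, then back to 1 */
};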
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e705b27ec7dc..14a43aeb0c2a 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -62,8 +62,6 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
62int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 62int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
63void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 63void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
64 const u8 *); 64 const u8 *);
65int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
66int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
67int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); 65int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
68 66
69#endif /* __QETH_L3_H__ */ 67#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 142e5f6ef4f3..bbe7e1c058ab 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -43,33 +43,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
43static int __qeth_l3_set_online(struct ccwgroup_device *, int); 43static int __qeth_l3_set_online(struct ccwgroup_device *, int);
44static int __qeth_l3_set_offline(struct ccwgroup_device *, int); 44static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
45 45
46int qeth_l3_set_large_send(struct qeth_card *card,
47 enum qeth_large_send_types type)
48{
49 int rc = 0;
50
51 card->options.large_send = type;
52 if (card->dev == NULL)
53 return 0;
54
55 if (card->options.large_send == QETH_LARGE_SEND_TSO) {
56 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
57 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
58 NETIF_F_IP_CSUM;
59 } else {
60 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
61 NETIF_F_IP_CSUM);
62 card->options.large_send = QETH_LARGE_SEND_NO;
63 rc = -EOPNOTSUPP;
64 }
65 } else {
66 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
67 NETIF_F_IP_CSUM);
68 card->options.large_send = QETH_LARGE_SEND_NO;
69 }
70 return rc;
71}
72
73static int qeth_l3_isxdigit(char *buf) 46static int qeth_l3_isxdigit(char *buf)
74{ 47{
75 while (*buf) { 48 while (*buf) {
@@ -1304,39 +1277,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
1304 return rc; 1277 return rc;
1305} 1278}
1306 1279
1307static int qeth_l3_query_ipassists_cb(struct qeth_card *card,
1308 struct qeth_reply *reply, unsigned long data)
1309{
1310 struct qeth_ipa_cmd *cmd;
1311
1312 QETH_DBF_TEXT(SETUP, 2, "qipasscb");
1313
1314 cmd = (struct qeth_ipa_cmd *) data;
1315 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
1316 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
1317 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
1318 } else {
1319 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
1320 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
1321 }
1322 QETH_DBF_TEXT(SETUP, 2, "suppenbl");
1323 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
1324 QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
1325 return 0;
1326}
1327
1328static int qeth_l3_query_ipassists(struct qeth_card *card,
1329 enum qeth_prot_versions prot)
1330{
1331 int rc;
1332 struct qeth_cmd_buffer *iob;
1333
1334 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
1335 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
1336 rc = qeth_send_ipa_cmd(card, iob, qeth_l3_query_ipassists_cb, NULL);
1337 return rc;
1338}
1339
1340#ifdef CONFIG_QETH_IPV6 1280#ifdef CONFIG_QETH_IPV6
1341static int qeth_l3_softsetup_ipv6(struct qeth_card *card) 1281static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
1342{ 1282{
@@ -1347,7 +1287,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
1347 if (card->info.type == QETH_CARD_TYPE_IQD) 1287 if (card->info.type == QETH_CARD_TYPE_IQD)
1348 goto out; 1288 goto out;
1349 1289
1350 rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); 1290 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
1351 if (rc) { 1291 if (rc) {
1352 dev_err(&card->gdev->dev, 1292 dev_err(&card->gdev->dev,
1353 "Activating IPv6 support for %s failed\n", 1293 "Activating IPv6 support for %s failed\n",
@@ -1472,32 +1412,30 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
1472 return 0; 1412 return 0;
1473} 1413}
1474 1414
1475int qeth_l3_set_rx_csum(struct qeth_card *card, 1415int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
1476 enum qeth_checksum_types csum_type)
1477{ 1416{
1478 int rc = 0; 1417 int rc = 0;
1479 1418
1480 if (card->options.checksum_type == HW_CHECKSUMMING) { 1419 if (on) {
1481 if ((csum_type != HW_CHECKSUMMING) && 1420 if (card->state != CARD_STATE_DOWN) {
1482 (card->state != CARD_STATE_DOWN)) { 1421 if (!qeth_is_supported(card,
1483 rc = qeth_l3_send_simple_setassparms(card, 1422 IPA_INBOUND_CHECKSUM))
1484 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0); 1423 return -EPERM;
1424 rc = qeth_l3_send_checksum_command(card);
1485 if (rc) 1425 if (rc)
1486 return -EIO; 1426 return -EIO;
1487 } 1427 }
1428 card->dev->features |= NETIF_F_RXCSUM;
1488 } else { 1429 } else {
1489 if (csum_type == HW_CHECKSUMMING) { 1430 if (card->state != CARD_STATE_DOWN) {
1490 if (card->state != CARD_STATE_DOWN) { 1431 rc = qeth_l3_send_simple_setassparms(card,
1491 if (!qeth_is_supported(card, 1432 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
1492 IPA_INBOUND_CHECKSUM)) 1433 if (rc)
1493 return -EPERM; 1434 return -EIO;
1494 rc = qeth_l3_send_checksum_command(card);
1495 if (rc)
1496 return -EIO;
1497 }
1498 } 1435 }
1436 card->dev->features &= ~NETIF_F_RXCSUM;
1499 } 1437 }
1500 card->options.checksum_type = csum_type; 1438
1501 return rc; 1439 return rc;
1502} 1440}
1503 1441
@@ -1507,32 +1445,34 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1507 1445
1508 QETH_CARD_TEXT(card, 3, "strtcsum"); 1446 QETH_CARD_TEXT(card, 3, "strtcsum");
1509 1447
1510 if (card->options.checksum_type == NO_CHECKSUMMING) { 1448 if (card->dev->features & NETIF_F_RXCSUM) {
1511 dev_info(&card->gdev->dev, 1449 /* hw may have changed during offline or recovery */
1512 "Using no checksumming on %s.\n", 1450 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
1513 QETH_CARD_IFNAME(card)); 1451 dev_info(&card->gdev->dev,
1514 return 0;
1515 }
1516 if (card->options.checksum_type == SW_CHECKSUMMING) {
1517 dev_info(&card->gdev->dev,
1518 "Using SW checksumming on %s.\n",
1519 QETH_CARD_IFNAME(card));
1520 return 0;
1521 }
1522 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
1523 dev_info(&card->gdev->dev,
1524 "Inbound HW Checksumming not " 1452 "Inbound HW Checksumming not "
1525 "supported on %s,\ncontinuing " 1453 "supported on %s,\ncontinuing "
1526 "using Inbound SW Checksumming\n", 1454 "using Inbound SW Checksumming\n",
1527 QETH_CARD_IFNAME(card)); 1455 QETH_CARD_IFNAME(card));
1528 card->options.checksum_type = SW_CHECKSUMMING; 1456 goto update_feature;
1529 return 0; 1457 }
1530 } 1458
1531 rc = qeth_l3_send_checksum_command(card); 1459 rc = qeth_l3_send_checksum_command(card);
1532 if (!rc) 1460 if (!rc)
1533 dev_info(&card->gdev->dev, 1461 dev_info(&card->gdev->dev,
1534 "HW Checksumming (inbound) enabled\n"); 1462 "HW Checksumming (inbound) enabled\n");
1463 else
1464 goto update_feature;
1465 } else
1466 dev_info(&card->gdev->dev,
1467 "Using SW checksumming on %s.\n",
1468 QETH_CARD_IFNAME(card));
1469 return 0;
1535 1470
1471update_feature:
1472 rtnl_lock();
1473 card->dev->features &= ~NETIF_F_RXCSUM;
1474 netdev_update_features(card->dev);
1475 rtnl_unlock();
1536 return rc; 1476 return rc;
1537} 1477}
1538 1478
@@ -1580,10 +1520,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1580 dev_info(&card->gdev->dev, 1520 dev_info(&card->gdev->dev,
1581 "Outbound TSO enabled\n"); 1521 "Outbound TSO enabled\n");
1582 } 1522 }
1583 if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { 1523 if (rc)
1584 card->options.large_send = QETH_LARGE_SEND_NO; 1524 card->dev->features &= ~NETIF_F_TSO;
1585 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
1586 }
1587 return rc; 1525 return rc;
1588} 1526}
1589 1527
@@ -2064,14 +2002,7 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
2064 is_vlan = 1; 2002 is_vlan = 1;
2065 } 2003 }
2066 2004
2067 switch (card->options.checksum_type) { 2005 if (card->dev->features & NETIF_F_RXCSUM) {
2068 case SW_CHECKSUMMING:
2069 skb->ip_summed = CHECKSUM_NONE;
2070 break;
2071 case NO_CHECKSUMMING:
2072 skb->ip_summed = CHECKSUM_UNNECESSARY;
2073 break;
2074 case HW_CHECKSUMMING:
2075 if ((hdr->hdr.l3.ext_flags & 2006 if ((hdr->hdr.l3.ext_flags &
2076 (QETH_HDR_EXT_CSUM_HDR_REQ | 2007 (QETH_HDR_EXT_CSUM_HDR_REQ |
2077 QETH_HDR_EXT_CSUM_TRANSP_REQ)) == 2008 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
@@ -2080,7 +2011,8 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
2080 skb->ip_summed = CHECKSUM_UNNECESSARY; 2011 skb->ip_summed = CHECKSUM_UNNECESSARY;
2081 else 2012 else
2082 skb->ip_summed = CHECKSUM_NONE; 2013 skb->ip_summed = CHECKSUM_NONE;
2083 } 2014 } else
2015 skb->ip_summed = CHECKSUM_NONE;
2084 2016
2085 return is_vlan; 2017 return is_vlan;
2086} 2018}
@@ -3024,7 +2956,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3024 struct qeth_qdio_out_q *queue = card->qdio.out_qs 2956 struct qeth_qdio_out_q *queue = card->qdio.out_qs
3025 [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 2957 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
3026 int tx_bytes = skb->len; 2958 int tx_bytes = skb->len;
3027 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 2959 bool large_send;
3028 int data_offset = -1; 2960 int data_offset = -1;
3029 int nr_frags; 2961 int nr_frags;
3030 2962
@@ -3046,8 +2978,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3046 card->perf_stats.outbound_start_time = qeth_get_micros(); 2978 card->perf_stats.outbound_start_time = qeth_get_micros();
3047 } 2979 }
3048 2980
3049 if (skb_is_gso(skb)) 2981 large_send = skb_is_gso(skb);
3050 large_send = card->options.large_send;
3051 2982
3052 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) && 2983 if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
3053 (skb_shinfo(skb)->nr_frags == 0)) { 2984 (skb_shinfo(skb)->nr_frags == 0)) {
@@ -3096,7 +3027,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3096 /* fix hardware limitation: as long as we do not have sbal 3027 /* fix hardware limitation: as long as we do not have sbal
3097 * chaining we can not send long frag lists 3028 * chaining we can not send long frag lists
3098 */ 3029 */
3099 if (large_send == QETH_LARGE_SEND_TSO) { 3030 if (large_send) {
3100 if (qeth_l3_tso_elements(new_skb) + 1 > 16) { 3031 if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
3101 if (skb_linearize(new_skb)) 3032 if (skb_linearize(new_skb))
3102 goto tx_drop; 3033 goto tx_drop;
@@ -3105,8 +3036,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3105 } 3036 }
3106 } 3037 }
3107 3038
3108 if ((large_send == QETH_LARGE_SEND_TSO) && 3039 if (large_send && (cast_type == RTN_UNSPEC)) {
3109 (cast_type == RTN_UNSPEC)) {
3110 hdr = (struct qeth_hdr *)skb_push(new_skb, 3040 hdr = (struct qeth_hdr *)skb_push(new_skb,
3111 sizeof(struct qeth_hdr_tso)); 3041 sizeof(struct qeth_hdr_tso));
3112 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 3042 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
@@ -3141,7 +3071,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3141 3071
3142 if (card->info.type != QETH_CARD_TYPE_IQD) { 3072 if (card->info.type != QETH_CARD_TYPE_IQD) {
3143 int len; 3073 int len;
3144 if (large_send == QETH_LARGE_SEND_TSO) 3074 if (large_send)
3145 len = ((unsigned long)tcp_hdr(new_skb) + 3075 len = ((unsigned long)tcp_hdr(new_skb) +
3146 tcp_hdr(new_skb)->doff * 4) - 3076 tcp_hdr(new_skb)->doff * 4) -
3147 (unsigned long)new_skb->data; 3077 (unsigned long)new_skb->data;
@@ -3162,7 +3092,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
3162 if (new_skb != skb) 3092 if (new_skb != skb)
3163 dev_kfree_skb_any(skb); 3093 dev_kfree_skb_any(skb);
3164 if (card->options.performance_stats) { 3094 if (card->options.performance_stats) {
3165 if (large_send != QETH_LARGE_SEND_NO) { 3095 if (large_send) {
3166 card->perf_stats.large_send_bytes += tx_bytes; 3096 card->perf_stats.large_send_bytes += tx_bytes;
3167 card->perf_stats.large_send_cnt++; 3097 card->perf_stats.large_send_cnt++;
3168 } 3098 }
@@ -3248,65 +3178,39 @@ static int qeth_l3_stop(struct net_device *dev)
3248 return 0; 3178 return 0;
3249} 3179}
3250 3180
3251static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev) 3181static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
3252{
3253 struct qeth_card *card = dev->ml_priv;
3254
3255 return (card->options.checksum_type == HW_CHECKSUMMING);
3256}
3257
3258static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3259{ 3182{
3260 struct qeth_card *card = dev->ml_priv; 3183 struct qeth_card *card = dev->ml_priv;
3261 enum qeth_checksum_types csum_type;
3262 3184
3263 if (data) 3185 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
3264 csum_type = HW_CHECKSUMMING; 3186 features &= ~NETIF_F_IP_CSUM;
3265 else 3187 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
3266 csum_type = SW_CHECKSUMMING; 3188 features &= ~NETIF_F_TSO;
3189 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
3190 features &= ~NETIF_F_RXCSUM;
3267 3191
3268 return qeth_l3_set_rx_csum(card, csum_type); 3192 return features;
3269} 3193}
3270 3194
3271static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 3195static int qeth_l3_set_features(struct net_device *dev, u32 features)
3272{ 3196{
3273 struct qeth_card *card = dev->ml_priv; 3197 struct qeth_card *card = dev->ml_priv;
3274 int rc = 0; 3198 u32 changed = dev->features ^ features;
3275 3199 int on;
3276 if (data) {
3277 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
3278 } else {
3279 dev->features &= ~NETIF_F_TSO;
3280 card->options.large_send = QETH_LARGE_SEND_NO;
3281 }
3282 return rc;
3283}
3284 3200
3285static int qeth_l3_ethtool_set_tx_csum(struct net_device *dev, u32 data) 3201 if (!(changed & NETIF_F_RXCSUM))
3286{ 3202 return 0;
3287 struct qeth_card *card = dev->ml_priv;
3288 3203
3289 if (data) { 3204 if (features & NETIF_F_RXCSUM)
3290 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 3205 on = 1;
3291 dev->features |= NETIF_F_IP_CSUM; 3206 else
3292 else 3207 on = 0;
3293 return -EPERM;
3294 } else
3295 dev->features &= ~NETIF_F_IP_CSUM;
3296 3208
3297 return 0; 3209 return qeth_l3_set_rx_csum(card, on);
3298} 3210}
3299 3211
3300static const struct ethtool_ops qeth_l3_ethtool_ops = { 3212static const struct ethtool_ops qeth_l3_ethtool_ops = {
3301 .get_link = ethtool_op_get_link, 3213 .get_link = ethtool_op_get_link,
3302 .get_tx_csum = ethtool_op_get_tx_csum,
3303 .set_tx_csum = qeth_l3_ethtool_set_tx_csum,
3304 .get_rx_csum = qeth_l3_ethtool_get_rx_csum,
3305 .set_rx_csum = qeth_l3_ethtool_set_rx_csum,
3306 .get_sg = ethtool_op_get_sg,
3307 .set_sg = ethtool_op_set_sg,
3308 .get_tso = ethtool_op_get_tso,
3309 .set_tso = qeth_l3_ethtool_set_tso,
3310 .get_strings = qeth_core_get_strings, 3214 .get_strings = qeth_core_get_strings,
3311 .get_ethtool_stats = qeth_core_get_ethtool_stats, 3215 .get_ethtool_stats = qeth_core_get_ethtool_stats,
3312 .get_sset_count = qeth_core_get_sset_count, 3216 .get_sset_count = qeth_core_get_sset_count,
@@ -3347,6 +3251,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
3347 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 3251 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
3348 .ndo_do_ioctl = qeth_l3_do_ioctl, 3252 .ndo_do_ioctl = qeth_l3_do_ioctl,
3349 .ndo_change_mtu = qeth_change_mtu, 3253 .ndo_change_mtu = qeth_change_mtu,
3254 .ndo_fix_features = qeth_l3_fix_features,
3255 .ndo_set_features = qeth_l3_set_features,
3350 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 3256 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
3351 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3257 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3352 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3258 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
@@ -3362,6 +3268,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
3362 .ndo_set_multicast_list = qeth_l3_set_multicast_list, 3268 .ndo_set_multicast_list = qeth_l3_set_multicast_list,
3363 .ndo_do_ioctl = qeth_l3_do_ioctl, 3269 .ndo_do_ioctl = qeth_l3_do_ioctl,
3364 .ndo_change_mtu = qeth_change_mtu, 3270 .ndo_change_mtu = qeth_change_mtu,
3271 .ndo_fix_features = qeth_l3_fix_features,
3272 .ndo_set_features = qeth_l3_set_features,
3365 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register, 3273 .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
3366 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, 3274 .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
3367 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, 3275 .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
@@ -3392,8 +3300,12 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3392 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 3300 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
3393 card->dev->dev_id = card->info.unique_id & 3301 card->dev->dev_id = card->info.unique_id &
3394 0xffff; 3302 0xffff;
3395 if (!card->info.guestlan) 3303 if (!card->info.guestlan) {
3396 card->dev->features |= NETIF_F_GRO; 3304 card->dev->hw_features = NETIF_F_SG |
3305 NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
3306 NETIF_F_TSO;
3307 card->dev->features = NETIF_F_RXCSUM;
3308 }
3397 } 3309 }
3398 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 3310 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
3399 card->dev = alloc_netdev(0, "hsi%d", ether_setup); 3311 card->dev = alloc_netdev(0, "hsi%d", ether_setup);
@@ -3426,15 +3338,13 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3426 3338
3427 qeth_l3_create_device_attributes(&gdev->dev); 3339 qeth_l3_create_device_attributes(&gdev->dev);
3428 card->options.layer2 = 0; 3340 card->options.layer2 = 0;
3341 card->info.hwtrap = 0;
3429 card->discipline.start_poll = qeth_qdio_start_poll; 3342 card->discipline.start_poll = qeth_qdio_start_poll;
3430 card->discipline.input_handler = (qdio_handler_t *) 3343 card->discipline.input_handler = (qdio_handler_t *)
3431 qeth_qdio_input_handler; 3344 qeth_qdio_input_handler;
3432 card->discipline.output_handler = (qdio_handler_t *) 3345 card->discipline.output_handler = (qdio_handler_t *)
3433 qeth_qdio_output_handler; 3346 qeth_qdio_output_handler;
3434 card->discipline.recover = qeth_l3_recover; 3347 card->discipline.recover = qeth_l3_recover;
3435 if ((card->info.type == QETH_CARD_TYPE_OSD) ||
3436 (card->info.type == QETH_CARD_TYPE_OSX))
3437 card->options.checksum_type = HW_CHECKSUMMING;
3438 return 0; 3348 return 0;
3439} 3349}
3440 3350
@@ -3480,13 +3390,18 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3480 goto out_remove; 3390 goto out_remove;
3481 } 3391 }
3482 3392
3483 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3484
3485 if (!card->dev && qeth_l3_setup_netdev(card)) { 3393 if (!card->dev && qeth_l3_setup_netdev(card)) {
3486 rc = -ENODEV; 3394 rc = -ENODEV;
3487 goto out_remove; 3395 goto out_remove;
3488 } 3396 }
3489 3397
3398 if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
3399 if (card->info.hwtrap &&
3400 qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
3401 card->info.hwtrap = 0;
3402 } else
3403 card->info.hwtrap = 0;
3404
3490 card->state = CARD_STATE_HARDSETUP; 3405 card->state = CARD_STATE_HARDSETUP;
3491 memset(&card->rx, 0, sizeof(struct qeth_rx)); 3406 memset(&card->rx, 0, sizeof(struct qeth_rx));
3492 qeth_print_status_message(card); 3407 qeth_print_status_message(card);
@@ -3516,7 +3431,6 @@ contin:
3516 rc = qeth_l3_start_ipassists(card); 3431 rc = qeth_l3_start_ipassists(card);
3517 if (rc) 3432 if (rc)
3518 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 3433 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
3519 qeth_l3_set_large_send(card, card->options.large_send);
3520 rc = qeth_l3_setrouting_v4(card); 3434 rc = qeth_l3_setrouting_v4(card);
3521 if (rc) 3435 if (rc)
3522 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 3436 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
@@ -3589,6 +3503,10 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
3589 if (card->dev && netif_carrier_ok(card->dev)) 3503 if (card->dev && netif_carrier_ok(card->dev))
3590 netif_carrier_off(card->dev); 3504 netif_carrier_off(card->dev);
3591 recover_flag = card->state; 3505 recover_flag = card->state;
3506 if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) {
3507 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
3508 card->info.hwtrap = 1;
3509 }
3592 qeth_l3_stop_card(card, recovery_mode); 3510 qeth_l3_stop_card(card, recovery_mode);
3593 rc = ccw_device_set_offline(CARD_DDEV(card)); 3511 rc = ccw_device_set_offline(CARD_DDEV(card));
3594 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 3512 rc2 = ccw_device_set_offline(CARD_WDEV(card));
@@ -3644,6 +3562,8 @@ static int qeth_l3_recover(void *ptr)
3644static void qeth_l3_shutdown(struct ccwgroup_device *gdev) 3562static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
3645{ 3563{
3646 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3564 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3565 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
3566 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
3647 qeth_qdio_clear_card(card, 0); 3567 qeth_qdio_clear_card(card, 0);
3648 qeth_clear_qdio_buffers(card); 3568 qeth_clear_qdio_buffers(card);
3649} 3569}
@@ -3659,6 +3579,8 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
3659 if (gdev->state == CCWGROUP_OFFLINE) 3579 if (gdev->state == CCWGROUP_OFFLINE)
3660 return 0; 3580 return 0;
3661 if (card->state == CARD_STATE_UP) { 3581 if (card->state == CARD_STATE_UP) {
3582 if (card->info.hwtrap)
3583 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
3662 __qeth_l3_set_offline(card->gdev, 1); 3584 __qeth_l3_set_offline(card->gdev, 1);
3663 } else 3585 } else
3664 __qeth_l3_set_offline(card->gdev, 0); 3586 __qeth_l3_set_offline(card->gdev, 0);
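The ethtool rx_csum/tso hooks removed above are replaced by the kernel's unified feature negotiation. Below is a minimal generic sketch of the ndo_fix_features/ndo_set_features pair as the interface stands in this kernel (feature masks still u32); example_priv and example_hw_set_rx_csum() are assumed stand-ins for the device-specific parts, not qeth code.

struct example_priv {
        bool hw_can_rx_csum;
};

/* Assumed device-specific helper: enable/disable inbound checksumming. */
static int example_hw_set_rx_csum(struct net_device *dev, int on);

static u32 example_fix_features(struct net_device *dev, u32 features)
{
        struct example_priv *priv = netdev_priv(dev);

        if (!priv->hw_can_rx_csum)              /* drop bits the hardware lacks */
                features &= ~NETIF_F_RXCSUM;
        return features;
}

static int example_set_features(struct net_device *dev, u32 features)
{
        u32 changed = dev->features ^ features;

        if (!(changed & NETIF_F_RXCSUM))
                return 0;
        return example_hw_set_rx_csum(dev, !!(features & NETIF_F_RXCSUM));
}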
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 67cfa68dcf1b..cd99210296e2 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -15,16 +15,6 @@
15#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ 15#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
16struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) 16struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
17 17
18static const char *qeth_l3_get_checksum_str(struct qeth_card *card)
19{
20 if (card->options.checksum_type == SW_CHECKSUMMING)
21 return "sw";
22 else if (card->options.checksum_type == HW_CHECKSUMMING)
23 return "hw";
24 else
25 return "no";
26}
27
28static ssize_t qeth_l3_dev_route_show(struct qeth_card *card, 18static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
29 struct qeth_routing_info *route, char *buf) 19 struct qeth_routing_info *route, char *buf)
30{ 20{
@@ -295,51 +285,6 @@ out:
295static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show, 285static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
296 qeth_l3_dev_canonical_macaddr_store); 286 qeth_l3_dev_canonical_macaddr_store);
297 287
298static ssize_t qeth_l3_dev_checksum_show(struct device *dev,
299 struct device_attribute *attr, char *buf)
300{
301 struct qeth_card *card = dev_get_drvdata(dev);
302
303 if (!card)
304 return -EINVAL;
305
306 return sprintf(buf, "%s checksumming\n",
307 qeth_l3_get_checksum_str(card));
308}
309
310static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
311 struct device_attribute *attr, const char *buf, size_t count)
312{
313 struct qeth_card *card = dev_get_drvdata(dev);
314 enum qeth_checksum_types csum_type;
315 char *tmp;
316 int rc = 0;
317
318 if (!card)
319 return -EINVAL;
320
321 mutex_lock(&card->conf_mutex);
322 tmp = strsep((char **) &buf, "\n");
323 if (!strcmp(tmp, "sw_checksumming"))
324 csum_type = SW_CHECKSUMMING;
325 else if (!strcmp(tmp, "hw_checksumming"))
326 csum_type = HW_CHECKSUMMING;
327 else if (!strcmp(tmp, "no_checksumming"))
328 csum_type = NO_CHECKSUMMING;
329 else {
330 rc = -EINVAL;
331 goto out;
332 }
333
334 rc = qeth_l3_set_rx_csum(card, csum_type);
335out:
336 mutex_unlock(&card->conf_mutex);
337 return rc ? rc : count;
338}
339
340static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
341 qeth_l3_dev_checksum_store);
342
343static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, 288static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
344 struct device_attribute *attr, char *buf) 289 struct device_attribute *attr, char *buf)
345{ 290{
@@ -402,61 +347,13 @@ out:
402static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, 347static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
403 qeth_l3_dev_sniffer_store); 348 qeth_l3_dev_sniffer_store);
404 349
405static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
406 struct device_attribute *attr, char *buf)
407{
408 struct qeth_card *card = dev_get_drvdata(dev);
409
410 if (!card)
411 return -EINVAL;
412
413 switch (card->options.large_send) {
414 case QETH_LARGE_SEND_NO:
415 return sprintf(buf, "%s\n", "no");
416 case QETH_LARGE_SEND_TSO:
417 return sprintf(buf, "%s\n", "TSO");
418 default:
419 return sprintf(buf, "%s\n", "N/A");
420 }
421}
422
423static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
424 struct device_attribute *attr, const char *buf, size_t count)
425{
426 struct qeth_card *card = dev_get_drvdata(dev);
427 enum qeth_large_send_types type;
428 int rc = 0;
429 char *tmp;
430
431 if (!card)
432 return -EINVAL;
433 tmp = strsep((char **) &buf, "\n");
434 if (!strcmp(tmp, "no"))
435 type = QETH_LARGE_SEND_NO;
436 else if (!strcmp(tmp, "TSO"))
437 type = QETH_LARGE_SEND_TSO;
438 else
439 return -EINVAL;
440
441 mutex_lock(&card->conf_mutex);
442 if (card->options.large_send != type)
443 rc = qeth_l3_set_large_send(card, type);
444 mutex_unlock(&card->conf_mutex);
445 return rc ? rc : count;
446}
447
448static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
449 qeth_l3_dev_large_send_store);
450
451static struct attribute *qeth_l3_device_attrs[] = { 350static struct attribute *qeth_l3_device_attrs[] = {
452 &dev_attr_route4.attr, 351 &dev_attr_route4.attr,
453 &dev_attr_route6.attr, 352 &dev_attr_route6.attr,
454 &dev_attr_fake_broadcast.attr, 353 &dev_attr_fake_broadcast.attr,
455 &dev_attr_broadcast_mode.attr, 354 &dev_attr_broadcast_mode.attr,
456 &dev_attr_canonical_macaddr.attr, 355 &dev_attr_canonical_macaddr.attr,
457 &dev_attr_checksumming.attr,
458 &dev_attr_sniffer.attr, 356 &dev_attr_sniffer.attr,
459 &dev_attr_large_send.attr,
460 NULL, 357 NULL,
461}; 358};
462 359
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e2e647509a73..cd050196a163 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -664,7 +664,7 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
664 struct fcoe_port *port = lport_priv(lport); 664 struct fcoe_port *port = lport_priv(lport);
665 struct bnx2fc_hba *hba = port->priv; 665 struct bnx2fc_hba *hba = port->priv;
666 struct net_device *netdev = hba->netdev; 666 struct net_device *netdev = hba->netdev;
667 struct ethtool_cmd ecmd = { ETHTOOL_GSET }; 667 struct ethtool_cmd ecmd;
668 668
669 if (!dev_ethtool_get_settings(netdev, &ecmd)) { 669 if (!dev_ethtool_get_settings(netdev, &ecmd)) {
670 lport->link_supported_speeds &= 670 lport->link_supported_speeds &=
@@ -675,12 +675,15 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
675 if (ecmd.supported & SUPPORTED_10000baseT_Full) 675 if (ecmd.supported & SUPPORTED_10000baseT_Full)
676 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; 676 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
677 677
678 if (ecmd.speed == SPEED_1000) 678 switch (ethtool_cmd_speed(&ecmd)) {
679 case SPEED_1000:
679 lport->link_speed = FC_PORTSPEED_1GBIT; 680 lport->link_speed = FC_PORTSPEED_1GBIT;
680 if (ecmd.speed == SPEED_10000) 681 break;
682 case SPEED_10000:
681 lport->link_speed = FC_PORTSPEED_10GBIT; 683 lport->link_speed = FC_PORTSPEED_10GBIT;
684 break;
685 }
682 } 686 }
683 return;
684} 687}
685static int bnx2fc_link_ok(struct fc_lport *lport) 688static int bnx2fc_link_ok(struct fc_lport *lport)
686{ 689{
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index de764ea7419d..a2a9c7c6c643 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -450,12 +450,13 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
450 return csk; 450 return csk;
451} 451}
452 452
453static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr, 453static struct rtable *find_route_ipv4(struct flowi4 *fl4,
454 __be32 saddr, __be32 daddr,
454 __be16 sport, __be16 dport, u8 tos) 455 __be16 sport, __be16 dport, u8 tos)
455{ 456{
456 struct rtable *rt; 457 struct rtable *rt;
457 458
458 rt = ip_route_output_ports(&init_net, NULL, daddr, saddr, 459 rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
459 dport, sport, IPPROTO_TCP, tos, 0); 460 dport, sport, IPPROTO_TCP, tos, 0);
460 if (IS_ERR(rt)) 461 if (IS_ERR(rt))
461 return NULL; 462 return NULL;
@@ -470,6 +471,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
470 struct net_device *ndev; 471 struct net_device *ndev;
471 struct cxgbi_device *cdev; 472 struct cxgbi_device *cdev;
472 struct rtable *rt = NULL; 473 struct rtable *rt = NULL;
474 struct flowi4 fl4;
473 struct cxgbi_sock *csk = NULL; 475 struct cxgbi_sock *csk = NULL;
474 unsigned int mtu = 0; 476 unsigned int mtu = 0;
475 int port = 0xFFFF; 477 int port = 0xFFFF;
@@ -482,7 +484,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
482 goto err_out; 484 goto err_out;
483 } 485 }
484 486
485 rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); 487 rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
486 if (!rt) { 488 if (!rt) {
487 pr_info("no route to ipv4 0x%x, port %u.\n", 489 pr_info("no route to ipv4 0x%x, port %u.\n",
488 daddr->sin_addr.s_addr, daddr->sin_port); 490 daddr->sin_addr.s_addr, daddr->sin_port);
@@ -531,7 +533,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
531 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; 533 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
532 csk->daddr.sin_port = daddr->sin_port; 534 csk->daddr.sin_port = daddr->sin_port;
533 csk->daddr.sin_family = daddr->sin_family; 535 csk->daddr.sin_family = daddr->sin_family;
534 csk->saddr.sin_addr.s_addr = rt->rt_src; 536 csk->saddr.sin_addr.s_addr = fl4.saddr;
535 537
536 return csk; 538 return csk;
537 539
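After the conversion above the flow key outlives the lookup, so the source address chosen by routing is read back from fl4.saddr instead of the removed rt->rt_src field. A hedged, self-contained sketch of the same ip_route_output_ports() pattern:

/* Sketch only -- assumes <net/route.h>. */
static int example_resolve_saddr(__be32 daddr, __be16 dport, __be32 *saddr)
{
        struct flowi4 fl4;
        struct rtable *rt;

        rt = ip_route_output_ports(&init_net, &fl4, NULL, daddr, 0,
                                   dport, 0, IPPROTO_TCP, 0, 0);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        *saddr = fl4.saddr;     /* source picked by the routing lookup */
        ip_rt_put(rt);
        return 0;
}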
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index bde6ee5333eb..04f346b562da 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2026,7 +2026,7 @@ out_nodev:
2026int fcoe_link_speed_update(struct fc_lport *lport) 2026int fcoe_link_speed_update(struct fc_lport *lport)
2027{ 2027{
2028 struct net_device *netdev = fcoe_netdev(lport); 2028 struct net_device *netdev = fcoe_netdev(lport);
2029 struct ethtool_cmd ecmd = { ETHTOOL_GSET }; 2029 struct ethtool_cmd ecmd;
2030 2030
2031 if (!dev_ethtool_get_settings(netdev, &ecmd)) { 2031 if (!dev_ethtool_get_settings(netdev, &ecmd)) {
2032 lport->link_supported_speeds &= 2032 lport->link_supported_speeds &=
@@ -2037,11 +2037,14 @@ int fcoe_link_speed_update(struct fc_lport *lport)
2037 if (ecmd.supported & SUPPORTED_10000baseT_Full) 2037 if (ecmd.supported & SUPPORTED_10000baseT_Full)
2038 lport->link_supported_speeds |= 2038 lport->link_supported_speeds |=
2039 FC_PORTSPEED_10GBIT; 2039 FC_PORTSPEED_10GBIT;
2040 if (ecmd.speed == SPEED_1000) 2040 switch (ethtool_cmd_speed(&ecmd)) {
2041 case SPEED_1000:
2041 lport->link_speed = FC_PORTSPEED_1GBIT; 2042 lport->link_speed = FC_PORTSPEED_1GBIT;
2042 if (ecmd.speed == SPEED_10000) 2043 break;
2044 case SPEED_10000:
2043 lport->link_speed = FC_PORTSPEED_10GBIT; 2045 lport->link_speed = FC_PORTSPEED_10GBIT;
2044 2046 break;
2047 }
2045 return 0; 2048 return 0;
2046 } 2049 }
2047 return -1; 2050 return -1;
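Both FCoE hunks follow the same conversion: the explicit { ETHTOOL_GSET } initializer goes away because dev_ethtool_get_settings() initializes the command structure itself, and the raw ecmd.speed comparisons become a switch on ethtool_cmd_speed(), which also folds in speed_hi. A small hedged sketch of reading a link speed this way:

/* Illustration only. Returns the speed in Mb/s, or 0 if it cannot be read. */
static u32 example_link_speed(struct net_device *netdev)
{
        struct ethtool_cmd ecmd;

        if (dev_ethtool_get_settings(netdev, &ecmd))
                return 0;
        return ethtool_cmd_speed(&ecmd);
}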
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 7c031fdc8205..06d15b6f2215 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -46,40 +46,66 @@ void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
         if (!ccdev)
                 return;
         bus = ccdev->bus;
+
+        /* We support SLOW only on 6..9 */
+        if (ccdev->id.revision >= 10 && mode == SSB_CLKMODE_SLOW)
+                mode = SSB_CLKMODE_DYNAMIC;
+
+        if (cc->capabilities & SSB_CHIPCO_CAP_PMU)
+                return; /* PMU controls clockmode, separated function needed */
+        SSB_WARN_ON(ccdev->id.revision >= 20);
+
         /* chipcommon cores prior to rev6 don't support dynamic clock control */
         if (ccdev->id.revision < 6)
                 return;
-        /* chipcommon cores rev10 are a whole new ball game */
+
+        /* ChipCommon cores rev10+ need testing */
         if (ccdev->id.revision >= 10)
                 return;
+
         if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL))
                 return;
 
         switch (mode) {
-        case SSB_CLKMODE_SLOW:
+        case SSB_CLKMODE_SLOW: /* For revs 6..9 only */
                 tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
                 tmp |= SSB_CHIPCO_SLOWCLKCTL_FSLOW;
                 chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
                 break;
         case SSB_CLKMODE_FAST:
-                ssb_pci_xtal(bus, SSB_GPIO_XTAL, 1); /* Force crystal on */
-                tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
-                tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
-                tmp |= SSB_CHIPCO_SLOWCLKCTL_IPLL;
-                chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
+                if (ccdev->id.revision < 10) {
+                        ssb_pci_xtal(bus, SSB_GPIO_XTAL, 1); /* Force crystal on */
+                        tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
+                        tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
+                        tmp |= SSB_CHIPCO_SLOWCLKCTL_IPLL;
+                        chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
+                } else {
+                        chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
+                                (chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) |
+                                 SSB_CHIPCO_SYSCLKCTL_FORCEHT));
+                        /* udelay(150); TODO: not available in early init */
+                }
                 break;
         case SSB_CLKMODE_DYNAMIC:
-                tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
-                tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
-                tmp &= ~SSB_CHIPCO_SLOWCLKCTL_IPLL;
-                tmp &= ~SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
-                if ((tmp & SSB_CHIPCO_SLOWCLKCTL_SRC) != SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL)
-                        tmp |= SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
-                chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
-
-                /* for dynamic control, we have to release our xtal_pu "force on" */
-                if (tmp & SSB_CHIPCO_SLOWCLKCTL_ENXTAL)
-                        ssb_pci_xtal(bus, SSB_GPIO_XTAL, 0);
+                if (ccdev->id.revision < 10) {
+                        tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL);
+                        tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW;
+                        tmp &= ~SSB_CHIPCO_SLOWCLKCTL_IPLL;
+                        tmp &= ~SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
+                        if ((tmp & SSB_CHIPCO_SLOWCLKCTL_SRC) !=
+                            SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL)
+                                tmp |= SSB_CHIPCO_SLOWCLKCTL_ENXTAL;
+                        chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp);
+
+                        /* For dynamic control, we have to release our xtal_pu
+                         * "force on" */
+                        if (tmp & SSB_CHIPCO_SLOWCLKCTL_ENXTAL)
+                                ssb_pci_xtal(bus, SSB_GPIO_XTAL, 0);
+                } else {
+                        chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL,
+                                (chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) &
+                                 ~SSB_CHIPCO_SYSCLKCTL_FORCEHT));
+                }
                 break;
         default:
                 SSB_WARN_ON(1);
@@ -260,6 +286,12 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
         if (cc->dev->id.revision >= 11)
                 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
         ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
+
+        if (cc->dev->id.revision >= 20) {
+                chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
+                chipco_write32(cc, SSB_CHIPCO_GPIOPULLDOWN, 0);
+        }
+
         ssb_pmu_init(cc);
         chipco_powercontrol_init(cc);
         ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
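
Note on the ssb_chipco_set_clockmode() hunk above: on ChipCommon rev 10+ the FAST and DYNAMIC paths stop touching SSB_CHIPCO_SLOWCLKCTL and instead set or clear the ForceHT bit in SSB_CHIPCO_SYSCLKCTL with a read-modify-write. A runnable sketch of that pattern on a plain memory word follows; the bit value is a placeholder and the real driver goes through chipco_read32()/chipco_write32().

/* Sketch of the ForceHT read-modify-write used for ChipCommon rev >= 10,
 * modelled on a plain memory word so it can run anywhere. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SYSCLKCTL_FORCEHT 0x00000002u /* placeholder bit value */

static uint32_t fake_sysclkctl; /* stands in for the memory-mapped register */

static void example_set_forceht(bool fast)
{
        uint32_t tmp = fake_sysclkctl;              /* chipco_read32()  */

        if (fast)
                tmp |= EXAMPLE_SYSCLKCTL_FORCEHT;   /* SSB_CLKMODE_FAST    */
        else
                tmp &= ~EXAMPLE_SYSCLKCTL_FORCEHT;  /* SSB_CLKMODE_DYNAMIC */

        fake_sysclkctl = tmp;                       /* chipco_write32() */
}

int main(void)
{
        example_set_forceht(true);
        printf("fast:    0x%08x\n", fake_sysclkctl);
        example_set_forceht(false);
        printf("dynamic: 0x%08x\n", fake_sysclkctl);
        return 0;
}
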
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 5732bb2c3578..305ade7825f7 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -423,6 +423,8 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
 
         switch (bus->chip_id) {
         case 0x4312:
+                min_msk = 0xCBB;
+                break;
         case 0x4322:
                 /* We keep the default settings:
                  * min_msk = 0xCBB
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 0e8d35224614..82feb348c8bb 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -15,6 +15,11 @@
 
 #include "ssb_private.h"
 
+static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address);
+static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data);
+static u16 ssb_pcie_mdio_read(struct ssb_pcicore *pc, u8 device, u8 address);
+static void ssb_pcie_mdio_write(struct ssb_pcicore *pc, u8 device,
+                                u8 address, u16 data);
 
 static inline
 u32 pcicore_read32(struct ssb_pcicore *pc, u16 offset)
@@ -403,6 +408,107 @@ static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
 }
 #endif /* CONFIG_SSB_PCICORE_HOSTMODE */
 
+/**************************************************
+ * Workarounds.
+ **************************************************/
+
+static void ssb_pcicore_fix_sprom_core_index(struct ssb_pcicore *pc)
+{
+        u16 tmp = pcicore_read16(pc, SSB_PCICORE_SPROM(0));
+        if (((tmp & 0xF000) >> 12) != pc->dev->core_index) {
+                tmp &= ~0xF000;
+                tmp |= (pc->dev->core_index << 12);
+                pcicore_write16(pc, SSB_PCICORE_SPROM(0), tmp);
+        }
+}
+
+static u8 ssb_pcicore_polarity_workaround(struct ssb_pcicore *pc)
+{
+        return (ssb_pcie_read(pc, 0x204) & 0x10) ? 0xC0 : 0x80;
+}
+
+static void ssb_pcicore_serdes_workaround(struct ssb_pcicore *pc)
+{
+        const u8 serdes_pll_device = 0x1D;
+        const u8 serdes_rx_device = 0x1F;
+        u16 tmp;
+
+        ssb_pcie_mdio_write(pc, serdes_rx_device, 1 /* Control */,
+                            ssb_pcicore_polarity_workaround(pc));
+        tmp = ssb_pcie_mdio_read(pc, serdes_pll_device, 1 /* Control */);
+        if (tmp & 0x4000)
+                ssb_pcie_mdio_write(pc, serdes_pll_device, 1, tmp & ~0x4000);
+}
+
+static void ssb_pcicore_pci_setup_workarounds(struct ssb_pcicore *pc)
+{
+        struct ssb_device *pdev = pc->dev;
+        struct ssb_bus *bus = pdev->bus;
+        u32 tmp;
+
+        tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
+        tmp |= SSB_PCICORE_SBTOPCI_PREF;
+        tmp |= SSB_PCICORE_SBTOPCI_BURST;
+        pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);
+
+        if (pdev->id.revision < 5) {
+                tmp = ssb_read32(pdev, SSB_IMCFGLO);
+                tmp &= ~SSB_IMCFGLO_SERTO;
+                tmp |= 2;
+                tmp &= ~SSB_IMCFGLO_REQTO;
+                tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT;
+                ssb_write32(pdev, SSB_IMCFGLO, tmp);
+                ssb_commit_settings(bus);
+        } else if (pdev->id.revision >= 11) {
+                tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
+                tmp |= SSB_PCICORE_SBTOPCI_MRM;
+                pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);
+        }
+}
+
+static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
+{
+        u32 tmp;
+        u8 rev = pc->dev->id.revision;
+
+        if (rev == 0 || rev == 1) {
+                /* TLP Workaround register. */
+                tmp = ssb_pcie_read(pc, 0x4);
+                tmp |= 0x8;
+                ssb_pcie_write(pc, 0x4, tmp);
+        }
+        if (rev == 1) {
+                /* DLLP Link Control register. */
+                tmp = ssb_pcie_read(pc, 0x100);
+                tmp |= 0x40;
+                ssb_pcie_write(pc, 0x100, tmp);
+        }
+
+        if (rev == 0) {
+                const u8 serdes_rx_device = 0x1F;
+
+                ssb_pcie_mdio_write(pc, serdes_rx_device,
+                                    2 /* Timer */, 0x8128);
+                ssb_pcie_mdio_write(pc, serdes_rx_device,
+                                    6 /* CDR */, 0x0100);
+                ssb_pcie_mdio_write(pc, serdes_rx_device,
+                                    7 /* CDR BW */, 0x1466);
+        } else if (rev == 3 || rev == 4 || rev == 5) {
+                /* TODO: DLLP Power Management Threshold */
+                ssb_pcicore_serdes_workaround(pc);
+                /* TODO: ASPM */
+        } else if (rev == 7) {
+                /* TODO: No PLL down */
+        }
+
+        if (rev >= 6) {
+                /* Miscellaneous Configuration Fixup */
+                tmp = pcicore_read16(pc, SSB_PCICORE_SPROM(5));
+                if (!(tmp & 0x8000))
+                        pcicore_write16(pc, SSB_PCICORE_SPROM(5),
+                                        tmp | 0x8000);
+        }
+}
 
 /**************************************************
  * Generic and Clientmode operation code.
@@ -417,14 +523,14 @@ static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 void ssb_pcicore_init(struct ssb_pcicore *pc)
 {
         struct ssb_device *dev = pc->dev;
-        struct ssb_bus *bus;
 
         if (!dev)
                 return;
-        bus = dev->bus;
         if (!ssb_device_is_enabled(dev))
                 ssb_device_enable(dev, 0);
 
+        ssb_pcicore_fix_sprom_core_index(pc);
+
 #ifdef CONFIG_SSB_PCICORE_HOSTMODE
         pc->hostmode = pcicore_is_in_hostmode(pc);
         if (pc->hostmode)
@@ -432,6 +538,11 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 #endif /* CONFIG_SSB_PCICORE_HOSTMODE */
         if (!pc->hostmode)
                 ssb_pcicore_init_clientmode(pc);
+
+        /* Additional always once-executed workarounds */
+        ssb_pcicore_serdes_workaround(pc);
+        /* TODO: ASPM */
+        /* TODO: Clock Request Update */
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
@@ -446,58 +557,104 @@ static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data)
         pcicore_write32(pc, 0x134, data);
 }
 
-static void ssb_pcie_mdio_write(struct ssb_pcicore *pc, u8 device,
-                                u8 address, u16 data)
+static void ssb_pcie_mdio_set_phy(struct ssb_pcicore *pc, u8 phy)
 {
         const u16 mdio_control = 0x128;
         const u16 mdio_data = 0x12C;
         u32 v;
         int i;
 
+        v = (1 << 30); /* Start of Transaction */
+        v |= (1 << 28); /* Write Transaction */
+        v |= (1 << 17); /* Turnaround */
+        v |= (0x1F << 18);
+        v |= (phy << 4);
+        pcicore_write32(pc, mdio_data, v);
+
+        udelay(10);
+        for (i = 0; i < 200; i++) {
+                v = pcicore_read32(pc, mdio_control);
+                if (v & 0x100 /* Trans complete */)
+                        break;
+                msleep(1);
+        }
+}
+
+static u16 ssb_pcie_mdio_read(struct ssb_pcicore *pc, u8 device, u8 address)
+{
+        const u16 mdio_control = 0x128;
+        const u16 mdio_data = 0x12C;
+        int max_retries = 10;
+        u16 ret = 0;
+        u32 v;
+        int i;
+
         v = 0x80; /* Enable Preamble Sequence */
         v |= 0x2; /* MDIO Clock Divisor */
         pcicore_write32(pc, mdio_control, v);
 
+        if (pc->dev->id.revision >= 10) {
+                max_retries = 200;
+                ssb_pcie_mdio_set_phy(pc, device);
+        }
+
         v = (1 << 30); /* Start of Transaction */
-        v |= (1 << 28); /* Write Transaction */
+        v |= (1 << 29); /* Read Transaction */
         v |= (1 << 17); /* Turnaround */
-        v |= (u32)device << 22;
+        if (pc->dev->id.revision < 10)
+                v |= (u32)device << 22;
         v |= (u32)address << 18;
-        v |= data;
         pcicore_write32(pc, mdio_data, v);
         /* Wait for the device to complete the transaction */
         udelay(10);
-        for (i = 0; i < 10; i++) {
+        for (i = 0; i < max_retries; i++) {
                 v = pcicore_read32(pc, mdio_control);
-                if (v & 0x100 /* Trans complete */)
+                if (v & 0x100 /* Trans complete */) {
+                        udelay(10);
+                        ret = pcicore_read32(pc, mdio_data);
                         break;
+                }
                 msleep(1);
         }
         pcicore_write32(pc, mdio_control, 0);
+        return ret;
 }
 
-static void ssb_broadcast_value(struct ssb_device *dev,
-                                u32 address, u32 data)
+static void ssb_pcie_mdio_write(struct ssb_pcicore *pc, u8 device,
+                                u8 address, u16 data)
 {
-        /* This is used for both, PCI and ChipCommon core, so be careful. */
-        BUILD_BUG_ON(SSB_PCICORE_BCAST_ADDR != SSB_CHIPCO_BCAST_ADDR);
-        BUILD_BUG_ON(SSB_PCICORE_BCAST_DATA != SSB_CHIPCO_BCAST_DATA);
-
-        ssb_write32(dev, SSB_PCICORE_BCAST_ADDR, address);
-        ssb_read32(dev, SSB_PCICORE_BCAST_ADDR); /* flush */
-        ssb_write32(dev, SSB_PCICORE_BCAST_DATA, data);
-        ssb_read32(dev, SSB_PCICORE_BCAST_DATA); /* flush */
-}
+        const u16 mdio_control = 0x128;
+        const u16 mdio_data = 0x12C;
+        int max_retries = 10;
+        u32 v;
+        int i;
 
-static void ssb_commit_settings(struct ssb_bus *bus)
-{
-        struct ssb_device *dev;
+        v = 0x80; /* Enable Preamble Sequence */
+        v |= 0x2; /* MDIO Clock Divisor */
+        pcicore_write32(pc, mdio_control, v);
 
-        dev = bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev;
-        if (WARN_ON(!dev))
-                return;
-        /* This forces an update of the cached registers. */
-        ssb_broadcast_value(dev, 0xFD8, 0);
+        if (pc->dev->id.revision >= 10) {
+                max_retries = 200;
+                ssb_pcie_mdio_set_phy(pc, device);
+        }
+
+        v = (1 << 30); /* Start of Transaction */
+        v |= (1 << 28); /* Write Transaction */
+        v |= (1 << 17); /* Turnaround */
+        if (pc->dev->id.revision < 10)
+                v |= (u32)device << 22;
+        v |= (u32)address << 18;
+        v |= data;
+        pcicore_write32(pc, mdio_data, v);
+        /* Wait for the device to complete the transaction */
+        udelay(10);
+        for (i = 0; i < max_retries; i++) {
+                v = pcicore_read32(pc, mdio_control);
+                if (v & 0x100 /* Trans complete */)
+                        break;
+                msleep(1);
+        }
+        pcicore_write32(pc, mdio_control, 0);
 }
 
 int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
@@ -550,48 +707,10 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
         if (pc->setup_done)
                 goto out;
         if (pdev->id.coreid == SSB_DEV_PCI) {
-                tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
-                tmp |= SSB_PCICORE_SBTOPCI_PREF;
-                tmp |= SSB_PCICORE_SBTOPCI_BURST;
-                pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);
-
-                if (pdev->id.revision < 5) {
-                        tmp = ssb_read32(pdev, SSB_IMCFGLO);
-                        tmp &= ~SSB_IMCFGLO_SERTO;
-                        tmp |= 2;
-                        tmp &= ~SSB_IMCFGLO_REQTO;
-                        tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT;
-                        ssb_write32(pdev, SSB_IMCFGLO, tmp);
-                        ssb_commit_settings(bus);
-                } else if (pdev->id.revision >= 11) {
-                        tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2);
-                        tmp |= SSB_PCICORE_SBTOPCI_MRM;
-                        pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp);
-                }
+                ssb_pcicore_pci_setup_workarounds(pc);
         } else {
                 WARN_ON(pdev->id.coreid != SSB_DEV_PCIE);
-                //TODO: Better make defines for all these magic PCIE values.
-                if ((pdev->id.revision == 0) || (pdev->id.revision == 1)) {
-                        /* TLP Workaround register. */
-                        tmp = ssb_pcie_read(pc, 0x4);
-                        tmp |= 0x8;
-                        ssb_pcie_write(pc, 0x4, tmp);
-                }
-                if (pdev->id.revision == 0) {
-                        const u8 serdes_rx_device = 0x1F;
-
-                        ssb_pcie_mdio_write(pc, serdes_rx_device,
-                                            2 /* Timer */, 0x8128);
-                        ssb_pcie_mdio_write(pc, serdes_rx_device,
-                                            6 /* CDR */, 0x0100);
-                        ssb_pcie_mdio_write(pc, serdes_rx_device,
-                                            7 /* CDR BW */, 0x1466);
-                } else if (pdev->id.revision == 1) {
-                        /* DLLP Link Control register. */
-                        tmp = ssb_pcie_read(pc, 0x100);
-                        tmp |= 0x40;
-                        ssb_pcie_write(pc, 0x100, tmp);
-                }
+                ssb_pcicore_pcie_setup_workarounds(pc);
         }
         pc->setup_done = 1;
 out:
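
Note on the PCI-E MDIO helpers added above: each transaction word is built by OR-ing a start-of-transaction marker, a read or write opcode, the turnaround bit and, on pre-rev10 cores, the device and register address fields, after which the code polls the control register for completion. A standalone sketch of just the bit packing follows; field positions are copied from the diff, and the real helpers additionally program the MDIO control register and wait for "transaction complete".

/* Illustrative bit packing for an MDIO write transaction word. */
#include <stdint.h>
#include <stdio.h>

static uint32_t example_mdio_write_word(uint8_t device, uint8_t address,
                                        uint16_t data)
{
        uint32_t v;

        v  = 1u << 30;               /* Start of Transaction */
        v |= 1u << 28;               /* Write Transaction (1 << 29 for reads) */
        v |= 1u << 17;               /* Turnaround */
        v |= (uint32_t)device << 22; /* pre-rev10 device field */
        v |= (uint32_t)address << 18;
        v |= data;
        return v;
}

int main(void)
{
        /* Serdes RX device 0x1F, register 2 ("Timer"), value 0x8128,
         * matching one of the rev-0 workaround writes above. */
        printf("0x%08x\n", example_mdio_write_word(0x1F, 2, 0x8128));
        return 0;
}
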
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index e05ba6eefc7e..f8a13f863217 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -1117,23 +1117,22 @@ static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev)
 {
         u32 rev = ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_SSBREV;
 
-        /* The REJECT bit changed position in TMSLOW between
-         * Backplane revisions. */
+        /* The REJECT bit seems to be different for Backplane rev 2.3 */
         switch (rev) {
         case SSB_IDLOW_SSBREV_22:
-                return SSB_TMSLOW_REJECT_22;
+        case SSB_IDLOW_SSBREV_24:
+        case SSB_IDLOW_SSBREV_26:
+                return SSB_TMSLOW_REJECT;
         case SSB_IDLOW_SSBREV_23:
                 return SSB_TMSLOW_REJECT_23;
-        case SSB_IDLOW_SSBREV_24: /* TODO - find the proper REJECT bits */
-        case SSB_IDLOW_SSBREV_25: /* same here */
-        case SSB_IDLOW_SSBREV_26: /* same here */
+        case SSB_IDLOW_SSBREV_25: /* TODO - find the proper REJECT bit */
         case SSB_IDLOW_SSBREV_27: /* same here */
-                return SSB_TMSLOW_REJECT_23; /* this is a guess */
+                return SSB_TMSLOW_REJECT; /* this is a guess */
         default:
                 printk(KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev);
                 WARN_ON(1);
         }
-        return (SSB_TMSLOW_REJECT_22 | SSB_TMSLOW_REJECT_23);
+        return (SSB_TMSLOW_REJECT | SSB_TMSLOW_REJECT_23);
 }
 
 int ssb_device_is_enabled(struct ssb_device *dev)
@@ -1309,20 +1308,20 @@ EXPORT_SYMBOL(ssb_bus_may_powerdown);
 
 int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl)
 {
-        struct ssb_chipcommon *cc;
         int err;
         enum ssb_clkmode mode;
 
         err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1);
         if (err)
                 goto error;
-        cc = &bus->chipco;
-        mode = dynamic_pctl ? SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST;
-        ssb_chipco_set_clockmode(cc, mode);
 
 #ifdef CONFIG_SSB_DEBUG
         bus->powered_up = 1;
 #endif
+
+        mode = dynamic_pctl ? SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST;
+        ssb_chipco_set_clockmode(&bus->chipco, mode);
+
         return 0;
 error:
         ssb_printk(KERN_ERR PFX "Bus powerup failed\n");
@@ -1330,6 +1329,37 @@ error:
 }
 EXPORT_SYMBOL(ssb_bus_powerup);
 
+static void ssb_broadcast_value(struct ssb_device *dev,
+                                u32 address, u32 data)
+{
+#ifdef CONFIG_SSB_DRIVER_PCICORE
+        /* This is used for both, PCI and ChipCommon core, so be careful. */
+        BUILD_BUG_ON(SSB_PCICORE_BCAST_ADDR != SSB_CHIPCO_BCAST_ADDR);
+        BUILD_BUG_ON(SSB_PCICORE_BCAST_DATA != SSB_CHIPCO_BCAST_DATA);
+#endif
+
+        ssb_write32(dev, SSB_CHIPCO_BCAST_ADDR, address);
+        ssb_read32(dev, SSB_CHIPCO_BCAST_ADDR); /* flush */
+        ssb_write32(dev, SSB_CHIPCO_BCAST_DATA, data);
+        ssb_read32(dev, SSB_CHIPCO_BCAST_DATA); /* flush */
+}
+
+void ssb_commit_settings(struct ssb_bus *bus)
+{
+        struct ssb_device *dev;
+
+#ifdef CONFIG_SSB_DRIVER_PCICORE
+        dev = bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev;
+#else
+        dev = bus->chipco.dev;
+#endif
+        if (WARN_ON(!dev))
+                return;
+        /* This forces an update of the cached registers. */
+        ssb_broadcast_value(dev, 0xFD8, 0);
+}
+EXPORT_SYMBOL(ssb_commit_settings);
+
 u32 ssb_admatch_base(u32 adm)
 {
         u32 base = 0;
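
Note on the main.c hunk above: ssb_broadcast_value() moves into main.c and ssb_commit_settings() is exported, so code outside the PCI core driver can flush cached backplane registers after rewriting them. A kernel-side sketch of the call pattern follows, mirroring the SSB_IMCFGLO timeout update from ssb_pcicore_pci_setup_workarounds() earlier in this diff; example_update_timeouts() is an invented name and assumes a valid struct ssb_device.

/* Kernel-only sketch: rewrite the interconnect timeouts of one SSB core and
 * then flush the cached backplane settings via the newly exported helper. */
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>

static void example_update_timeouts(struct ssb_device *sdev)
{
        u32 tmp;

        tmp = ssb_read32(sdev, SSB_IMCFGLO);
        tmp &= ~SSB_IMCFGLO_SERTO;
        tmp |= 2;                               /* service timeout */
        tmp &= ~SSB_IMCFGLO_REQTO;
        tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT;    /* request timeout */
        ssb_write32(sdev, SSB_IMCFGLO, tmp);

        /* Force the backplane to pick up the cached register change;
         * ssb_commit_settings() is the function exported in the hunk above. */
        ssb_commit_settings(sdev->bus);
}
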
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 29884c00c4d5..45e5babd3961 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -258,7 +258,10 @@ static int we_support_multiple_80211_cores(struct ssb_bus *bus)
 #ifdef CONFIG_SSB_PCIHOST
         if (bus->bustype == SSB_BUSTYPE_PCI) {
                 if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
-                    bus->host_pci->device == 0x4324)
+                    ((bus->host_pci->device == 0x4313) ||
+                     (bus->host_pci->device == 0x431A) ||
+                     (bus->host_pci->device == 0x4321) ||
+                     (bus->host_pci->device == 0x4324)))
                         return 1;
         }
 #endif /* CONFIG_SSB_PCIHOST */
@@ -307,7 +310,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
         } else {
                 if (bus->bustype == SSB_BUSTYPE_PCI) {
                         bus->chip_id = pcidev_to_chipid(bus->host_pci);
-                        pci_read_config_word(bus->host_pci, PCI_REVISION_ID,
+                        pci_read_config_byte(bus->host_pci, PCI_REVISION_ID,
                                              &bus->chip_rev);
                         bus->chip_package = 0;
                 } else {
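
Note on the scan.c hunk above: PCI_REVISION_ID is a single byte at config offset 0x08, so it has to be read with the byte-sized accessor; the earlier word-sized read also pulled in the neighbouring config byte. A short kernel-side sketch of the corrected read follows; example_read_revision() is an invented helper and assumes a valid struct pci_dev.

/* Kernel-only sketch: read the PCI revision ID with the correct width. */
#include <linux/pci.h>

static u8 example_read_revision(struct pci_dev *pdev)
{
        u8 rev;

        /* PCI_REVISION_ID is one byte, so use the byte accessor and a u8. */
        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
        return rev;
}
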
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 33973568214f..aaa81883f0ad 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -317,8 +317,6 @@ static void netvsc_get_drvinfo(struct net_device *net,
 
 static const struct ethtool_ops ethtool_ops = {
         .get_drvinfo    = netvsc_get_drvinfo,
-        .get_sg         = ethtool_op_get_sg,
-        .set_sg         = ethtool_op_set_sg,
         .get_link       = ethtool_op_get_link,
 };
 
@@ -406,6 +404,7 @@ static int netvsc_probe(struct device *device)
         net->netdev_ops = &device_ops;
 
         /* TODO: Add GSO and Checksum offload */
+        net->hw_features = NETIF_F_SG;
         net->features = NETIF_F_SG;
 
         SET_ETHTOOL_OPS(net, &ethtool_ops);
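
Note on the netvsc hunk above: the driver drops its get_sg/set_sg ethtool_ops and instead advertises scatter-gather through net->hw_features, letting the generic ethtool code toggle the flag, while net->features still selects what is enabled by default at probe time. A sketch of that probe-time pattern follows; example_setup_features() and the surrounding driver are invented, and only the two feature assignments correspond to the diff.

/* Kernel-only sketch of the hw_features style of offload setup. */
#include <linux/netdevice.h>

static void example_setup_features(struct net_device *net)
{
        /* Offloads the user may toggle via "ethtool -K" */
        net->hw_features = NETIF_F_SG;
        /* Offloads enabled by default when the device is registered */
        net->features = NETIF_F_SG;
}
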